From 070462031cd5119dbcc8a24db5505ab04144caca Mon Sep 17 00:00:00 2001
From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com>
Date: Thu, 22 Aug 2024 08:56:32 -0700
Subject: [PATCH 1/7] Remove Python 2 Support (#1195)

* Remove Python 2 iteration techniques
* Remove six.b
* Replace six types
* Clean up async_wrapper module
* Clean up object_names module
* Clean up exception messages tests
* Delete RedisBlaster tests (Python 2 only)
* Delete gRPC tests Python 2 garbage collection test
* Clean up pika tests
* Remove py2_namespace from CLM validator
* Clean up testing fixtures
* Update feed parser tests to Python 3
* Remove all Python 2 tests from tox
* Fix imports on BaseHTTPServer
* Remove Python 2.7 from docker image
* Remove Python 2.7 from setup.py
* Remove Python 2 reimplementations
* Remove Python 2 build logic
* Clean up import hook module
* Trim comments related to Python 2
* Remove Python 2 vs 3 conditionals
* Delete six import statements
* Remove six package
* Remove Python 2 specific closure logic
* Trim comments related to Python 2
* Remove try/except on importing importlib reload
* Rename configparser module imports
* Rename __builtin__ module
* Remove __future__ imports
* Remove Python 2 specific argument binding code
* Use os.sep not "/"
* Remove longs from attribute tests
* Fix Python 2 imports
* Fix functools lru_cache
* Fix bug in django tastypie tests
* Fix imports for test_http_client
* Update import hook tests to use find_spec
* Fix mistakes in pika tests
* Remove references to urllib2
* Remove iscoroutinefunction reimplementation
* Remove falcon master tests for py37 (unsupported)
* Add banner to gearman tests
* Drop virtualenv pin from tox
---
 .github/containers/Dockerfile | 2 +-
 .github/containers/Makefile | 3 +-
 .github/containers/requirements.txt | 2 +-
 .github/workflows/deploy-python.yml | 30 -
 THIRD_PARTY_NOTICES.md | 9 -
 newrelic/admin/__init__.py | 2 -
 newrelic/admin/debug_console.py | 2 -
 newrelic/admin/generate_config.py | 2 -
 newrelic/admin/license_key.py | 2 -
 newrelic/admin/local_config.py | 2 -
 newrelic/admin/network_config.py | 2 -
 newrelic/admin/record_deploy.py | 2 -
 newrelic/admin/run_program.py | 2 -
 newrelic/admin/run_python.py | 2 -
 newrelic/admin/server_config.py | 2 -
 newrelic/admin/validate_config.py | 2 -
 newrelic/api/application.py | 3 +-
 newrelic/api/asgi_application.py | 4 +-
 newrelic/api/import_hook.py | 166 +--
 newrelic/api/log.py | 3 +-
 newrelic/api/profile_trace.py | 3 +-
 newrelic/api/time_trace.py | 3 +-
 newrelic/api/transaction.py | 13 +-
 newrelic/api/web_transaction.py | 22 +-
 newrelic/api/wsgi_application.py | 14 +-
 newrelic/bootstrap/sitecustomize.py | 18 +-
 newrelic/common/agent_http.py | 2 -
 newrelic/common/async_proxy.py | 9 +-
 newrelic/common/async_wrapper.py | 73 +-
 newrelic/common/coroutine.py | 19 +-
 newrelic/common/encoding_utils.py | 50 +-
 newrelic/common/object_names.py | 140 +--
 newrelic/common/package_version_utils.py | 39 +-
 newrelic/common/signature.py | 21 +-
 newrelic/common/system_info.py | 24 +-
 newrelic/config.py | 56 +-
 newrelic/console.py | 62 +-
 newrelic/core/agent.py | 9 +-
 newrelic/core/application.py | 12 +-
 newrelic/core/attribute.py | 11 +-
 newrelic/core/config.py | 14 +-
 newrelic/core/data_collector.py | 2 -
 newrelic/core/database_utils.py | 3 +-
 newrelic/core/external_node.py | 5 +-
 newrelic/core/function_node.py | 3 +-
 newrelic/core/profile_sessions.py | 15 +-
 newrelic/core/stats_engine.py | 42 +-
 newrelic/core/trace_cache.py | 7 +-
 newrelic/hooks/component_piston.py | 3 +-
newrelic/hooks/database_psycopg.py | 11 +- newrelic/hooks/database_psycopg2.py | 11 +- newrelic/hooks/datastore_elasticsearch.py | 3 +- newrelic/hooks/datastore_pyelasticsearch.py | 3 +- newrelic/hooks/external_feedparser.py | 3 +- newrelic/hooks/external_httplib.py | 11 +- newrelic/hooks/external_urllib.py | 9 +- newrelic/hooks/external_urllib2.py | 51 - newrelic/hooks/framework_django.py | 14 +- newrelic/hooks/framework_webpy.py | 3 +- newrelic/hooks/logger_logging.py | 7 +- newrelic/hooks/middleware_flask_compress.py | 3 +- newrelic/network/addresses.py | 5 +- newrelic/newrelic.ini | 2 +- newrelic/packages/six.py | 998 ------------------ setup.py | 10 +- tests/agent_features/conftest.py | 23 +- tests/agent_features/test_asgi_browser.py | 6 +- tests/agent_features/test_attribute.py | 53 +- tests/agent_features/test_browser.py | 6 +- .../agent_features/test_browser_middleware.py | 5 - .../agent_features/test_code_level_metrics.py | 17 +- tests/agent_features/test_configuration.py | 7 +- .../agent_features/test_dead_transactions.py | 6 - .../test_dimensional_metrics.py | 13 +- .../agent_features/test_exception_messages.py | 261 +---- tests/agent_features/test_logs_in_context.py | 11 +- tests/agent_features/test_ml_events.py | 12 +- tests/agent_features/test_web_transaction.py | 6 +- .../agent_streaming/test_infinite_tracing.py | 6 +- tests/agent_unittests/conftest.py | 10 +- tests/agent_unittests/test_agent_protocol.py | 3 +- tests/agent_unittests/test_http_client.py | 18 +- tests/agent_unittests/test_import_hook.py | 21 +- .../test_package_version_utils.py | 2 - tests/agent_unittests/test_sampler_metrics.py | 53 +- .../test_utilization_settings.py | 8 +- tests/application_gearman/test_gearman.py | 4 +- .../test_application.py | 3 +- .../component_flask_rest/test_application.py | 5 +- tests/component_tastypie/test_application.py | 26 +- tests/cross_agent/test_cat_map.py | 19 +- tests/cross_agent/test_collector_hostname.py | 9 +- tests/cross_agent/test_utilization_configs.py | 8 +- tests/cross_agent/test_w3c_trace_context.py | 3 - tests/datastore_psycopg2/test_cursor.py | 4 +- tests/datastore_pymongo/test_pymongo.py | 5 +- tests/datastore_redis/test_rb.py | 127 --- tests/external_http/test_http.py | 19 +- tests/external_httplib/test_httplib.py | 46 +- tests/external_httplib/test_urllib2.py | 5 +- tests/framework_ariadne/conftest.py | 5 - tests/framework_bottle/test_application.py | 11 +- tests/framework_cherrypy/test_application.py | 9 +- tests/framework_cherrypy/test_dispatch.py | 7 +- tests/framework_flask/_test_compress.py | 9 +- tests/framework_flask/test_application.py | 14 +- tests/framework_flask/test_blueprints.py | 5 +- tests/framework_flask/test_compress.py | 5 +- tests/framework_flask/test_middleware.py | 5 +- tests/framework_flask/test_not_found.py | 5 +- tests/framework_flask/test_user_exceptions.py | 5 +- tests/framework_flask/test_views.py | 5 +- tests/framework_graphene/conftest.py | 5 - tests/framework_graphql/conftest.py | 5 - tests/framework_grpc/conftest.py | 24 - tests/framework_grpc/test_clients.py | 29 +- tests/framework_grpc/test_server.py | 15 +- tests/framework_pyramid/test_application.py | 16 +- tests/framework_pyramid/test_cornice.py | 8 +- tests/logger_logging/test_attributes.py | 2 - tests/logger_logging/test_metrics.py | 4 +- tests/logger_logging/test_settings.py | 4 +- tests/logger_structlog/test_metrics.py | 7 +- .../test_consumer.py | 13 +- .../test_producer.py | 8 +- .../test_serialization.py | 5 +- .../test_consumer.py | 13 +- .../test_producer.py | 8 +- 
.../test_serialization.py | 7 +- tests/messagebroker_pika/compat.py | 10 +- tests/messagebroker_pika/minversion.py | 17 +- tests/messagebroker_pika/test_cat.py | 7 +- .../test_distributed_tracing.py | 10 +- tests/messagebroker_pika/test_memory_leak.py | 10 +- .../test_pika_async_connection_consume.py | 105 +- .../test_pika_blocking_connection_consume.py | 64 +- .../test_pika_supportability.py | 9 +- .../test_calibration_models.py | 9 +- tests/mlmodel_sklearn/test_cluster_models.py | 15 +- tests/mlmodel_sklearn/test_compose_models.py | 9 +- .../mlmodel_sklearn/test_covariance_models.py | 8 +- .../test_cross_decomposition_models.py | 8 +- .../test_discriminant_analysis_models.py | 9 +- tests/mlmodel_sklearn/test_dummy_models.py | 9 +- tests/mlmodel_sklearn/test_ensemble_models.py | 23 +- .../test_feature_selection_models.py | 16 +- .../test_gaussian_process_models.py | 9 +- .../test_kernel_ridge_models.py | 9 +- tests/mlmodel_sklearn/test_linear_models.py | 13 +- tests/mlmodel_sklearn/test_mixture_models.py | 9 +- .../test_model_selection_models.py | 9 +- .../mlmodel_sklearn/test_multiclass_models.py | 9 +- .../test_multioutput_models.py | 19 +- .../test_naive_bayes_models.py | 16 +- .../mlmodel_sklearn/test_neighbors_models.py | 16 +- .../test_neural_network_models.py | 9 +- tests/mlmodel_sklearn/test_pipeline_models.py | 9 +- .../test_semi_supervised_models.py | 16 +- tests/mlmodel_sklearn/test_svm_models.py | 9 +- tests/mlmodel_sklearn/test_tree_models.py | 13 +- tests/testing_support/external_fixtures.py | 6 +- tests/testing_support/fixture/event_loop.py | 18 +- tests/testing_support/fixtures.py | 46 +- .../mock_external_http_server.py | 8 +- tests/testing_support/mock_http_client.py | 7 +- tests/testing_support/sample_applications.py | 5 +- .../validators/validate_code_level_metrics.py | 8 +- .../validators/validate_custom_events.py | 7 +- .../validate_error_trace_collector_json.py | 7 +- .../validators/validate_log_events.py | 3 +- ...validate_log_events_outside_transaction.py | 3 +- .../validators/validate_ml_events.py | 7 +- .../validate_slow_sql_collector_json.py | 11 +- .../validators/validate_span_events.py | 15 +- .../validators/validate_tt_collector_json.py | 13 +- tox.ini | 96 +- 176 files changed, 572 insertions(+), 3361 deletions(-) delete mode 100644 newrelic/hooks/external_urllib2.py delete mode 100644 newrelic/packages/six.py delete mode 100644 tests/datastore_redis/test_rb.py diff --git a/.github/containers/Dockerfile b/.github/containers/Dockerfile index 438163e670..62232800ab 100644 --- a/.github/containers/Dockerfile +++ b/.github/containers/Dockerfile @@ -96,7 +96,7 @@ RUN echo 'eval "$(pyenv init -)"' >>${HOME}/.bashrc && \ pyenv update # Install Python -ARG PYTHON_VERSIONS="3.11 3.10 3.9 3.8 3.7 3.12 2.7 pypy2.7-7.3.12 pypy3.10-7.3.15" +ARG PYTHON_VERSIONS="3.11 3.10 3.9 3.8 3.7 3.12 pypy3.10-7.3.15" COPY --chown=1000:1000 --chmod=+x ./install-python.sh /tmp/install-python.sh RUN /tmp/install-python.sh && \ rm /tmp/install-python.sh diff --git a/.github/containers/Makefile b/.github/containers/Makefile index 870808655d..5dae271877 100644 --- a/.github/containers/Makefile +++ b/.github/containers/Makefile @@ -23,7 +23,7 @@ REPO_ROOT:=$(realpath ${MAKEFILE_DIR}../../) UNAME_P:=$(shell uname -p) PLATFORM_AUTOMATIC:=$(if $(findstring arm,${UNAME_P}),linux/arm64,linux/amd64) PLATFORM:=$(if ${PLATFORM_OVERRIDE},${PLATFORM_OVERRIDE},${PLATFORM_AUTOMATIC}) -PYTHON_VERSIONS_AUTOMATIC:=3.10 2.7 +PYTHON_VERSIONS_AUTOMATIC:=3.10 PYTHON_VERSIONS:=$(if 
${PYTHON_VERSIONS_OVERRIDE},${PYTHON_VERSIONS_OVERRIDE},${PYTHON_VERSIONS_AUTOMATIC}) .PHONY: default @@ -70,6 +70,5 @@ test: build ghcr.io/newrelic/python-agent-ci:local \ /bin/bash -c '\ python3.10 --version && \ - python2.7 --version && \ touch tox.ini && tox --version && \ echo "Success! Python versions installed."' diff --git a/.github/containers/requirements.txt b/.github/containers/requirements.txt index 68bdfe4fe7..9ead18f430 100644 --- a/.github/containers/requirements.txt +++ b/.github/containers/requirements.txt @@ -5,5 +5,5 @@ isort pip setuptools tox -virtualenv<20.22.0 +virtualenv wheel \ No newline at end of file diff --git a/.github/workflows/deploy-python.yml b/.github/workflows/deploy-python.yml index 45ec91fed5..e743f631cc 100644 --- a/.github/workflows/deploy-python.yml +++ b/.github/workflows/deploy-python.yml @@ -64,35 +64,6 @@ jobs: path: ./wheelhouse/*.whl retention-days: 1 - build-linux-py2: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # 3.0.0 - - - name: Build Wheels - uses: pypa/cibuildwheel@bf3a5590c9aeb9a7e4ff4025ef7400e0c6ad1248 # 1.12.0 (Last release to support Python 2) - env: - CIBW_PLATFORM: linux - CIBW_BUILD: cp27-manylinux_x86_64 - CIBW_ARCHS_LINUX: x86_64 - CIBW_ENVIRONMENT: "LD_LIBRARY_PATH=/opt/rh/devtoolset-8/root/usr/lib64:/opt/rh/devtoolset-8/root/usr/lib:/opt/rh/devtoolset-8/root/usr/lib64/dyninst:/opt/rh/devtoolset-8/root/usr/lib/dyninst:/usr/local/lib64:/usr/local/lib" - CIBW_TEST_REQUIRES: pytest==4.6.11 - CIBW_TEST_COMMAND: "PYTHONPATH={project}/tests pytest {project}/tests/agent_unittests -vx" - - - name: Upload Artifacts - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # 4.3.1 - with: - name: ${{ github.job }} - path: ./wheelhouse/*.whl - retention-days: 1 - build-sdist: runs-on: ubuntu-latest steps: @@ -130,7 +101,6 @@ jobs: needs: - build-linux-py3 - - build-linux-py2 - build-sdist steps: diff --git a/THIRD_PARTY_NOTICES.md b/THIRD_PARTY_NOTICES.md index 7c4242cc26..7aa68f22dd 100644 --- a/THIRD_PARTY_NOTICES.md +++ b/THIRD_PARTY_NOTICES.md @@ -35,15 +35,6 @@ Distributed under the following license(s): * [The Apache License, Version 2.0 License](https://opensource.org/license/apache-2-0/) -## [six](https://pypi.org/project/six) - -Copyright (c) 2010-2013 Benjamin Peterson - -Distributed under the following license(s): - -* [The MIT License](http://opensource.org/licenses/MIT) - - ## [time.monotonic](newrelic/common/_monotonic.c) Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Python Software Foundation; All Rights Reserved diff --git a/newrelic/admin/__init__.py b/newrelic/admin/__init__.py index 509037dd50..4ad2e8cb21 100644 --- a/newrelic/admin/__init__.py +++ b/newrelic/admin/__init__.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import logging import sys diff --git a/newrelic/admin/debug_console.py b/newrelic/admin/debug_console.py index 1a61629946..65fff008d0 100644 --- a/newrelic/admin/debug_console.py +++ b/newrelic/admin/debug_console.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - from newrelic.admin import command, usage @command('debug-console', 'config_file [session_log]', diff --git a/newrelic/admin/generate_config.py b/newrelic/admin/generate_config.py index c48dff6a28..1613143c0d 100644 --- a/newrelic/admin/generate_config.py +++ b/newrelic/admin/generate_config.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage @command('generate-config', 'license_key [output_file]', diff --git a/newrelic/admin/license_key.py b/newrelic/admin/license_key.py index e1eaaa39b2..ea1e65bb62 100644 --- a/newrelic/admin/license_key.py +++ b/newrelic/admin/license_key.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage from newrelic.common.encoding_utils import obfuscate_license_key diff --git a/newrelic/admin/local_config.py b/newrelic/admin/local_config.py index 6585bfcf70..9d1459c902 100644 --- a/newrelic/admin/local_config.py +++ b/newrelic/admin/local_config.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage diff --git a/newrelic/admin/network_config.py b/newrelic/admin/network_config.py index d2ce41aaf4..9d6c3b3f27 100644 --- a/newrelic/admin/network_config.py +++ b/newrelic/admin/network_config.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage diff --git a/newrelic/admin/record_deploy.py b/newrelic/admin/record_deploy.py index 65748cc2a3..8de478c72c 100644 --- a/newrelic/admin/record_deploy.py +++ b/newrelic/admin/record_deploy.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import os import pwd diff --git a/newrelic/admin/run_program.py b/newrelic/admin/run_program.py index 0a0f8e7787..cb99c87a79 100644 --- a/newrelic/admin/run_program.py +++ b/newrelic/admin/run_program.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage diff --git a/newrelic/admin/run_python.py b/newrelic/admin/run_python.py index ccdb5be55e..5a4454fb7c 100644 --- a/newrelic/admin/run_python.py +++ b/newrelic/admin/run_python.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage @command('run-python', '...', diff --git a/newrelic/admin/server_config.py b/newrelic/admin/server_config.py index cd463226d5..6141e1f4b2 100644 --- a/newrelic/admin/server_config.py +++ b/newrelic/admin/server_config.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - from newrelic.admin import command, usage diff --git a/newrelic/admin/validate_config.py b/newrelic/admin/validate_config.py index 64645b0c62..86195470ec 100644 --- a/newrelic/admin/validate_config.py +++ b/newrelic/admin/validate_config.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from newrelic.admin import command, usage diff --git a/newrelic/api/application.py b/newrelic/api/application.py index ebc8356a76..cc613a4dd9 100644 --- a/newrelic/api/application.py +++ b/newrelic/api/application.py @@ -18,7 +18,6 @@ import newrelic.api.import_hook import newrelic.core.agent import newrelic.core.config -import newrelic.packages.six as six class Application(object): @@ -106,7 +105,7 @@ def shutdown(self): @property def linked_applications(self): - return list(six.iterkeys(self._linked)) + return list(self._linked.keys()) def link_to_application(self, name): self._linked[name] = True diff --git a/newrelic/api/asgi_application.py b/newrelic/api/asgi_application.py index dc2fd5bbf4..72e2da1a07 100644 --- a/newrelic/api/asgi_application.py +++ b/newrelic/api/asgi_application.py @@ -26,7 +26,7 @@ function_wrapper, wrap_object, ) -from newrelic.packages import asgiref_compatibility, six +from newrelic.packages import asgiref_compatibility def _bind_scope(scope, *args, **kwargs): @@ -158,7 +158,7 @@ async def send_inject_browser_agent(self, message): # if there's a valid body string, attempt to insert the HTML if verify_body_exists(self.body): body = insert_html_snippet( - self.body, lambda: six.b(self.transaction.browser_timing_header()), self.search_maximum + self.body, lambda: self.transaction.browser_timing_header().encode("latin-1"), self.search_maximum ) # If we have inserted the browser agent diff --git a/newrelic/api/import_hook.py b/newrelic/api/import_hook.py index b36262afc3..2b6359c6d3 100644 --- a/newrelic/api/import_hook.py +++ b/newrelic/api/import_hook.py @@ -15,14 +15,11 @@ import logging import sys -from newrelic.packages import six +from importlib.util import find_spec + _logger = logging.getLogger(__name__) -try: - from importlib.util import find_spec -except ImportError: - find_spec = None _import_hooks = {} @@ -30,7 +27,6 @@ # These modules are imported by the newrelic package and/or do not do # nested imports, so they're ok to import before newrelic. "urllib", - "urllib2", "httplib", "http.client", "urllib.request", @@ -62,71 +58,61 @@ def register_import_hook(name, callable): # pylint: disable=redefined-builtin - if six.PY2: - import imp - - imp.acquire_lock() - - try: - hooks = _import_hooks.get(name, None) + hooks = _import_hooks.get(name, None) - if name not in _import_hooks or hooks is None: + if name not in _import_hooks or hooks is None: - # If no entry in registry or entry already flagged with - # None then module may have been loaded, in which case - # need to check and fire hook immediately. + # If no entry in registry or entry already flagged with + # None then module may have been loaded, in which case + # need to check and fire hook immediately. - hooks = _import_hooks.get(name) + hooks = _import_hooks.get(name) - module = sys.modules.get(name, None) + module = sys.modules.get(name, None) - if module is not None: + if module is not None: - # The module has already been loaded so fire hook - # immediately. + # The module has already been loaded so fire hook + # immediately. 
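A note on the `six.b` replacement in `asgi_application.py` above: on Python 3, `six.b(s)` was defined as `s.encode("latin-1")`, so inlining the encode call preserves behavior exactly. A minimal sketch (the header string here is hypothetical, not the agent's real browser-agent header):

```python
# What six.b did on Python 3, now written inline. Latin-1 maps code
# points 0-255 one-to-one onto byte values, so the round trip is lossless.
def b(s):
    return s.encode("latin-1")

header = "<script>/* browser agent */</script>"  # hypothetical header content
assert b(header) == header.encode("latin-1") == b"<script>/* browser agent */</script>"
```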
- if module.__name__ not in _ok_modules: - _logger.debug( - "Module %s has been imported before the " - "newrelic.agent.initialize call. Import and " - "initialize the New Relic agent before all " - "other modules for best monitoring " - "results.", - module, - ) + if module.__name__ not in _ok_modules: + _logger.debug( + "Module %s has been imported before the " + "newrelic.agent.initialize call. Import and " + "initialize the New Relic agent before all " + "other modules for best monitoring " + "results.", + module, + ) - # Add the module name to the set of uninstrumented modules. - # During harvest, this set will be used to produce metrics. - # The adding of names here and the reading of them during - # harvest should be thread safe. This is because the code - # here is only run during `initialize` which will no-op if - # run multiple times (even if in a thread). The set is read - # from the harvest thread which will run one minute after - # `initialize` is called. + # Add the module name to the set of uninstrumented modules. + # During harvest, this set will be used to produce metrics. + # The adding of names here and the reading of them during + # harvest should be thread safe. This is because the code + # here is only run during `initialize` which will no-op if + # run multiple times (even if in a thread). The set is read + # from the harvest thread which will run one minute after + # `initialize` is called. - _uninstrumented_modules.add(module.__name__) + _uninstrumented_modules.add(module.__name__) - _import_hooks[name] = None + _import_hooks[name] = None - callable(module) + callable(module) - else: + else: - # No hook has been registered so far so create list - # and add current hook. + # No hook has been registered so far so create list + # and add current hook. - _import_hooks[name] = [callable] + _import_hooks[name] = [callable] - else: + else: - # Hook has already been registered, so append current - # hook. + # Hook has already been registered, so append current + # hook. - _import_hooks[name].append(callable) - - finally: - if six.PY2: - imp.release_lock() + _import_hooks[name].append(callable) def _notify_import_hooks(name, module): @@ -181,63 +167,10 @@ class ImportHookFinder: def __init__(self): self._skip = {} - def find_module(self, fullname, path=None): - """ - Find spec and patch import hooks into loader before returning. - - Required for Python 2. - - https://docs.python.org/3/library/importlib.html#importlib.abc.MetaPathFinder.find_module - """ - - # If not something we are interested in we can return. - - if fullname not in _import_hooks: - return None - - # Check whether this is being called on the second time - # through and return. - - if fullname in self._skip: - return None - - # We are now going to call back into import. We set a - # flag to see we are handling the module so that check - # above drops out on subsequent pass and we don't go - # into an infinite loop. - - self._skip[fullname] = True - - try: - # For Python 3 we need to use find_spec() from the importlib - # module. - - if find_spec: - spec = find_spec(fullname) - loader = getattr(spec, "loader", None) - - if loader and not isinstance(loader, (_ImportHookChainedLoader, _ImportHookLoader)): - return _ImportHookChainedLoader(loader) - - else: - __import__(fullname) - - # If we get this far then the module we are - # interested in does actually exist and so return - # our loader to trigger import hooks and then return - # the module. 
- - return _ImportHookLoader() - - finally: - del self._skip[fullname] - def find_spec(self, fullname, path=None, target=None): """ Find spec and patch import hooks into loader before returning. - Required for Python 3.10+ to avoid warnings. - https://docs.python.org/3/library/importlib.html#importlib.abc.MetaPathFinder.find_spec """ @@ -260,23 +193,18 @@ def find_spec(self, fullname, path=None, target=None): self._skip[fullname] = True try: - # For Python 3 we need to use find_spec() from the importlib - # module. - - if find_spec: - spec = find_spec(fullname) - loader = getattr(spec, "loader", None) + # We call find_spec() from the importlib module. - if loader and not isinstance(loader, (_ImportHookChainedLoader, _ImportHookLoader)): - spec.loader = _ImportHookChainedLoader(loader) + spec = find_spec(fullname) + loader = getattr(spec, "loader", None) - return spec + if loader and not isinstance(loader, (_ImportHookChainedLoader, _ImportHookLoader)): + spec.loader = _ImportHookChainedLoader(loader) - else: - # Not possible, Python 3 defines find_spec and Python 2 does not have find_spec on Finders - return None + return spec finally: + # Delete flag now that it's not needed del self._skip[fullname] diff --git a/newrelic/api/log.py b/newrelic/api/log.py index 9edd40e2e1..1bff50865c 100644 --- a/newrelic/api/log.py +++ b/newrelic/api/log.py @@ -26,7 +26,6 @@ from newrelic.common.object_names import parse_exc_info from newrelic.core.attribute import truncate from newrelic.core.config import global_settings, is_expected_error -from newrelic.packages import six def safe_json_encode(obj, ignore_string_types=False, **kwargs): @@ -34,7 +33,7 @@ def safe_json_encode(obj, ignore_string_types=False, **kwargs): # If ignore_string_types is True, do not encode string types further. # Currently used for safely encoding logging attributes. 
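The `ImportHookFinder` changes above center on the `find_spec` protocol: the finder calls `importlib.util.find_spec` itself, which re-walks `sys.meta_path`, so a `_skip` flag is needed to break the recursion. A simplified standalone sketch of that pattern (not the agent's actual code):

```python
import sys
from importlib.util import find_spec


class NotifyingFinder:
    """Meta path finder that observes imports of selected modules."""

    def __init__(self, names):
        self._names = set(names)
        self._skip = set()  # guards against re-entering ourselves

    def find_spec(self, fullname, path=None, target=None):
        if fullname not in self._names or fullname in self._skip:
            return None
        self._skip.add(fullname)
        try:
            # find_spec() walks sys.meta_path again; the _skip flag makes
            # this finder return None on that inner pass.
            spec = find_spec(fullname)
            if spec is not None:
                print(f"about to import {fullname}")
            return spec
        finally:
            self._skip.discard(fullname)


sys.meta_path.insert(0, NotifyingFinder(["json"]))
import json  # consults the finder if json has not been imported yet
```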
- if ignore_string_types and isinstance(obj, (six.string_types, six.binary_type)): + if ignore_string_types and isinstance(obj, (str, bytes)): return obj # Attempt to run through JSON serialization diff --git a/newrelic/api/profile_trace.py b/newrelic/api/profile_trace.py index 93aa191a4a..98a321cb92 100644 --- a/newrelic/api/profile_trace.py +++ b/newrelic/api/profile_trace.py @@ -21,7 +21,6 @@ from newrelic.api.time_trace import current_trace from newrelic.common.object_names import callable_name from newrelic.common.object_wrapper import FunctionWrapper, wrap_object -from newrelic.packages import six AGENT_PACKAGE_DIRECTORY = os.path.dirname(AGENT_PACKAGE_FILE) + "/" @@ -71,7 +70,7 @@ def _callable(): except Exception: pass - for name, obj in six.iteritems(frame.f_globals): + for name, obj in frame.f_globals.items(): try: if obj.__dict__[func_name].func_code is co: return obj.__dict__[func_name] diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 40ef225129..5abbc0bb6e 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -29,7 +29,6 @@ ) from newrelic.core.config import is_expected_error, should_ignore_error from newrelic.core.trace_cache import trace_cache -from newrelic.packages import six _logger = logging.getLogger(__name__) @@ -442,7 +441,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, ) if error_group_name_raw: _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) - if error_group_name is None or not isinstance(error_group_name, six.string_types): + if error_group_name is None or not isinstance(error_group_name, str): raise ValueError( "Invalid attribute value for error.group.name. Expected string, got: %s" % repr(error_group_name_raw) diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index 5b44d1f81f..94d3f79cde 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
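The type replacements in this hunk and the ones that follow are mechanical once you know the Python 3 values of the six aliases; listing them once for reference:

```python
# Python 3 values of the six aliases removed throughout this patch:
#   six.string_types -> (str,)
#   six.text_type    -> str
#   six.binary_type  -> bytes
#   six.iteritems(d) -> d.items()  (likewise iterkeys -> d.keys())
value = b"payload"
assert isinstance(value, (str, bytes))  # replaces the six.string_types/binary_type check
```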
-from __future__ import print_function - import logging import os import random @@ -77,7 +75,6 @@ TraceCacheNoActiveTraceError, trace_cache, ) -from newrelic.packages import six _logger = logging.getLogger(__name__) @@ -555,7 +552,7 @@ def __exit__(self, exc, value, tb): # Record supportability metrics for api calls - for key, value in six.iteritems(self._transaction_metrics): + for key, value in self._transaction_metrics.items(): self.record_custom_metric(key, {"count": value}) if self._frameworks: @@ -1420,9 +1417,9 @@ def _process_incoming_cat_headers(self, encoded_cross_process_id, encoded_txn_he self.record_tt = self.record_tt or txn_header[1] - if isinstance(txn_header[2], six.string_types): + if isinstance(txn_header[2], str): self._trip_id = txn_header[2] - if isinstance(txn_header[3], six.string_types): + if isinstance(txn_header[3], str): self._referring_path_hash = txn_header[3] except Exception: pass @@ -1568,7 +1565,7 @@ def record_log_event(self, message, level=None, timestamp=None, attributes=None, if message is not None: # Coerce message into a string type - if not isinstance(message, six.string_types): + if not isinstance(message, str): try: message = str(message) except Exception: @@ -1957,7 +1954,7 @@ def set_user_id(user_id): if not user_id or not transaction: return - if not isinstance(user_id, six.string_types): + if not isinstance(user_id, str): _logger.warning("The set_user_id API requires a string-based user ID.") return diff --git a/newrelic/api/web_transaction.py b/newrelic/api/web_transaction.py index 3b7a06e19a..66ee81015a 100644 --- a/newrelic/api/web_transaction.py +++ b/newrelic/api/web_transaction.py @@ -17,10 +17,7 @@ import time import warnings -try: - import urlparse -except ImportError: - import urllib.parse as urlparse +import urllib.parse as urlparse from newrelic.api.application import Application, application_instance from newrelic.api.transaction import Transaction, current_transaction @@ -34,7 +31,6 @@ from newrelic.common.object_names import callable_name from newrelic.common.object_wrapper import FunctionWrapper, wrap_object from newrelic.core.attribute_filter import DST_BROWSER_MONITORING -from newrelic.packages import six _logger = logging.getLogger(__name__) @@ -88,7 +84,7 @@ def _lookup_environ_setting(environ, name, default=False): flag = environ[name] - if isinstance(flag, six.string_types): + if isinstance(flag, str): flag = flag.lower() if flag in TRUE_VALUES: @@ -377,10 +373,7 @@ def _update_agent_attributes(self): def browser_timing_header(self, nonce=None): """Returns the JavaScript header to be included in any HTML response to perform real user monitoring. This function returns - the header as a native Python string. In Python 2 native strings - are stored as bytes. In Python 3 native strings are stored as - unicode. - + the header as a native Python string. """ if not self.enabled: @@ -473,16 +466,11 @@ def browser_timing_header(self, nonce=None): # encodable. Since we obfuscate all agent and user attributes, and # the transaction name with base 64 encoding, this will preserve # those strings, if they have values outside of the ASCII character - # set. In the case of Python 2, we actually then use the encoded - # value as we need a native string, which for Python 2 is a byte - # string. If encoding as ASCII fails we will return an empty + # set. If encoding as ASCII fails we will return an empty # string. 
try: - if six.PY2: - header = header.encode("ascii") - else: - header.encode("ascii") + header.encode("ascii") except UnicodeError: if not WebTransaction.unicode_error_reported: diff --git a/newrelic/api/wsgi_application.py b/newrelic/api/wsgi_application.py index 5d12e94f30..f5f234a2e4 100644 --- a/newrelic/api/wsgi_application.py +++ b/newrelic/api/wsgi_application.py @@ -25,7 +25,6 @@ from newrelic.api.web_transaction import WSGIWebTransaction from newrelic.common.object_names import callable_name from newrelic.common.object_wrapper import FunctionWrapper, wrap_object -from newrelic.packages import six _logger = logging.getLogger(__name__) @@ -202,7 +201,7 @@ def process_data(self, data): # works then we are done, else we move to next phase of # buffering up content until we find the body element. - html_to_be_inserted = lambda: six.b(self.transaction.browser_timing_header()) + html_to_be_inserted = lambda: self.transaction.browser_timing_header().encode("latin-1") if not self.response_data: modified = insert_html_snippet(data, html_to_be_inserted) @@ -496,23 +495,20 @@ def __iter__(self): def WSGIApplicationWrapper(wrapped, application=None, name=None, group=None, framework=None, dispatcher=None): - # Python 2 does not allow rebinding nonlocal variables, so to fix this - # framework must be stored in list so it can be edited by closure. - _framework = [framework] - def get_framework(): """Used to delay imports by passing framework as a callable.""" - framework = _framework[0] + + # Use same framework variable as closure + nonlocal framework + if isinstance(framework, tuple) or framework is None: return framework if callable(framework): framework = framework() - _framework[0] = framework if framework is not None and not isinstance(framework, tuple): framework = (framework, None) - _framework[0] = framework return framework diff --git a/newrelic/bootstrap/sitecustomize.py b/newrelic/bootstrap/sitecustomize.py index 43a0d6d187..b479295aa6 100644 --- a/newrelic/bootstrap/sitecustomize.py +++ b/newrelic/bootstrap/sitecustomize.py @@ -16,8 +16,7 @@ import sys import time -# Avoiding additional imports by defining PY2 manually -PY2 = sys.version_info[0] == 2 +from importlib.machinery import PathFinder # Define some debug logging routines to help sort out things when this # all doesn't work as expected. @@ -82,25 +81,14 @@ def del_sys_path_entry(path): del_sys_path_entry(boot_directory) try: - if PY2: - import imp - - module_spec = imp.find_module("sitecustomize", sys.path) - else: - from importlib.machinery import PathFinder - - module_spec = PathFinder.find_spec("sitecustomize", path=sys.path) - + module_spec = PathFinder.find_spec("sitecustomize", path=sys.path) except ImportError: pass else: if module_spec is not None: # Import error not raised in importlib log_message("sitecustomize = %r", module_spec) - if PY2: - imp.load_module("sitecustomize", *module_spec) - else: - module_spec.loader.load_module("sitecustomize") + module_spec.loader.load_module("sitecustomize") # Because the PYTHONPATH environment variable has been amended and the # bootstrap directory added, if a Python application creates a sub diff --git a/newrelic/common/agent_http.py b/newrelic/common/agent_http.py index 0e1fa682be..7b4c85fb55 100644 --- a/newrelic/common/agent_http.py +++ b/newrelic/common/agent_http.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
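The `WSGIApplicationWrapper` change above replaces the Python 2 mutate-a-list workaround with `nonlocal`, which rebinds a variable in the enclosing scope directly. A minimal illustration of the difference:

```python
def make_counter():
    count = 0  # under Python 2 this had to be count = [0], mutated as count[0] += 1

    def increment():
        nonlocal count  # rebinds the enclosing variable; illegal in Python 2
        count += 1
        return count

    return increment


counter = make_counter()
assert counter() == 1 and counter() == 2
```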
-from __future__ import print_function - import os import sys import time diff --git a/newrelic/common/async_proxy.py b/newrelic/common/async_proxy.py index 56eaba73c2..ffc65f0085 100644 --- a/newrelic/common/async_proxy.py +++ b/newrelic/common/async_proxy.py @@ -14,7 +14,6 @@ import logging import time -import newrelic.packages.six as six from newrelic.common.coroutine import (is_coroutine_callable, is_asyncio_coroutine, is_generator_function) @@ -140,12 +139,8 @@ class GeneratorProxy(Coroutine): def __iter__(self): return self - if six.PY2: - def next(self): - return self.send(None) - else: - def __next__(self): - return self.send(None) + def __next__(self): + return self.send(None) class AwaitableGeneratorProxy(GeneratorProxy): diff --git a/newrelic/common/async_wrapper.py b/newrelic/common/async_wrapper.py index 2d3db2b4be..0c17d68dc3 100644 --- a/newrelic/common/async_wrapper.py +++ b/newrelic/common/async_wrapper.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import textwrap import functools from newrelic.common.coroutine import ( is_coroutine_callable, @@ -20,32 +19,18 @@ is_generator_function, is_async_generator_function, ) -from newrelic.packages import six - - -def evaluate_wrapper(wrapper_string, wrapped, trace): - values = {'wrapper': None, 'wrapped': wrapped, - 'trace': trace, 'functools': functools} - exec(wrapper_string, values) - return values['wrapper'] def coroutine_wrapper(wrapped, trace): - WRAPPER = textwrap.dedent(""" @functools.wraps(wrapped) async def wrapper(*args, **kwargs): with trace: return await wrapped(*args, **kwargs) - """) - - try: - return evaluate_wrapper(WRAPPER, wrapped, trace) - except Exception: - return wrapped + + return wrapper def awaitable_generator_wrapper(wrapped, trace): - WRAPPER = textwrap.dedent(""" import asyncio @functools.wraps(wrapped) @@ -54,53 +39,21 @@ def wrapper(*args, **kwargs): with trace: result = yield from wrapped(*args, **kwargs) return result - """) - try: - return evaluate_wrapper(WRAPPER, wrapped, trace) - except: - return wrapped + return wrapper -if six.PY3: - def generator_wrapper(wrapped, trace): - WRAPPER = textwrap.dedent(""" - @functools.wraps(wrapped) - def wrapper(*args, **kwargs): - with trace: - result = yield from wrapped(*args, **kwargs) - return result - """) +def generator_wrapper(wrapped, trace): + @functools.wraps(wrapped) + def wrapper(*args, **kwargs): + with trace: + result = yield from wrapped(*args, **kwargs) + return result - try: - return evaluate_wrapper(WRAPPER, wrapped, trace) - except: - return wrapped -else: - def generator_wrapper(wrapped, trace): - @functools.wraps(wrapped) - def wrapper(*args, **kwargs): - g = wrapped(*args, **kwargs) - with trace: - try: - yielded = g.send(None) - while True: - try: - sent = yield yielded - except GeneratorExit as e: - g.close() - raise - except BaseException as e: - yielded = g.throw(e) - else: - yielded = g.send(sent) - except StopIteration: - return - return wrapper + return wrapper def async_generator_wrapper(wrapped, trace): - WRAPPER = textwrap.dedent(""" @functools.wraps(wrapped) async def wrapper(*args, **kwargs): g = wrapped(*args, **kwargs) @@ -119,12 +72,8 @@ async def wrapper(*args, **kwargs): yielded = await g.asend(sent) except StopAsyncIteration: return - """) - try: - return evaluate_wrapper(WRAPPER, wrapped, trace) - except: - return wrapped + return wrapper def async_wrapper(wrapped): diff --git a/newrelic/common/coroutine.py b/newrelic/common/coroutine.py 
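The rewritten `async_wrapper` module above defines `generator_wrapper` as plain code instead of `exec`-ing a string (the string form only existed because `yield from` is a syntax error on Python 2). The key behavior is that the trace stays open for the generator's whole lifetime; roughly, with a stand-in trace object:

```python
import functools
from contextlib import contextmanager


@contextmanager
def fake_trace(name):  # stand-in for a New Relic trace context manager
    print(f"enter {name}")
    try:
        yield
    finally:
        print(f"exit {name}")


def traced(trace):
    def decorator(wrapped):
        @functools.wraps(wrapped)
        def wrapper(*args, **kwargs):
            with trace:  # entered on first next(), exited when exhausted or closed
                result = yield from wrapped(*args, **kwargs)
                return result
        return wrapper
    return decorator


@traced(fake_trace("numbers"))
def numbers():
    yield 1
    yield 2


assert list(numbers()) == [1, 2]  # prints enter/exit around consumption
```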
index 33a4922f56..9df83da276 100644 --- a/newrelic/common/coroutine.py +++ b/newrelic/common/coroutine.py @@ -13,24 +13,15 @@ # limitations under the License. import inspect -import newrelic.packages.six as six -if hasattr(inspect, 'iscoroutinefunction'): - def is_coroutine_function(wrapped): - return inspect.iscoroutinefunction(wrapped) -else: - def is_coroutine_function(wrapped): - return False +def is_coroutine_function(wrapped): + return inspect.iscoroutinefunction(wrapped) -if six.PY3: - def is_asyncio_coroutine(wrapped): - """Return True if func is a decorated coroutine function.""" - return getattr(wrapped, '_is_coroutine', None) is not None -else: - def is_asyncio_coroutine(wrapped): - return False +def is_asyncio_coroutine(wrapped): + """Return True if func is a decorated coroutine function.""" + return getattr(wrapped, '_is_coroutine', None) is not None def is_generator_function(wrapped): diff --git a/newrelic/common/encoding_utils.py b/newrelic/common/encoding_utils.py index 41ffb1dfa7..a7ae1ad614 100644 --- a/newrelic/common/encoding_utils.py +++ b/newrelic/common/encoding_utils.py @@ -29,7 +29,6 @@ import zlib from collections import OrderedDict -from newrelic.packages import six HEXDIGLC_RE = re.compile("^[0-9a-f]+$") DELIMITER_FORMAT_RE = re.compile("[ \t]*,[ \t]*") @@ -55,22 +54,11 @@ def json_encode(obj, **kwargs): # This wrapper function needs to deal with a few issues. # # The first is that when a byte string is provided, we need to - # ensure that it is interpreted as being Latin-1. This is necessary - # as by default JSON will treat it as UTF-8, which means if an - # invalid UTF-8 byte string is provided, a failure will occur when - # encoding the value. - # - # The json.dumps() function in Python 2 had an encoding argument - # which needs to be used to dictate what encoding a byte string - # should be interpreted as being. We need to supply this and set it - # to Latin-1 to avoid the failures if the byte string is not valid - # UTF-8. - # - # For Python 3, it will simply fail if provided any byte string. To - # be compatible with Python 2, we still want to accept them, but as - # before interpret it as being Latin-1. For Python 3 we can only do - # this by overriding the fallback encoder used when a type is - # encountered that the JSON encoder doesn't know what to do with. + # ensure that it is accepted as a string, and interpreted as + # being Latin-1. The default JSON encoder will not accept byte + # strings, so a we can do this by overriding the fallback encoder + # used when a type is encountered that the JSON encoder doesn't + # know what to do with. # # The second issue we want to deal with is allowing generators or # iterables to be supplied and for them to be automatically expanded @@ -80,9 +68,6 @@ def json_encode(obj, **kwargs): # The third is eliminate white space after separators to trim the # size of the data being sent. - if type(b"") is type(""): # noqa, pylint: disable=C0123 - _kwargs["encoding"] = "latin-1" - def _encode(o): if isinstance(o, bytes): return o.decode("latin-1") @@ -166,12 +151,9 @@ def xor_cipher_encrypt_base64(text, key): array using xor_cipher_genkey(). The key cannot be an empty byte array or string. Where the key is shorter than the text to be encrypted, the same key will continually be reapplied in succession. - In Python 2 either a byte string or Unicode string can be provided - for the text input. In the case of a byte string, it will be - interpreted as having Latin-1 encoding. 
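The new `json_encode` comment in this hunk describes the Python 3 approach: `json.dumps` rejects `bytes` outright, so byte strings are handled in the `default=` fallback and decoded as Latin-1. That mechanism in isolation:

```python
import json


def _fallback(o):
    # json.dumps() calls this only for types it cannot serialize natively.
    if isinstance(o, bytes):
        return o.decode("latin-1")  # every byte value 0-255 is valid Latin-1
    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")


payload = {"key": b"\xffbinary\xfe"}
# separators=(",", ":") drops the whitespace, matching the size concern above
print(json.dumps(payload, default=_fallback, separators=(",", ":")))
```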
In Python 3 only a Unicode - string can be provided for the text input. Having being encrypted, - the result will then be base64 encoded with the result being a - Unicode string. + In Python 3 only a Unicode string can be provided for the text input. + Having being encrypted, the result will then be base64 encoded with + the result being a Unicode string. """ @@ -203,10 +185,7 @@ def xor_cipher_encrypt_base64(text, key): # use ASCII when decoding the byte string as base64 encoding only # produces characters within that codeset. - if six.PY3: - return result.decode("ascii") - - return result + return result.decode("ascii") def xor_cipher_decrypt_base64(text, key): @@ -305,10 +284,7 @@ def base64_encode(text): # use ASCII when decoding the byte string as base64 encoding only # produces characters within that codeset. - if six.PY3: - return result.decode("ascii") - - return result + return result.decode("ascii") def base64_decode(text): @@ -326,7 +302,7 @@ def gzip_compress(text): """ compressed_data = io.BytesIO() - if six.PY3 and isinstance(text, str): + if isinstance(text, str): text = text.encode("utf-8") with gzip.GzipFile(fileobj=compressed_data, mode="wb") as f: @@ -358,7 +334,7 @@ def serverless_payload_encode(payload): def ensure_str(s): - if not isinstance(s, six.string_types): + if not isinstance(s, str): try: s = s.decode("utf-8") except Exception: @@ -621,7 +597,7 @@ def snake_case(string): def obfuscate_license_key(license_key): """Obfuscate license key to allow it to be printed out.""" - if not isinstance(license_key, six.string_types): + if not isinstance(license_key, str): # For non-string values passed in such as None, return the original. return license_key elif len(license_key) == 40: diff --git a/newrelic/common/object_names.py b/newrelic/common/object_names.py index e37b03315e..4859cf7fd8 100644 --- a/newrelic/common/object_names.py +++ b/newrelic/common/object_names.py @@ -16,21 +16,13 @@ """ +import builtins import sys import types import inspect import functools -from newrelic.packages import six -if six.PY2: - import exceptions - _exceptions_module = exceptions -elif six.PY3: - import builtins - _exceptions_module = builtins -else: - _exceptions_module = None # Object model terminology for quick reference. # @@ -120,116 +112,7 @@ def _module_name(object): return mname -def _object_context_py2(object): - - cname = None - fname = None - - if inspect.isclass(object) or isinstance(object, type): - # Old and new style class types. - - cname = object.__name__ - - elif inspect.ismethod(object): - # Bound and unbound class methods. In the case of an - # unbound method the im_self attribute will be None. The - # rules around whether im_self is an instance or a class - # type are strange so need to cope with both. - - if object.im_self is not None: - cname = getattr(object.im_self, '__name__', None) - if cname is None: - cname = getattr(object.im_self.__class__, '__name__') - - else: - cname = object.im_class.__name__ - - fname = object.__name__ - - elif inspect.isfunction(object): - # Normal functions and static methods. For a static we - # method don't know of any way of being able to work out - # the name of the class the static method is against. - - fname = object.__name__ - - elif inspect.isbuiltin(object): - # Builtin function. Can also be be bound to class to - # create a method. Uses __self__ instead of im_self. The - # rules around whether __self__ is an instance or a class - # type are strange so need to cope with both. 
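On the `.decode("ascii")` simplification near the top of this hunk: base64 output contains only ASCII characters, so decoding the `bytes` result to get a native `str` cannot fail. A condensed illustration (the real helpers above handle more input types):

```python
import base64


def base64_encode(text):
    # b64encode() consumes and returns bytes; its output alphabet is pure
    # ASCII, so .decode("ascii") safely produces a native str.
    return base64.b64encode(text.encode("utf-8")).decode("ascii")


assert base64_encode("hello") == "aGVsbG8="
```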
- - if object.__self__ is not None: - cname = getattr(object.__self__, '__name__', None) - if cname is None: - cname = getattr(object.__self__.__class__, '__name__') - - fname = object.__name__ - - elif isinstance(object, types.InstanceType): - # Instances of old style classes. Instances of a class - # don't normally have __name__. Where the object has a - # __name__, assume it is likely going to be a decorator - # implemented as a class and don't use the class name - # else it mucks things up. - - fname = getattr(object, '__name__', None) - - if fname is None: - cname = object.__class__.__name__ - - elif hasattr(object, '__class__'): - # Instances of new style classes. Instances of a class - # don't normally have __name__. Where the object has a - # __name__, assume it is likely going to be a decorator - # implemented as a class and don't use the class name - # else it mucks things up. The exception to this is when - # it is a descriptor and has __objclass__, in which case - # the class name from __objclass__ is used. - - fname = getattr(object, '__name__', None) - - if fname is not None: - if hasattr(object, '__objclass__'): - cname = object.__objclass__.__name__ - elif not hasattr(object, '__get__'): - cname = object.__class__.__name__ - else: - cname = object.__class__.__name__ - - # Calculate the qualified path from the class name and the - # function name. - - path = '' - - if cname: - path = cname - - if fname: - if path: - path += '.' - path += fname - - # Now calculate the name of the module object is defined in. - - owner = None - - if inspect.ismethod(object): - if object.__self__ is not None: - cname = getattr(object.__self__, '__name__', None) - if cname is None: - owner = object.__self__.__class__ # bound method - else: - owner = object.__self__ # class method - - else: - owner = getattr(object, 'im_class', None) # unbound method - - mname = _module_name(owner or object) - - return (mname, path) - -def _object_context_py3(object): +def _object_context(object): if inspect.ismethod(object): @@ -297,11 +180,11 @@ def object_context(target): details = getattr(target, '_nr_object_path', None) - # Disallow cache lookup for python 3 methods. In the case where the method + # Disallow cache lookup for methods. In the case where the method # is defined on a parent class, the name of the parent class is incorrectly # returned. Avoid this by recalculating the details each time. - if details and not _is_py3_method(target): + if details and not inspect.ismethod(target): return details # Check whether the object is actually one of our own @@ -319,7 +202,7 @@ def object_context(target): if source: details = getattr(source, '_nr_object_path', None) - if details and not _is_py3_method(source): + if details and not inspect.ismethod(source): return details else: @@ -327,11 +210,7 @@ def object_context(target): # If it wasn't cached we generate the name details and then # attempt to cache them against the object. - - if six.PY3: - details = _object_context_py3(source) - else: - details = _object_context_py2(source) + details = _object_context(source) try: # If the original target is not the same as the source we @@ -395,7 +274,7 @@ def expand_builtin_exception_name(name): # Otherwise, return it unchanged. 
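`expand_builtin_exception_name` now resolves names against `builtins`, which on Python 3 holds the classes that Python 2 kept in its `exceptions` module. A sketch of the lookup, with the qualified-name format simplified:

```python
import builtins


def expand_name(name):
    # Map a bare name like "TypeError" to a module-qualified form, but only
    # when it really is a builtin exception class.
    exception = getattr(builtins, name, None)
    if isinstance(exception, type) and issubclass(exception, BaseException):
        return f"builtins:{name}"
    return name


assert expand_name("TypeError") == "builtins:TypeError"
assert expand_name("MyCustomError") == "MyCustomError"  # left unchanged
```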
try: - exception = getattr(_exceptions_module, name) + exception = getattr(builtins, name) except AttributeError: pass else: @@ -404,9 +283,6 @@ def expand_builtin_exception_name(name): return name -def _is_py3_method(target): - return six.PY3 and inspect.ismethod(target) - def parse_exc_info(exc_info): """Parse exc_info and return commonly used strings.""" _, value, _ = exc_info @@ -423,7 +299,7 @@ def parse_exc_info(exc_info): # Favor unicode in exception messages. - message = six.text_type(value) + message = str(value) except Exception: try: diff --git a/newrelic/common/package_version_utils.py b/newrelic/common/package_version_utils.py index 5081f1bd07..9ab213ddd5 100644 --- a/newrelic/common/package_version_utils.py +++ b/newrelic/common/package_version_utils.py @@ -15,42 +15,7 @@ import sys import warnings -try: - from functools import cache as _cache_package_versions -except ImportError: - from functools import wraps - from threading import Lock - - _package_version_cache = {} - _package_version_cache_lock = Lock() - - def _cache_package_versions(wrapped): - """ - Threadsafe implementation of caching for _get_package_version. - - Python 2.7 does not have the @functools.cache decorator, and - must be reimplemented with support for clearing the cache. - """ - - @wraps(wrapped) - def _wrapper(name): - if name in _package_version_cache: - return _package_version_cache[name] - - with _package_version_cache_lock: - if name in _package_version_cache: - return _package_version_cache[name] - - version = _package_version_cache[name] = wrapped(name) - return version - - def cache_clear(): - """Cache clear function to mimic @functools.cache""" - with _package_version_cache_lock: - _package_version_cache.clear() - - _wrapper.cache_clear = cache_clear - return _wrapper +from functools import lru_cache # Need to account for 4 possible variations of version declaration specified in (rejected) PEP 396 @@ -106,7 +71,7 @@ def int_or_str(value): return version -@_cache_package_versions +@lru_cache() def _get_package_version(name): module = sys.modules.get(name, None) version = None diff --git a/newrelic/common/signature.py b/newrelic/common/signature.py index 3fe516bdc2..68c9c6253a 100644 --- a/newrelic/common/signature.py +++ b/newrelic/common/signature.py @@ -12,20 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
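The hand-rolled cache deleted from `package_version_utils.py` above existed only because Python 2.7 predates `functools.lru_cache`; the decorator supplies both the memoization and the `cache_clear()` hook the old code emulated by hand:

```python
from functools import lru_cache


@lru_cache()  # as in the diff above; the default maxsize is 128 entries
def get_version(name):
    print(f"looking up {name}")  # executes once per distinct name
    return "1.0.0"  # hypothetical lookup body


get_version("requests")    # computes and caches
get_version("requests")    # cache hit, nothing printed
get_version.cache_clear()  # the hook the removed code reimplemented
```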
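`signature.py`, rewritten just below, keeps only the `Signature.from_callable` path. For reference, this is what the retained helper produces (the `connect` function is a hypothetical example):

```python
from inspect import Signature


def bind_args(func, args, kwargs):
    """Bind arguments and apply defaults to missing arguments for a callable."""
    bound_args = Signature.from_callable(func).bind(*args, **kwargs)
    bound_args.apply_defaults()
    return bound_args.arguments


def connect(host, port=8080, *, timeout=30):  # hypothetical callable
    pass


print(bind_args(connect, ("localhost",), {"timeout": 5}))
# -> {'host': 'localhost', 'port': 8080, 'timeout': 5}
```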
-from newrelic.packages import six +from inspect import Signature -if six.PY3: - from inspect import Signature - def bind_args(func, args, kwargs): - """Bind arguments and apply defaults to missing arguments for a callable.""" - bound_args = Signature.from_callable(func).bind(*args, **kwargs) - bound_args.apply_defaults() - return bound_args.arguments - -else: - from inspect import getcallargs - - def bind_args(func, args, kwargs): - """Bind arguments and apply defaults to missing arguments for a callable.""" - return getcallargs(func, *args, **kwargs) +def bind_args(func, args, kwargs): + """Bind arguments and apply defaults to missing arguments for a callable.""" + bound_args = Signature.from_callable(func).bind(*args, **kwargs) + bound_args.apply_defaults() + return bound_args.arguments diff --git a/newrelic/common/system_info.py b/newrelic/common/system_info.py index 58a7118592..30f6b2a4e6 100644 --- a/newrelic/common/system_info.py +++ b/newrelic/common/system_info.py @@ -17,7 +17,6 @@ """ -import logging import multiprocessing import os import re @@ -25,36 +24,15 @@ import subprocess import sys import threading +from subprocess import check_output as _execute_program from newrelic.common.utilization import CommonUtilization -try: - from subprocess import check_output as _execute_program -except ImportError: - - def _execute_program(*popenargs, **kwargs): - # Replicates check_output() implementation from Python 2.7+. - # Should only be used for Python 2.6. - - if "stdout" in kwargs: - raise ValueError("stdout argument not allowed, it will be overridden.") - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) # nosec - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError(retcode, cmd, output=output) - return output - - try: import resource except ImportError: pass -_logger = logging.getLogger(__name__) LOCALHOST_EQUIVALENTS = set( [ diff --git a/newrelic/config.py b/newrelic/config.py index 0a4dd68f5a..df45894d9a 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -12,17 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import configparser import fnmatch import logging import os import sys import traceback -try: - import ConfigParser -except ImportError: - import configparser as ConfigParser - import newrelic.api.application import newrelic.api.background_task import newrelic.api.database_trace @@ -50,7 +46,6 @@ default_host, fetch_config_setting, ) -from newrelic.packages import six __all__ = ["initialize", "filter_app_factory"] @@ -93,7 +88,7 @@ # modules to look up customised settings defined in the loaded # configuration file. -_config_object = ConfigParser.RawConfigParser() +_config_object = configparser.RawConfigParser() # Cache of the parsed global settings found in the configuration # file. 
We cache these so can dump them out to the log file once @@ -105,7 +100,7 @@ def _reset_config_parser(): global _config_object global _cache_object - _config_object = ConfigParser.RawConfigParser() + _config_object = configparser.RawConfigParser() _cache_object = [] @@ -304,10 +299,10 @@ def _process_setting(section, option, getter, mapper): _cache_object.append((option, value)) - except ConfigParser.NoSectionError: + except configparser.NoSectionError: pass - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: @@ -1192,7 +1187,7 @@ def _process_module_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1284,7 +1279,7 @@ def _process_wsgi_application_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1333,7 +1328,7 @@ def _process_background_task_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1392,7 +1387,7 @@ def _process_database_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1442,7 +1437,7 @@ def _process_external_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1503,7 +1498,7 @@ def _process_function_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1571,7 +1566,7 @@ def _process_generator_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1627,7 +1622,7 @@ def _process_profile_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1686,7 +1681,7 @@ def _process_memcache_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1737,7 +1732,7 @@ def _process_transaction_name_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1796,7 +1791,7 @@ def _process_error_trace_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1846,7 +1841,7 @@ def _process_data_source_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - 
except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -1957,7 +1952,7 @@ def _process_function_profile_configuration(): try: enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -2003,7 +1998,7 @@ def _process_module_definition(target, module, function="instrument"): section = "import-hook:%s" % target if _config_object.has_section(section): enabled = _config_object.getboolean(section, "enabled") - except ConfigParser.NoOptionError: + except configparser.NoOptionError: pass except Exception: _raise_configuration_error(section) @@ -3214,20 +3209,11 @@ def _process_module_builtin_defaults(): _process_module_definition("genshi.template.base", "newrelic.hooks.template_genshi") - if six.PY2: - _process_module_definition("httplib", "newrelic.hooks.external_httplib") - else: - _process_module_definition("http.client", "newrelic.hooks.external_httplib") + _process_module_definition("http.client", "newrelic.hooks.external_httplib") _process_module_definition("httplib2", "newrelic.hooks.external_httplib2") - if six.PY2: - _process_module_definition("urllib", "newrelic.hooks.external_urllib") - else: - _process_module_definition("urllib.request", "newrelic.hooks.external_urllib") - - if six.PY2: - _process_module_definition("urllib2", "newrelic.hooks.external_urllib2") + _process_module_definition("urllib.request", "newrelic.hooks.external_urllib") _process_module_definition( "urllib3.connectionpool", diff --git a/newrelic/console.py b/newrelic/console.py index 31b664b55a..3559b07735 100644 --- a/newrelic/console.py +++ b/newrelic/console.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
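Note: the config.py hunks above all converge on one lookup idiom. A minimal sketch of that pattern, using a hypothetical file path and section name rather than anything from the agent's code:

    import configparser

    # Python 3 configparser idiom used throughout newrelic/config.py:
    # a missing section or option is non-fatal and leaves the default.
    config = configparser.RawConfigParser()
    config.read(["newrelic.ini"])  # hypothetical path

    enabled = False
    try:
        enabled = config.getboolean("import-hook:example", "enabled")  # hypothetical section
    except (configparser.NoSectionError, configparser.NoOptionError):
        pass

The patch keeps the two error classes in separate except clauses; catching them as one tuple, as above, is equivalent.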
-from __future__ import print_function - import atexit +import builtins import cmd import code +import configparser import functools import glob import inspect @@ -29,47 +29,8 @@ import time import traceback -try: - import ConfigParser -except ImportError: - import configparser as ConfigParser - -try: - import __builtin__ -except ImportError: - import builtins as __builtin__ - - -def _argspec_py2(func): - return inspect.getargspec(func) - - -def _argspec_py3(func): - a = inspect.getfullargspec(func) - return (a.args, a.varargs, a.varkw, a.defaults) - - -if hasattr(inspect, "getfullargspec"): - _argspec = _argspec_py3 -else: - _argspec = _argspec_py2 - -try: - from collections import OrderedDict - from inspect import signature - - def doc_signature(func): - sig = signature(func) - sig._parameters = OrderedDict(list(sig._parameters.items())[1:]) - return str(sig) - - -except ImportError: - from inspect import formatargspec - - def doc_signature(func): - args, varargs, keywords, defaults = _argspec(func) - return formatargspec(args[1:], varargs, keywords, defaults) +from collections import OrderedDict +from inspect import signature from newrelic.common.object_wrapper import ObjectProxy @@ -77,11 +38,18 @@ def doc_signature(func): from newrelic.core.config import flatten_settings, global_settings from newrelic.core.trace_cache import trace_cache + +def doc_signature(func): + sig = signature(func) + sig._parameters = OrderedDict(list(sig._parameters.items())[1:]) + return str(sig) + + _trace_cache = trace_cache() def shell_command(wrapped): - args, varargs, keywords, defaults = _argspec(wrapped) + args = inspect.getfullargspec(wrapped).args parser = optparse.OptionParser() for name in args[1:]: @@ -156,8 +124,8 @@ def __call__(self, code=None): pass raise SystemExit(code) - __builtin__.quit = Quitter("quit") - __builtin__.exit = Quitter("exit") + builtins.quit = Quitter("quit") + builtins.exit = Quitter("exit") class OutputWrapper(ObjectProxy): @@ -540,7 +508,7 @@ def __init__(self, config_file, stdin=None, stdout=None, log=None): cmd.Cmd.__init__(self, stdin=stdin, stdout=stdout) self.__config_file = config_file - self.__config_object = ConfigParser.RawConfigParser() + self.__config_object = configparser.RawConfigParser() self.__log_object = log if not self.__config_object.read([config_file]): diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index 31cef43e89..67d5a4140a 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -17,8 +17,6 @@ """ -from __future__ import print_function - import atexit import logging import os @@ -32,7 +30,6 @@ import newrelic import newrelic.core.application import newrelic.core.config -import newrelic.packages.six as six from newrelic.common.log_file import initialize_logging from newrelic.core.thread_utilization import thread_utilization_data_source from newrelic.samplers.cpu_usage import cpu_usage_data_source @@ -436,7 +433,7 @@ def register_data_source(self, source, application=None, name=None, settings=Non if application is None: # Bind to any applications that already exist. 
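Note: the inspect-based rewrites above, bind_args in newrelic/common/signature.py and doc_signature plus the getfullargspec call in newrelic/console.py, lean on the same Python 3 Signature API. A minimal sketch of both techniques; the shipped doc_signature mutates the private _parameters attribute, so this sketch substitutes the public Signature.replace equivalent:

    from inspect import Signature, signature

    def bind_args(func, args, kwargs):
        """Map args/kwargs onto parameter names and fill in defaults."""
        bound = Signature.from_callable(func).bind(*args, **kwargs)
        bound.apply_defaults()
        return bound.arguments

    def doc_signature(func):
        """Render a method signature without its first (self) parameter."""
        sig = signature(func)
        params = list(sig.parameters.values())[1:]
        return str(sig.replace(parameters=params))

    class Demo:
        def run(self, count, verbose=False):
            pass

    print(dict(bind_args(Demo().run, (3,), {})))  # {'count': 3, 'verbose': False}
    print(doc_signature(Demo.run))                # (count, verbose=False)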
- for application in list(six.itervalues(self._applications)): + for application in list(self._applications.values()): application.register_data_source(source, name, settings, **properties) else: @@ -619,7 +616,7 @@ def _harvest_flexible(self, shutdown=False): self._flexible_harvest_count += 1 self._last_flexible_harvest = time.time() - for application in list(six.itervalues(self._applications)): + for application in list(self._applications.values()): try: application.harvest(shutdown=False, flexible=True) except Exception: @@ -643,7 +640,7 @@ def _harvest_default(self, shutdown=False): self._default_harvest_count += 1 self._last_default_harvest = time.time() - for application in list(six.itervalues(self._applications)): + for application in list(self._applications.values()): try: application.harvest(shutdown, flexible=False) except Exception: diff --git a/newrelic/core/application.py b/newrelic/core/application.py index 6c99026031..3fc4f0f431 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -16,8 +16,6 @@ """ -from __future__ import print_function - import logging import os import sys @@ -50,7 +48,6 @@ NetworkInterfaceException, RetryDataForRequest, ) -from newrelic.packages import six from newrelic.samplers.data_sampler import DataSampler _logger = logging.getLogger(__name__) @@ -319,14 +316,7 @@ def connect_to_data_collector(self, activate_agent): # code run from this thread performs a deferred module import. if self._detect_deadlock: - if six.PY2: - import imp - - imp.acquire_lock() - self._deadlock_event.set() - imp.release_lock() - else: - self._deadlock_event.set() + self._deadlock_event.set() # Register the application with the data collector. Any errors # that occur will be dealt with by create_session(). The result diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index fb36808ecf..791392b261 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -24,7 +24,6 @@ DST_TRANSACTION_SEGMENTS, DST_TRANSACTION_TRACER, ) -from newrelic.packages import six _logger = logging.getLogger(__name__) @@ -230,7 +229,7 @@ def truncate(text, maxsize=MAX_ATTRIBUTE_LENGTH, encoding="utf-8", ending=None): # If text is unicode (Python 2 or 3), return unicode. # If text is a Python 2 string, return str. - if isinstance(text, six.text_type): + if isinstance(text, str): truncated = _truncate_unicode(text, maxsize, encoding) else: truncated = _truncate_bytes(text, maxsize) @@ -258,12 +257,12 @@ def check_name_length(name, max_length=MAX_ATTRIBUTE_LENGTH, encoding="utf-8"): def check_name_is_string(name): - if not isinstance(name, (six.text_type, six.binary_type)): + if not isinstance(name, (str, bytes)): raise NameIsNotStringException() def check_max_int(value, max_int=MAX_64_BIT_INT): - if isinstance(value, six.integer_types) and value > max_int: + if isinstance(value, int) and value > max_int: raise IntTooLargeException() @@ -314,7 +313,7 @@ def process_user_attribute(name, value, max_length=MAX_ATTRIBUTE_LENGTH, ending= else: # Check length after casting - valid_types_text = (six.text_type, six.binary_type) + valid_types_text = (str, bytes) if isinstance(value, valid_types_text): trunc_value = truncate(value, maxsize=max_length, ending=ending) @@ -340,7 +339,7 @@ def sanitize(value): Raise NullValueException, if value is None (null values SHOULD NOT be reported). 
""" - valid_value_types = (six.text_type, six.binary_type, bool, float, six.integer_types) + valid_value_types = (str, bytes, bool, float, int) # According to the agent spec, agents should not report None attribute values. # There is no difference between omitting the key and sending a None, so we can # reduce the payload size by not sending None values. diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 9139b39344..5873b2374c 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -28,16 +28,12 @@ import os import re import threading +import urllib.parse as urlparse -import newrelic.packages.six as six from newrelic.common.object_names import parse_exc_info from newrelic.core.attribute import MAX_ATTRIBUTE_LENGTH from newrelic.core.attribute_filter import AttributeFilter -try: - import urlparse -except ImportError: - import urllib.parse as urlparse try: import grpc @@ -1091,14 +1087,14 @@ def global_settings_dump(settings_object=None, serializable=False): settings["proxy_host"] = uri if serializable: - for key, value in list(six.iteritems(settings)): - if not isinstance(key, six.string_types): + for key, value in list(settings.items()): + if not isinstance(key, str): del settings[key] if ( - not isinstance(value, six.string_types) + not isinstance(value, str) and not isinstance(value, float) - and not isinstance(value, six.integer_types) + and not isinstance(value, int) ): settings[key] = repr(value) diff --git a/newrelic/core/data_collector.py b/newrelic/core/data_collector.py index 2691396641..6012a57cdd 100644 --- a/newrelic/core/data_collector.py +++ b/newrelic/core/data_collector.py @@ -16,8 +16,6 @@ """ -from __future__ import print_function - import logging from newrelic.common.agent_http import ( diff --git a/newrelic/core/database_utils.py b/newrelic/core/database_utils.py index 774fb93490..28bb7f7295 100644 --- a/newrelic/core/database_utils.py +++ b/newrelic/core/database_utils.py @@ -21,7 +21,6 @@ import re import weakref -import newrelic.packages.six as six from newrelic.core.internal_metrics import internal_metric from newrelic.core.config import global_settings @@ -822,7 +821,7 @@ def __init__(self, sql, database=None): self._normalized = None self._identifier = None - if isinstance(sql, six.binary_type): + if isinstance(sql, bytes): try: sql = sql.decode('utf-8') except UnicodeError as e: diff --git a/newrelic/core/external_node.py b/newrelic/core/external_node.py index 20e07e9a5f..7554fbb3b9 100644 --- a/newrelic/core/external_node.py +++ b/newrelic/core/external_node.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import urlparse -except ImportError: - import urllib.parse as urlparse +import urllib.parse as urlparse from collections import namedtuple diff --git a/newrelic/core/function_node.py b/newrelic/core/function_node.py index 6acf7c81e0..1dfbc3c0ef 100644 --- a/newrelic/core/function_node.py +++ b/newrelic/core/function_node.py @@ -19,7 +19,6 @@ from newrelic.core.node_mixin import GenericNodeMixin from newrelic.core.metric import TimeMetric -from newrelic.packages import six _FunctionNode = namedtuple('_FunctionNode', ['group', 'name', 'children', 'start_time', 'end_time', @@ -60,7 +59,7 @@ def time_metrics(self, stats, root, parent): # own rollup categories. 
if self.rollup: - if isinstance(self.rollup, six.string_types): + if isinstance(self.rollup, str): rollups = [self.rollup] else: rollups = self.rollup diff --git a/newrelic/core/profile_sessions.py b/newrelic/core/profile_sessions.py index 663e90fe4e..299d03ddc9 100644 --- a/newrelic/core/profile_sessions.py +++ b/newrelic/core/profile_sessions.py @@ -19,21 +19,17 @@ import time import zlib from collections import defaultdict, deque +from sys import intern import newrelic -import newrelic.packages.six as six from newrelic.common.encoding_utils import json_encode from newrelic.core.config import global_settings from newrelic.core.trace_cache import trace_cache -try: - from sys import intern -except ImportError: - pass _logger = logging.getLogger(__name__) -AGENT_PACKAGE_DIRECTORY = os.path.dirname(newrelic.__file__) + "/" +AGENT_PACKAGE_DIRECTORY = os.path.dirname(newrelic.__file__) + os.sep class SessionState(object): @@ -393,7 +389,7 @@ def profile_data(self): flat_tree = {} thread_count = 0 - for category, bucket in six.iteritems(self.call_buckets): + for category, bucket in self.call_buckets.items(): # Only flatten buckets that have data in them. No need to send # empty buckets. @@ -414,10 +410,7 @@ def profile_data(self): level = settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION - encoded_tree = base64.standard_b64encode(zlib.compress(six.b(json_call_tree), level)) - - if six.PY3: - encoded_tree = encoded_tree.decode("Latin-1") + encoded_tree = base64.standard_b64encode(zlib.compress(json_call_tree.encode("latin-1"), level)).decode("Latin-1") profile = [ [ diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index 42a785d4ac..506f9a15c4 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -31,7 +31,6 @@ import zlib from heapq import heapify, heapreplace -import newrelic.packages.six as six from newrelic.api.settings import STRIP_EXCEPTION_MESSAGE from newrelic.api.time_trace import get_linking_metadata from newrelic.common.encoding_utils import json_encode @@ -232,7 +231,7 @@ def metrics(self): """ - return six.iteritems(self.__stats_table) + return self.__stats_table.items() def reset_metric_stats(self): """Resets the accumulated statistics back to initial state for @@ -301,7 +300,7 @@ def metrics(self): stats for the metric. """ - return six.iteritems(self.__stats_table) + return self.__stats_table.items() def metrics_count(self): """Returns a count of the number of unique metrics currently @@ -859,7 +858,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, ) if error_group_name_raw: _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) - if error_group_name is None or not isinstance(error_group_name, six.string_types): + if error_group_name is None or not isinstance(error_group_name, str): raise ValueError( "Invalid attribute value for error.group.name. 
Expected string, got: %s" % repr(error_group_name_raw) @@ -1252,7 +1251,7 @@ def record_log_event(self, message, level=None, timestamp=None, attributes=None, if message is not None: # Coerce message into a string type - if not isinstance(message, six.string_types): + if not isinstance(message, str): try: message = str(message) except Exception: @@ -1333,11 +1332,11 @@ def metric_data(self, normalizer=None): _logger.info( "Raw metric data for harvest of %r is %r.", self.__settings.app_name, - list(six.iteritems(self.__stats_table)), + list(self.__stats_table.items()), ) if normalizer is not None: - for key, value in six.iteritems(self.__stats_table): + for key, value in self.__stats_table.items(): normalized_name, ignored = normalizer(key[0]) if ignored: continue @@ -1355,10 +1354,10 @@ def metric_data(self, normalizer=None): _logger.info( "Normalized metric data for harvest of %r is %r.", self.__settings.app_name, - list(six.iteritems(normalized_stats)), + list(normalized_stats.items()), ) - for key, value in six.iteritems(normalized_stats): + for key, value in normalized_stats.items(): key = dict(name=key[0], scope=key[1]) result.append((key, value)) @@ -1457,7 +1456,7 @@ def slow_sql_data(self, connections): maximum = self.__settings.agent_limits.slow_sql_data - slow_sql_nodes = sorted(six.itervalues(self.__sql_stats_table), key=lambda x: x.max_call_time)[-maximum:] + slow_sql_nodes = sorted(self.__sql_stats_table.values(), key=lambda x: x.max_call_time)[-maximum:] result = [] @@ -1498,10 +1497,7 @@ def slow_sql_data(self, connections): level = self.__settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION - params_data = base64.standard_b64encode(zlib.compress(six.b(json_data), level)) - - if six.PY3: - params_data = params_data.decode("Latin-1") + params_data = base64.standard_b64encode(zlib.compress(json_data.encode("latin-1"), level)).decode("Latin-1") # Limit the length of any SQL that is reported back. @@ -1612,12 +1608,9 @@ def transaction_trace_data(self, connections): level = self.__settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION - zlib_data = zlib.compress(six.b(json_data), level) + zlib_data = zlib.compress(json_data.encode("latin-1"), level) - pack_data = base64.standard_b64encode(zlib_data) - - if six.PY3: - pack_data = pack_data.decode("Latin-1") + pack_data = base64.standard_b64encode(zlib_data).decode("Latin-1") root = transaction_trace.root @@ -1677,12 +1670,9 @@ def slow_transaction_data(self): level = self.__settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION - zlib_data = zlib.compress(six.b(json_data), level) - - pack_data = base64.standard_b64encode(zlib_data) + zlib_data = zlib.compress(json_data.encode("latin-1"), level) - if six.PY3: - pack_data = pack_data.decode("Latin-1") + pack_data = base64.standard_b64encode(zlib_data).decode("Latin-1") root = transaction_trace.root @@ -1943,7 +1933,7 @@ def merge_metric_stats(self, snapshot): if not self.__settings: return - for key, other in six.iteritems(snapshot.__stats_table): + for key, other in snapshot.__stats_table.items(): stats = self.__stats_table.get(key) if not stats: self.__stats_table[key] = other @@ -2031,7 +2021,7 @@ def _merge_sql(self, snapshot): # the limit of how many to collect, only merge in if already # seen the specific SQL. 
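Note: profile_sessions.py and these stats_engine.py hunks repeat one pipeline, JSON text to latin-1 bytes, zlib, base64, and back to text, now as a single expression since the Python 3 result no longer needs a conditional decode. A round-trip sketch of that pipeline:

    import base64
    import zlib

    def pack(json_data, level=zlib.Z_DEFAULT_COMPRESSION):
        """Compress JSON text and base64-wrap it for transport as a string."""
        return base64.standard_b64encode(
            zlib.compress(json_data.encode("latin-1"), level)
        ).decode("Latin-1")

    def unpack(packed):
        """Inverse of pack(); recovers the original JSON text."""
        return zlib.decompress(base64.standard_b64decode(packed)).decode("latin-1")

    payload = '{"metric": "Supportability/demo", "count": 1}'
    assert unpack(pack(payload)) == payload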
- for key, slow_sql_stats in six.iteritems(snapshot.__sql_stats_table): + for key, slow_sql_stats in snapshot.__sql_stats_table.items(): stats = self.__sql_stats_table.get(key) if not stats: maximum = self.__settings.agent_limits.slow_sql_data diff --git a/newrelic/core/trace_cache.py b/newrelic/core/trace_cache.py index 5f0ddcd3da..bc271959f4 100644 --- a/newrelic/core/trace_cache.py +++ b/newrelic/core/trace_cache.py @@ -22,17 +22,14 @@ import threading import traceback import weakref + +from collections.abc import MutableMapping try: import thread except ImportError: import _thread as thread -try: - from collections.abc import MutableMapping -except ImportError: - from collections import MutableMapping - from newrelic.core.config import global_settings from newrelic.core.loop_node import LoopNode diff --git a/newrelic/hooks/component_piston.py b/newrelic/hooks/component_piston.py index 96204f404c..db9da94b7d 100644 --- a/newrelic/hooks/component_piston.py +++ b/newrelic/hooks/component_piston.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import newrelic.packages.six as six import newrelic.api.transaction import newrelic.api.function_trace @@ -69,7 +68,7 @@ def __getattr__(self, name): def __call__(self, *args, **kwargs): self._nr_wrapped(*args, **kwargs) handler = self.__instance.handler - for name in six.itervalues(self.__instance.callmap): + for name in self.__instance.callmap.values(): if hasattr(handler, name): setattr(handler, name, MethodWrapper( getattr(handler, name), priority=6)) diff --git a/newrelic/hooks/database_psycopg.py b/newrelic/hooks/database_psycopg.py index a392ff7e1a..474ffc9a98 100644 --- a/newrelic/hooks/database_psycopg.py +++ b/newrelic/hooks/database_psycopg.py @@ -15,6 +15,8 @@ import inspect import os +from urllib.parse import unquote, parse_qsl + from newrelic.api.database_trace import DatabaseTrace, register_database_client from newrelic.api.function_trace import FunctionTrace from newrelic.common.object_names import callable_name @@ -37,15 +39,6 @@ AsyncCursorWrapper as DBAPI2AsyncCursorWrapper, ) -try: - from urllib import unquote -except ImportError: - from urllib.parse import unquote -try: - from urlparse import parse_qsl -except ImportError: - from urllib.parse import parse_qsl - from newrelic.packages.urllib3 import util as ul3_util # These functions return True if a non-default connection or cursor class is diff --git a/newrelic/hooks/database_psycopg2.py b/newrelic/hooks/database_psycopg2.py index bbed13184c..8efb84cce7 100644 --- a/newrelic/hooks/database_psycopg2.py +++ b/newrelic/hooks/database_psycopg2.py @@ -15,6 +15,8 @@ import inspect import os +from urllib.parse import unquote, parse_qsl + from newrelic.api.database_trace import DatabaseTrace, register_database_client from newrelic.api.function_trace import FunctionTrace from newrelic.api.transaction import current_transaction @@ -29,15 +31,6 @@ from newrelic.hooks.database_dbapi2 import ConnectionWrapper as DBAPI2ConnectionWrapper from newrelic.hooks.database_dbapi2 import CursorWrapper as DBAPI2CursorWrapper -try: - from urllib import unquote -except ImportError: - from urllib.parse import unquote -try: - from urlparse import parse_qsl -except ImportError: - from urllib.parse import parse_qsl - from newrelic.packages.urllib3 import util as ul3_util diff --git a/newrelic/hooks/datastore_elasticsearch.py b/newrelic/hooks/datastore_elasticsearch.py index 2417aabfe5..b72e5c4c5e 100644 --- 
a/newrelic/hooks/datastore_elasticsearch.py +++ b/newrelic/hooks/datastore_elasticsearch.py @@ -16,7 +16,6 @@ from newrelic.api.transaction import current_transaction from newrelic.common.object_wrapper import function_wrapper, wrap_function_wrapper from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six # An index name can be a string, None or a sequence. In the case of None # an empty string or '*', it is the same as using '_all'. When a string @@ -30,7 +29,7 @@ def _index_name(index): if not index or index == "*": return "_all" - if not isinstance(index, six.string_types) or "," in index: + if not isinstance(index, str) or "," in index: return "other" return index diff --git a/newrelic/hooks/datastore_pyelasticsearch.py b/newrelic/hooks/datastore_pyelasticsearch.py index 63e33a9bb7..b8418e3470 100644 --- a/newrelic/hooks/datastore_pyelasticsearch.py +++ b/newrelic/hooks/datastore_pyelasticsearch.py @@ -15,7 +15,6 @@ from newrelic.api.datastore_trace import DatastoreTraceWrapper from newrelic.api.transaction import current_transaction from newrelic.common.object_wrapper import wrap_function_wrapper -from newrelic.packages import six # An index name can be a string, None or a sequence. In the case of None # an empty string or '*', it is the same as using '_all'. When a string @@ -27,7 +26,7 @@ def _index_name(index): if not index or index == "*": return "_all" - if not isinstance(index, six.string_types) or "," in index: + if not isinstance(index, str) or "," in index: return "other" return index diff --git a/newrelic/hooks/external_feedparser.py b/newrelic/hooks/external_feedparser.py index 277c872a37..d505b61d2b 100644 --- a/newrelic/hooks/external_feedparser.py +++ b/newrelic/hooks/external_feedparser.py @@ -15,7 +15,6 @@ import sys import types -import newrelic.packages.six as six import newrelic.api.transaction import newrelic.api.object_wrapper @@ -35,7 +34,7 @@ def __call__(self, url, *args, **kwargs): # The URL be a string or a file like object. Pass call # through if not a string. - if not isinstance(url, six.string_types): + if not isinstance(url, str): return self._nr_next_object(url, *args, **kwargs) # Only then wrap the call if it looks like a URL. 
To diff --git a/newrelic/hooks/external_httplib.py b/newrelic/hooks/external_httplib.py index ca8decb40c..e31aa8e20c 100644 --- a/newrelic/hooks/external_httplib.py +++ b/newrelic/hooks/external_httplib.py @@ -14,7 +14,6 @@ import functools -from newrelic.packages import six from newrelic.api.external_trace import ExternalTrace from newrelic.api.transaction import current_transaction @@ -119,13 +118,7 @@ def nr_header(header, *args, **kwargs): def instrument(module): - - if six.PY2: - library = 'httplib' - else: - library = 'http' - - wrap_function_wrapper(module, "HTTPConnection.endheaders", functools.partial(httplib_endheaders_wrapper, scheme='http', library=library)) - wrap_function_wrapper(module, "HTTPSConnection.endheaders", functools.partial(httplib_endheaders_wrapper, scheme='https', library=library)) + wrap_function_wrapper(module, "HTTPConnection.endheaders", functools.partial(httplib_endheaders_wrapper, scheme='http', library='http')) + wrap_function_wrapper(module, "HTTPSConnection.endheaders", functools.partial(httplib_endheaders_wrapper, scheme='https', library='http')) wrap_function_wrapper(module, "HTTPConnection.getresponse", httplib_getresponse_wrapper) wrap_function_wrapper(module, "HTTPConnection.putheader", httplib_putheader_wrapper) diff --git a/newrelic/hooks/external_urllib.py b/newrelic/hooks/external_urllib.py index e14477d0a0..c3ea4777f4 100644 --- a/newrelic/hooks/external_urllib.py +++ b/newrelic/hooks/external_urllib.py @@ -12,12 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import urlparse -except ImportError: - import urllib.parse as urlparse - -import newrelic.packages.six as six +import urllib.parse as urlparse from newrelic.api.external_trace import ExternalTraceWrapper from newrelic.api.transaction import current_transaction @@ -56,7 +51,7 @@ def bind_params_urlretrieve(url, *args, **kwargs): def bind_params_open(fullurl, *args, **kwargs): - if isinstance(fullurl, six.string_types): + if isinstance(fullurl, str): return fullurl else: return fullurl.get_full_url() diff --git a/newrelic/hooks/external_urllib2.py b/newrelic/hooks/external_urllib2.py deleted file mode 100644 index c0a2f6e7e0..0000000000 --- a/newrelic/hooks/external_urllib2.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -try: - import urlparse -except ImportError: - import urllib.parse as urlparse - -import newrelic.packages.six as six - -from newrelic.api.external_trace import ExternalTraceWrapper -from newrelic.api.transaction import current_transaction -from newrelic.common.object_wrapper import wrap_function_wrapper - -def _nr_wrapper_opener_director_open_(wrapped, instance, args, kwargs): - transaction = current_transaction() - - if transaction is None: - return wrapped(*args, **kwargs) - - def _bind_params(fullurl, *args, **kwargs): - if isinstance(fullurl, six.string_types): - return fullurl - else: - return fullurl.get_full_url() - - url = _bind_params(*args, **kwargs) - - details = urlparse.urlparse(url) - - if details.hostname is None: - return wrapped(*args, **kwargs) - - return ExternalTraceWrapper(wrapped, 'urllib2', url)(*args, **kwargs) - -def instrument(module): - - if hasattr(module, 'OpenerDirector'): - wrap_function_wrapper(module, 'OpenerDirector.open', - _nr_wrapper_opener_director_open_) diff --git a/newrelic/hooks/framework_django.py b/newrelic/hooks/framework_django.py index 91d6fec200..5f7c6d797a 100644 --- a/newrelic/hooks/framework_django.py +++ b/newrelic/hooks/framework_django.py @@ -42,13 +42,11 @@ ) from newrelic.config import extra_settings from newrelic.core.config import global_settings -from newrelic.packages import six -if six.PY3: - from newrelic.hooks.framework_django_py3 import ( - _nr_wrap_converted_middleware_async_, - _nr_wrapper_BaseHandler_get_response_async_, - ) +from newrelic.hooks.framework_django_py3 import ( + _nr_wrap_converted_middleware_async_, + _nr_wrapper_BaseHandler_get_response_async_, +) _logger = logging.getLogger(__name__) @@ -166,7 +164,7 @@ def browser_timing_insertion(response, transaction): # assign it back to the response object to avoid having multiple copies of the string in memory at the same time # as we progress through steps below. - result = insert_html_snippet(response.content, lambda: six.b(transaction.browser_timing_header())) + result = insert_html_snippet(response.content, lambda: transaction.browser_timing_header().encode("latin-1")) if result is not None: if transaction.settings.debug.log_autorum_middleware: @@ -436,7 +434,7 @@ def instrument_django_core_handlers_base(module): wrap_post_function(module, "BaseHandler.load_middleware", insert_and_wrap_middleware) - if six.PY3 and hasattr(module.BaseHandler, "get_response_async"): + if hasattr(module.BaseHandler, "get_response_async"): wrap_function_wrapper(module, "BaseHandler.get_response_async", _nr_wrapper_BaseHandler_get_response_async_) wrap_function_wrapper(module, "BaseHandler.get_response", _nr_wrapper_BaseHandler_get_response_) diff --git a/newrelic/hooks/framework_webpy.py b/newrelic/hooks/framework_webpy.py index 717610ac98..d89c979557 100644 --- a/newrelic/hooks/framework_webpy.py +++ b/newrelic/hooks/framework_webpy.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
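Note: the framework_django.py change from six.b(...) to .encode("latin-1"), mirrored later in middleware_flask_compress.py, exists because the RUM header is a str being spliced into a bytes response body. A sketch with stand-in implementations, since insert_html_snippet and the transaction object are agent internals with more logic than shown here:

    def browser_timing_header():
        # Stand-in for transaction.browser_timing_header(); returns text.
        return "<script>/* RUM loader */</script>"

    def insert_html_snippet(content, header_factory):
        """Splice header bytes into a bytes HTML body after <head>."""
        marker = b"<head>"
        index = content.find(marker)
        if index == -1:
            return None
        index += len(marker)
        return content[:index] + header_factory() + content[index:]

    body = b"<html><head><title>t</title></head><body></body></html>"
    result = insert_html_snippet(body, lambda: browser_timing_header().encode("latin-1"))

The lambda keeps the header from being rendered at all when no insert point is found, matching the lazy callable the real hooks pass.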
-import newrelic.packages.six as six import newrelic.api.transaction import newrelic.api.function_trace @@ -27,7 +26,7 @@ def transaction_name_delegate(*args, **kwargs): transaction = newrelic.api.transaction.current_transaction() if transaction: - if isinstance(args[1], six.string_types): + if isinstance(args[1], str): f = args[1] else: f = callable_name(args[1]) diff --git a/newrelic/hooks/logger_logging.py b/newrelic/hooks/logger_logging.py index 7b320cd911..5939f4d87f 100644 --- a/newrelic/hooks/logger_logging.py +++ b/newrelic/hooks/logger_logging.py @@ -12,17 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from urllib.parse import quote + from newrelic.api.application import application_instance from newrelic.api.time_trace import get_linking_metadata from newrelic.api.transaction import current_transaction, record_log_event from newrelic.common.object_wrapper import function_wrapper, wrap_function_wrapper from newrelic.core.config import global_settings -try: - from urllib import quote -except ImportError: - from urllib.parse import quote - IGNORED_LOG_RECORD_KEYS = set(["message", "msg"]) diff --git a/newrelic/hooks/middleware_flask_compress.py b/newrelic/hooks/middleware_flask_compress.py index 078cc3d989..f40016ab84 100644 --- a/newrelic/hooks/middleware_flask_compress.py +++ b/newrelic/hooks/middleware_flask_compress.py @@ -18,7 +18,6 @@ from newrelic.api.transaction import current_transaction from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.config import extra_settings -from newrelic.packages import six _logger = logging.getLogger(__name__) @@ -137,7 +136,7 @@ def _params(response, *args, **kwargs): # multiple copies of the string in memory at the same time # as we progress through steps below. - result = insert_html_snippet(response.get_data(), lambda: six.b(transaction.browser_timing_header())) + result = insert_html_snippet(response.get_data(), lambda: transaction.browser_timing_header().encode("latin-1")) if result is not None: if transaction.settings.debug.log_autorum_middleware: diff --git a/newrelic/network/addresses.py b/newrelic/network/addresses.py index d26281e70c..662fbc555c 100644 --- a/newrelic/network/addresses.py +++ b/newrelic/network/addresses.py @@ -16,10 +16,7 @@ """ -try: - import urlparse -except ImportError: - import urllib.parse as urlparse +import urllib.parse as urlparse def proxy_details(proxy_scheme, proxy_host, proxy_port, proxy_user, diff --git a/newrelic/newrelic.ini b/newrelic/newrelic.ini index d06d8a2926..967a448c47 100644 --- a/newrelic/newrelic.ini +++ b/newrelic/newrelic.ini @@ -8,7 +8,7 @@ # # The configuration file follows a structure similar to what you would # find for Microsoft Windows INI files. 
For further information on the -# configuration file format see the Python ConfigParser documentation at: +# configuration file format see the Python configparser documentation at: # # https://docs.python.org/library/configparser.html # diff --git a/newrelic/packages/six.py b/newrelic/packages/six.py deleted file mode 100644 index 4e15675d8b..0000000000 --- a/newrelic/packages/six.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright (c) 2010-2020 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Utilities for writing code that runs on Python 2 and 3""" - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.16.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - -if PY34: - from importlib.util import spec_from_loader -else: - spec_from_loader = None - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. 
- delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def find_spec(self, fullname, path, target=None): - if fullname in self.known_modules: - return spec_from_loader(fullname, self) - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - - def create_module(self, spec): - return self.load_module(spec.name) - - def exec_module(self, module): - pass - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("getoutput", "commands", "subprocess"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - 
MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("splitvalue", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", 
"urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), - MovedAttribute("parse_http_list", "urllib2", "urllib.request"), - MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: 
- setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def 
get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - del io - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" - _assertNotRegex = "assertNotRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -def assertNotRegex(self, *args, **kwargs): - return getattr(self, _assertNotRegex)(*args, **kwargs) - - -if 
PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - try: - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - finally: - value = None - tb = None - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - try: - raise tp, value, tb - finally: - tb = None -""") - - -if sys.version_info[:2] > (3,): - exec_("""def raise_from(value, from_value): - try: - raise value from from_value - finally: - value = None -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - # This does exactly the same what the :func:`py3:functools.update_wrapper` - # function does on Python versions after 3.2. It sets the ``__wrapped__`` - # attribute on ``wrapper`` object and it doesn't raise an error if any of - # the attributes mentioned in ``assigned`` and ``updated`` are missing on - # ``wrapped`` object. 
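Note: this deleted wraps shim only mattered before Python 3.4; on every interpreter the agent still supports, functools.update_wrapper already sets __wrapped__ and skips missing attributes, which is why the shim can go. The direct stdlib replacement, sketched:

    import functools

    def traced(wrapped):
        @functools.wraps(wrapped)  # copies __name__/__doc__, sets __wrapped__
        def wrapper(*args, **kwargs):
            return wrapped(*args, **kwargs)
        return wrapper

    @traced
    def handler():
        """Original docstring."""

    print(handler.__name__)              # handler
    print(handler.__wrapped__.__doc__)   # Original docstring.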
- def _update_wrapper(wrapper, wrapped, - assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - for attr in assigned: - try: - value = getattr(wrapped, attr) - except AttributeError: - continue - else: - setattr(wrapper, attr, value) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - wrapper.__wrapped__ = wrapped - return wrapper - _update_wrapper.__doc__ = functools.update_wrapper.__doc__ - - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - return functools.partial(_update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) - wraps.__doc__ = functools.wraps.__doc__ - -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(type): - - def __new__(cls, name, this_bases, d): - if sys.version_info[:2] >= (3, 7): - # This version introduced PEP 560 that requires a bit - # of extra care (we mimic what is done by __build_class__). - resolved_bases = types.resolve_bases(bases) - if resolved_bases is not bases: - d['__orig_bases__'] = bases - else: - resolved_bases = bases - return meta(name, resolved_bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - if hasattr(cls, '__qualname__'): - orig_vars['__qualname__'] = cls.__qualname__ - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def ensure_binary(s, encoding='utf-8', errors='strict'): - """Coerce **s** to six.binary_type. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> encoded to `bytes` - - `bytes` -> `bytes` - """ - if isinstance(s, binary_type): - return s - if isinstance(s, text_type): - return s.encode(encoding, errors) - raise TypeError("not expecting type '%s'" % type(s)) - - -def ensure_str(s, encoding='utf-8', errors='strict'): - """Coerce *s* to `str`. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - # Optimization: Fast return for the common case. - if type(s) is str: - return s - if PY2 and isinstance(s, text_type): - return s.encode(encoding, errors) - elif PY3 and isinstance(s, binary_type): - return s.decode(encoding, errors) - elif not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) - return s - - -def ensure_text(s, encoding='utf-8', errors='strict'): - """Coerce *s* to six.text_type. 
- - For Python 2: - - `unicode` -> `unicode` - - `str` -> `unicode` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - if isinstance(s, binary_type): - return s.decode(encoding, errors) - elif isinstance(s, text_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) - - -def python_2_unicode_compatible(klass): - """ - A class decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/setup.py b/setup.py index 3a92d06a45..b5df81d18d 100644 --- a/setup.py +++ b/setup.py @@ -12,17 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import os import sys python_version = sys.version_info[:2] -assert python_version in ((2, 7),) or python_version >= ( - 3, - 7, -), "The New Relic Python agent only supports Python 2.7 and 3.7+." +assert python_version >= (3, 7), "The New Relic Python agent only supports Python 3.7+." 
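# A minimal sketch (not part of the patch), showing why the single lower-bound
# assert above is sufficient: sys.version_info[:2] is a plain tuple, and tuples
# compare element-wise, so one ">= (3, 7)" check covers everything the old
# two-branch "2.7 or >= 3.7" assertion did.
import sys

python_version = sys.version_info[:2]  # e.g. (3, 11)
assert python_version >= (3, 7), "The New Relic Python agent only supports Python 3.7+."
assert (2, 7) < (3, 7) <= python_version  # lexicographic tuple comparison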
with_setuptools = False @@ -118,7 +113,6 @@ def build_extension(self, ext): classifiers = [ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", @@ -151,7 +145,7 @@ def build_extension(self, ext): zip_safe=False, classifiers=classifiers, packages=packages, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*", + python_requires=">=3.7", package_data={ "newrelic": ["newrelic.ini", "version.txt", "packages/urllib3/LICENSE.txt", "common/cacert.pem"], }, diff --git a/tests/agent_features/conftest.py b/tests/agent_features/conftest.py index b1d3354194..a8c6c42adb 100644 --- a/tests/agent_features/conftest.py +++ b/tests/agent_features/conftest.py @@ -19,8 +19,10 @@ from testing_support.fixtures import ( # noqa: F401; pylint: disable=W0611 newrelic_caplog as caplog, ) +from testing_support.fixture.event_loop import ( # noqa: F401; pylint: disable=W0611 + event_loop, +) -from newrelic.packages import six _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. @@ -37,22 +39,3 @@ collector_agent_registration = collector_agent_registration_fixture( app_name="Python Agent Test (agent_features)", default_settings=_default_settings ) - - -if six.PY2: - collect_ignore = [ - "test_async_context_propagation.py", - "test_coroutine_trace.py", - "test_coroutine_transaction.py", - "test_async_timing.py", - "test_event_loop_wait_time.py", - "test_asgi_transaction.py", - "test_asgi_browser.py", - "test_asgi_distributed_tracing.py", - "test_asgi_w3c_trace_context.py", - "test_ml_events.py", - ] -else: - from testing_support.fixture.event_loop import ( # noqa: F401; pylint: disable=W0611 - event_loop, - ) diff --git a/tests/agent_features/test_asgi_browser.py b/tests/agent_features/test_asgi_browser.py index 26c0d56702..b91be67cd0 100644 --- a/tests/agent_features/test_asgi_browser.py +++ b/tests/agent_features/test_asgi_browser.py @@ -33,7 +33,6 @@ get_browser_timing_header, ) from newrelic.common.encoding_utils import deobfuscate -from newrelic.packages import six _runtime_error_name = RuntimeError.__module__ + ":" + RuntimeError.__name__ @@ -71,7 +70,7 @@ def test_header_attributes(): assert settings.browser_key assert settings.browser_monitoring.loader_version assert settings.js_agent_loader - assert isinstance(settings.js_agent_file, six.string_types) + assert isinstance(settings.js_agent_file, str) assert settings.beacon assert settings.error_beacon @@ -109,8 +108,7 @@ def test_header_attributes(): obfuscation_key = settings.license_key[:13] - type_transaction_data = unicode if six.PY2 else str # noqa: F821, pylint: disable=E0602 - assert isinstance(data["transactionName"], type_transaction_data) + assert isinstance(data["transactionName"], str) txn_name = deobfuscate(data["transactionName"], obfuscation_key) diff --git a/tests/agent_features/test_attribute.py b/tests/agent_features/test_attribute.py index f4b9e896fe..168825173d 100644 --- a/tests/agent_features/test_attribute.py +++ b/tests/agent_features/test_attribute.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
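# A hedged sketch of the obfuscation round-trip the browser-monitoring tests
# above exercise. Assumption: newrelic.common.encoding_utils implements
# obfuscate/deobfuscate as XOR-with-key followed by base64, keyed on the first
# 13 characters of the license key; these stand-in helpers only illustrate the
# shape, not the agent's actual implementation.
import base64

def _xor(data, key):
    # Cycle through the key, XOR-ing one key byte against each data byte.
    return bytes(b ^ ord(key[i % len(key)]) for i, b in enumerate(data))

def obfuscate_sketch(text, key):
    return base64.b64encode(_xor(text.encode("utf-8"), key)).decode("utf-8")

def deobfuscate_sketch(payload, key):
    return _xor(base64.b64decode(payload), key).decode("utf-8")

key = "0123456789abc"  # hypothetical stand-in for settings.license_key[:13]
assert deobfuscate_sketch(obfuscate_sketch("WebTransaction/Uri/index", key), key) == "WebTransaction/Uri/index"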
-import sys - import pytest import webtest from testing_support.fixtures import ( @@ -38,12 +36,7 @@ sanitize, truncate, ) -from newrelic.packages import six - -# Python 3 lacks longs -if sys.version_info >= (3, 0): - long = int try: from newrelic.core._thread_utilization import ThreadUtilization except ImportError: @@ -192,60 +185,57 @@ def test_display_host_custom(): def test_truncate_string(): s = "blahblah" result = truncate(s, maxsize=4) - assert isinstance(result, six.string_types) + assert isinstance(result, str) assert result == "blah" def test_truncate_bytes(): b = b"foobar" result = truncate(b, maxsize=3) - assert isinstance(result, six.binary_type) + assert isinstance(result, bytes) assert result == b"foo" def test_truncate_unicode_snowman(): # '\u2603' is 'SNOWMAN' - # decode("unicode-escape") is used to get Py2 unicode - u = "snow\u2603".decode("unicode-escape") if six.PY2 else "snow\u2603" + u = "snow\u2603" assert u.encode("utf-8") == b"snow\xe2\x98\x83" result = truncate(u, maxsize=5) - assert isinstance(result, six.text_type) + assert isinstance(result, str) assert result == "snow" def test_truncate_combining_characters(): # '\u0308' is 'COMBINING DIAERESIS' (AKA 'umlaut') - # decode("unicode-escape") is used to get Py2 unicode - u = "Zoe\u0308".decode("unicode-escape") if six.PY2 else "Zoe\u0308" + u = "Zoe\u0308" assert u.encode("utf-8") == b"Zoe\xcc\x88" # truncate will chop off 'COMBINING DIAERESIS', which leaves # 'LATIN SMALL LETTER E' by itself. result = truncate(u, maxsize=3) - assert isinstance(result, six.text_type) + assert isinstance(result, str) assert result == "Zoe" def test_truncate_empty_string(): s = "" result = truncate(s, maxsize=4) - assert isinstance(result, six.string_types) + assert isinstance(result, str) assert result == "" def test_truncate_empty_bytes(): b = b"" result = truncate(b, maxsize=3) - assert isinstance(result, six.binary_type) + assert isinstance(result, bytes) assert result == b"" def test_truncate_empty_unicode(): - # decode("unicode-escape") is used to get Py2 unicode - u = "".decode("unicode-escape") if six.PY2 else "" + u = "" result = truncate(u, maxsize=5) - assert isinstance(result, six.text_type) + assert isinstance(result, str) assert result == "" @@ -418,17 +408,17 @@ def test_capture_request_params_value_too_long(): # Types are only defined in the spec for agent attributes, not intrinsics. 
agent_attributes = { - "request.headers.accept": six.string_types, + "request.headers.accept": str, "request.headers.contentLength": int, - "request.headers.contentType": six.string_types, - "request.headers.host": six.string_types, - "request.headers.referer": six.string_types, - "request.headers.userAgent": six.string_types, - "request.method": six.string_types, - "request.parameters.test": six.string_types, + "request.headers.contentType": str, + "request.headers.host": str, + "request.headers.referer": str, + "request.headers.userAgent": str, + "request.method": str, + "request.parameters.test": str, "response.headers.contentLength": int, - "response.headers.contentType": six.string_types, - "response.status": six.string_types, + "response.headers.contentType": str, + "response.status": str, } @@ -474,11 +464,6 @@ def test_sanitize_int(): assert sanitize(9876) == 9876 -def test_sanitize_long(): - long_int = long(123456) - assert sanitize(long_int) == long_int - - def test_sanitize_dict(): d = {1: "foo"} assert sanitize(d) == "{1: 'foo'}" diff --git a/tests/agent_features/test_browser.py b/tests/agent_features/test_browser.py index af8d0bbe4d..29e26fbd8f 100644 --- a/tests/agent_features/test_browser.py +++ b/tests/agent_features/test_browser.py @@ -34,7 +34,6 @@ from newrelic.api.web_transaction import web_transaction from newrelic.api.wsgi_application import wsgi_application from newrelic.common.encoding_utils import deobfuscate -from newrelic.packages import six _runtime_error_name = RuntimeError.__module__ + ":" + RuntimeError.__name__ @@ -71,7 +70,7 @@ def test_header_attributes(): assert settings.browser_key assert settings.browser_monitoring.loader_version assert settings.js_agent_loader - assert isinstance(settings.js_agent_file, six.string_types) + assert isinstance(settings.js_agent_file, str) assert settings.beacon assert settings.error_beacon @@ -108,8 +107,7 @@ def test_header_attributes(): obfuscation_key = settings.license_key[:13] - type_transaction_data = unicode if six.PY2 else str # noqa: F821 - assert isinstance(data["transactionName"], type_transaction_data) + assert isinstance(data["transactionName"], str) txn_name = deobfuscate(data["transactionName"], obfuscation_key) diff --git a/tests/agent_features/test_browser_middleware.py b/tests/agent_features/test_browser_middleware.py index c8c0caa7d7..e08a87c29b 100644 --- a/tests/agent_features/test_browser_middleware.py +++ b/tests/agent_features/test_browser_middleware.py @@ -20,7 +20,6 @@ ) from newrelic.api.wsgi_application import wsgi_application -from newrelic.packages import six PAGE_CONTENTS = b"Hello World" @@ -116,10 +115,6 @@ def _app_iter_exc_2(environ, start_response): _target_applications = [ target_application_list, target_application_iter, - pytest.param( - target_application_str, - marks=pytest.mark.skipif(six.PY3, reason="PY3 webtest expects type(byte) " "so this test doesnt apply"), - ), target_application_list_exc_1, target_application_list_exc_2, target_application_iter_exc_1, diff --git a/tests/agent_features/test_code_level_metrics.py b/tests/agent_features/test_code_level_metrics.py index a7aeaa39a5..9dce616c95 100644 --- a/tests/agent_features/test_code_level_metrics.py +++ b/tests/agent_features/test_code_level_metrics.py @@ -35,7 +35,6 @@ from testing_support.fixtures import dt_enabled, override_application_settings from testing_support.validators.validate_span_events import validate_span_events -import newrelic.packages.six as six from newrelic.api.background_task import background_task from 
newrelic.api.function_trace import FunctionTrace @@ -46,7 +45,6 @@ CALLABLE_CLASS_NAMESPACE = ".".join((NAMESPACE, "ExerciseClassCallable")) TYPE_CONSTRUCTOR_NAMESPACE = ".".join((NAMESPACE, "ExerciseTypeConstructor")) TYPE_CONSTRUCTOR_CALLABLE_NAMESPACE = ".".join((NAMESPACE, "ExerciseTypeConstructorCallable")) -FUZZY_NAMESPACE = CLASS_NAMESPACE if six.PY3 else NAMESPACE if FILE_PATH.endswith(".pyc"): FILE_PATH = FILE_PATH[:-1] @@ -108,7 +106,7 @@ def _extract(obj): merge_dicts( { "code.function": "max", - "code.namespace": "builtins" if six.PY3 else "__builtin__", + "code.namespace": "builtins", }, BUILTIN_ATTRS, ), @@ -129,7 +127,7 @@ def _extract(obj): @pytest.mark.parametrize( "func,args,agents", - [pytest.param(*args, id=id_) for id_, args in six.iteritems(_TEST_BASIC_CALLABLES)], + [pytest.param(*args, id=id_) for id_, args in _TEST_BASIC_CALLABLES.items()], ) def test_code_level_metrics_basic_callables(func, args, agents, extract): @override_application_settings( @@ -167,7 +165,7 @@ def _test(): "code.filepath": FILE_PATH, "code.function": "exercise_static_method", "code.lineno": 25, - "code.namespace": FUZZY_NAMESPACE, + "code.namespace": CLASS_NAMESPACE, }, ), "class_method": ( @@ -206,7 +204,7 @@ def _test(): @pytest.mark.parametrize( "func,args,agents", - [pytest.param(*args, id=id_) for id_, args in six.iteritems(_TEST_METHODS)], + [pytest.param(*args, id=id_) for id_, args in _TEST_METHODS.items()], ) def test_code_level_metrics_methods(func, args, agents, extract): @override_application_settings( @@ -264,8 +262,7 @@ def _test(): "code.filepath": FILE_PATH, "code.function": "", "code.lineno": 61, - # Lambdas behave strangely in type constructors on Python 2 and use the class namespace. - "code.namespace": NAMESPACE if six.PY3 else TYPE_CONSTRUCTOR_NAMESPACE, + "code.namespace": NAMESPACE, }, ), "call_method": ( @@ -283,7 +280,7 @@ def _test(): @pytest.mark.parametrize( "func,args,agents", - [pytest.param(*args, id=id_) for id_, args in six.iteritems(_TEST_TYPE_CONSTRUCTOR_METHODS)], + [pytest.param(*args, id=id_) for id_, args in _TEST_TYPE_CONSTRUCTOR_METHODS.items()], ) def test_code_level_metrics_type_constructor_methods(func, args, agents, extract): @override_application_settings( @@ -352,7 +349,7 @@ def _test(): @pytest.mark.parametrize( "obj,agents", - [pytest.param(*args, id=id_) for id_, args in six.iteritems(_TEST_OBJECTS)], + [pytest.param(*args, id=id_) for id_, args in _TEST_OBJECTS.items()], ) def test_code_level_metrics_objects(obj, agents, extract): @override_application_settings( diff --git a/tests/agent_features/test_configuration.py b/tests/agent_features/test_configuration.py index f43b08495b..83082398a6 100644 --- a/tests/agent_features/test_configuration.py +++ b/tests/agent_features/test_configuration.py @@ -15,12 +15,9 @@ import collections import tempfile -import pytest +import urllib.parse as urlparse -try: - import urlparse -except ImportError: - import urllib.parse as urlparse +import pytest import logging diff --git a/tests/agent_features/test_dead_transactions.py b/tests/agent_features/test_dead_transactions.py index 60d03003ec..6eaf2d2be9 100644 --- a/tests/agent_features/test_dead_transactions.py +++ b/tests/agent_features/test_dead_transactions.py @@ -14,7 +14,6 @@ import gc import pytest -import newrelic.packages.six as six from newrelic.api.background_task import BackgroundTask from newrelic.api.application import application_instance @@ -43,11 +42,6 @@ def capture_errors(wrapped, instance, args, kwargs): @pytest.mark.parametrize('circular', 
(True, False)) @capture_errors def test_dead_transaction_ends(circular): - if circular and six.PY2: - pytest.skip("Circular references in py2 result in a memory leak. " - "There is no way to remove transactions from the weakref " - "cache in this case.") - transaction = BackgroundTask( application_instance(), "test_dead_transaction_ends") if circular: diff --git a/tests/agent_features/test_dimensional_metrics.py b/tests/agent_features/test_dimensional_metrics.py index f3510e7dd9..a4b5054797 100644 --- a/tests/agent_features/test_dimensional_metrics.py +++ b/tests/agent_features/test_dimensional_metrics.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from importlib import reload + import pytest from testing_support.fixtures import reset_core_stats_engine from testing_support.validators.validate_dimensional_metric_payload import ( @@ -33,21 +35,10 @@ ) from newrelic.common.metric_utils import create_metric_identity from newrelic.core.config import global_settings -from newrelic.packages import six - -try: - # python 2.x - reload -except NameError: - # python 3.x - from importlib import reload @pytest.fixture(scope="module", autouse=True, params=["protobuf", "json"]) def otlp_content_encoding(request): - if six.PY2 and request.param == "protobuf": - pytest.skip("OTLP protos are not compatible with Python 2.") - _settings = global_settings() prev = _settings.debug.otlp_content_encoding _settings.debug.otlp_content_encoding = request.param diff --git a/tests/agent_features/test_exception_messages.py b/tests/agent_features/test_exception_messages.py index e7089930fd..c9effea4ca 100644 --- a/tests/agent_features/test_exception_messages.py +++ b/tests/agent_features/test_exception_messages.py @@ -13,10 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest from testing_support.fixtures import ( reset_core_stats_engine, - set_default_encoding, validate_application_exception_message, validate_transaction_exception_message, ) @@ -24,134 +22,20 @@ from newrelic.api.application import application_instance as application from newrelic.api.background_task import background_task from newrelic.api.time_trace import notice_error -from newrelic.packages import six -# Turn off black formatting for this section of the code. -# While Python 2 has been EOL'd since 2020, New Relic still -# supports it and therefore these messages need to keep this -# specific formatting. 
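# A minimal sketch (not part of the patch) of the Python 3 behavior the
# surviving tests below pin down: str() on a bytes object returns its repr
# rather than decoding it, which is exactly where the
# INCORRECTLY_DECODED_BYTES_PY3 constant kept below comes from. The explicit
# .decode() call is the lossless path.
message = b"I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d"
assert str(message) == "b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'"  # repr, not a decode
assert message.decode("utf-8") == "I💜🐍"  # explicit decode recovers the text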
-# fmt: off -UNICODE_MESSAGE = u'I💜🐍' -UNICODE_ENGLISH = u'I love python' + +UNICODE_MESSAGE = 'I💜🐍' +UNICODE_ENGLISH = 'I love python' BYTES_ENGLISH = b'I love python' BYTES_UTF8_ENCODED = b'I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d' -INCORRECTLY_DECODED_BYTES_PY2 = u'I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d' -INCORRECTLY_DECODED_BYTES_PY3 = u"b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" -# fmt: on -# =================== Exception messages during transaction ==================== - -# ---------------- Python 2 - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("ascii") -@validate_transaction_exception_message(UNICODE_MESSAGE) -@background_task() -def test_py2_transaction_exception_message_unicode(): - """Assert unicode message when using non-ascii characters is preserved, - with sys default encoding""" - try: - raise ValueError(UNICODE_MESSAGE) - except ValueError: - notice_error() - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("ascii") -@validate_transaction_exception_message(UNICODE_ENGLISH) -@background_task() -def test_py2_transaction_exception_message_unicode_english(): - """Assert unicode message when using ascii compatible characters preserved, - with sys default encoding""" - try: - raise ValueError(UNICODE_ENGLISH) - except ValueError: - notice_error() - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("ascii") -@validate_transaction_exception_message(UNICODE_ENGLISH) -@background_task() -def test_py2_transaction_exception_message_bytes_english(): - """Assert byte string of ascii characters decodes sensibly""" - try: - raise ValueError(BYTES_ENGLISH) - except ValueError: - notice_error() - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("ascii") -@validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY2) -@background_task() -def test_py2_transaction_exception_message_bytes_non_english(): - """Assert known situation where (explicitly) utf-8 encoded byte string gets - mangled when default sys encoding is ascii. THIS TEST ASSERTS THAT THE - MESSAGE IS WRONG. We do not expect it to work now, or in the future. - """ - try: - raise ValueError(BYTES_UTF8_ENCODED) - except ValueError: - notice_error() - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("ascii") -@validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY2) -@background_task() -def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english(): - """Assert known situation where (implicitly) utf-8 encoded byte string gets - mangled when default sys encoding is ascii. THIS TEST ASSERTS THAT THE - MESSAGE IS WRONG. We do not expect it to work now, or in the future. 
- """ - try: - # Bytes literal with non-ascii compatible characters only allowed in - # python 2 - - raise ValueError("I💜🐍") - except ValueError: - notice_error() - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("utf-8") -@validate_transaction_exception_message(UNICODE_MESSAGE) -@background_task() -def test_py2_transaction_exception_message_unicode_utf8_encoding(): - """Assert unicode error message is preserved with sys non-default utf-8 - encoding - """ - try: - raise ValueError(UNICODE_MESSAGE) - except ValueError: - notice_error() - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding("utf-8") -@validate_transaction_exception_message(UNICODE_MESSAGE) -@background_task() -def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english(): - """Assert utf-8 encoded byte produces correct exception message when sys - encoding is also utf-8. - """ - try: - # Bytes literal with non-ascii compatible characters only allowed in - # python 2 - - raise ValueError("I💜🐍") - except ValueError: - notice_error() - - -# ---------------- Python 3 +INCORRECTLY_DECODED_BYTES_PY2 = 'I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d' +INCORRECTLY_DECODED_BYTES_PY3 = "b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" +# =================== Exception messages during transaction ==================== -@pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() -def test_py3_transaction_exception_message_bytes_non_english_unicode(): +def test_transaction_exception_message_bytes_non_english_unicode(): """Assert (native) unicode exception message is preserved when when non-ascii compatible characters present""" try: @@ -160,10 +44,9 @@ def test_py3_transaction_exception_message_bytes_non_english_unicode(): notice_error() -@pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() -def test_py3_transaction_exception_message_unicode_english(): +def test_transaction_exception_message_unicode_english(): """Assert (native) unicode exception message is preserved, when characters are ascii-compatible""" try: @@ -172,10 +55,9 @@ def test_py3_transaction_exception_message_unicode_english(): notice_error() -@pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY3) @background_task() -def test_py3_transaction_exception_message_bytes_non_english(): +def test_transaction_exception_message_bytes_non_english(): """An issue can occur if you cast from bytes to a string in python 3 (that is using str(), not using encode/decode methods). 
This is because all characters in bytes are literals, no implicit @@ -188,128 +70,11 @@ def test_py3_transaction_exception_message_bytes_non_english(): except ValueError: notice_error() - # =================== Exception messages outside transaction ==================== -# ---------------- Python 2 - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("ascii") -@validate_application_exception_message(UNICODE_MESSAGE) -def test_py2_application_exception_message_unicode(): - """Assert unicode message when using non-ascii characters is preserved, - with sys default encoding""" - try: - raise ValueError(UNICODE_MESSAGE) - except ValueError: - app = application() - notice_error(application=app) - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("ascii") -@validate_application_exception_message(UNICODE_ENGLISH) -def test_py2_application_exception_message_unicode_english(): - """Assert unicode message when using ascii compatible characters preserved, - with sys default encoding""" - try: - raise ValueError(UNICODE_ENGLISH) - except ValueError: - app = application() - notice_error(application=app) - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("ascii") -@validate_application_exception_message(UNICODE_ENGLISH) -def test_py2_application_exception_message_bytes_english(): - """Assert byte string of ascii characters decodes sensibly""" - try: - raise ValueError(BYTES_ENGLISH) - except ValueError: - app = application() - notice_error(application=app) - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("ascii") -@validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY2) -def test_py2_application_exception_message_bytes_non_english(): - """Assert known situation where (explicitly) utf-8 encoded byte string gets - mangled when default sys encoding is ascii. THIS TEST ASSERTS THAT THE - MESSAGE IS WRONG. We do not expect it to work now, or in the future. - """ - try: - raise ValueError(BYTES_UTF8_ENCODED) - except ValueError: - app = application() - notice_error(application=app) - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("ascii") -@validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY2) -def test_py2_application_exception_message_bytes_implicit_encoding_non_english(): - """Assert known situation where (implicitly) utf-8 encoded byte string gets - mangled when default sys encoding is ascii. THIS TEST ASSERTS THAT THE - MESSAGE IS WRONG. We do not expect it to work now, or in the future. 
- """ - try: - # Bytes literal with non-ascii compatible characters only allowed in - # python 2 - - raise ValueError("I💜🐍") - except ValueError: - app = application() - notice_error(application=app) - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("utf-8") -@validate_application_exception_message(UNICODE_MESSAGE) -def test_py2_application_exception_message_unicode_utf8_encoding(): - """Assert unicode error message is preserved with sys non-default utf-8 - encoding - """ - try: - raise ValueError(UNICODE_MESSAGE) - except ValueError: - app = application() - notice_error(application=app) - - -@pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@reset_core_stats_engine() -@set_default_encoding("utf-8") -@validate_application_exception_message(UNICODE_MESSAGE) -def test_py2_application_exception_message_bytes_utf8_encoding_non_english(): - """Assert utf-8 encoded byte produces correct exception message when sys - encoding is also utf-8. - """ - try: - # Bytes literal with non-ascii compatible characters only allowed in - # python 2 - - raise ValueError("I💜🐍") - except ValueError: - app = application() - notice_error(application=app) - - -# ---------------- Python 3 - - -@pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(UNICODE_MESSAGE) -def test_py3_application_exception_message_bytes_non_english_unicode(): +def test_application_exception_message_bytes_non_english_unicode(): """Assert (native) unicode exception message is preserved when when non-ascii compatible characters present""" try: @@ -319,10 +84,9 @@ def test_py3_application_exception_message_bytes_non_english_unicode(): notice_error(application=app) -@pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(UNICODE_ENGLISH) -def test_py3_application_exception_message_unicode_english(): +def test_application_exception_message_unicode_english(): """Assert (native) unicode exception message is preserved, when characters are ascii-compatible""" try: @@ -332,10 +96,9 @@ def test_py3_application_exception_message_unicode_english(): notice_error(application=app) -@pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY3) -def test_py3_application_exception_message_bytes_non_english(): +def test_application_exception_message_bytes_non_english(): """It really makes a mess of things when you cast from bytes to a string in python 3 (that is using str(), not using encode/decode methods). 
This is because all characters in bytes are literals, no implicit diff --git a/tests/agent_features/test_logs_in_context.py b/tests/agent_features/test_logs_in_context.py index c4f7bcd21d..c03068e014 100644 --- a/tests/agent_features/test_logs_in_context.py +++ b/tests/agent_features/test_logs_in_context.py @@ -15,6 +15,8 @@ import json import logging import sys + +from io import StringIO as Buffer from traceback import format_tb import pytest @@ -23,12 +25,7 @@ from newrelic.api.background_task import background_task from newrelic.api.function_trace import FunctionTrace from newrelic.api.log import NewRelicContextFormatter -from newrelic.packages import six -if six.PY2: - from io import BytesIO as Buffer -else: - from io import StringIO as Buffer _logger = logging.getLogger(__name__) @@ -262,7 +259,7 @@ def test_newrelic_logger_error_inside_transaction_with_stack_trace(log_buffer_wi assert isinstance(process_id, int) assert filename.endswith("/test_logs_in_context.py") assert isinstance(line_number, int) - assert isinstance(stack_trace, six.string_types) + assert isinstance(stack_trace, str) assert stack_trace and stack_trace == expected_stack_trace expected = { @@ -356,7 +353,7 @@ def test_newrelic_logger_error_outside_transaction_with_stack_trace(log_buffer_w assert isinstance(process_id, int) assert filename.endswith("/test_logs_in_context.py") assert isinstance(line_number, int) - assert isinstance(stack_trace, six.string_types) + assert isinstance(stack_trace, str) assert stack_trace and stack_trace == expected_stack_trace expected = { diff --git a/tests/agent_features/test_ml_events.py b/tests/agent_features/test_ml_events.py index 96bb95f95e..a68078b1c6 100644 --- a/tests/agent_features/test_ml_events.py +++ b/tests/agent_features/test_ml_events.py @@ -14,6 +14,8 @@ import time +from importlib import reload + import pytest from testing_support.fixtures import ( function_not_called, @@ -34,14 +36,7 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import record_ml_event from newrelic.core.config import global_settings -from newrelic.packages import six -try: - # python 2.x - reload -except NameError: - # python 3.x - from importlib import reload _now = time.time() @@ -366,9 +361,6 @@ def test_application_create_ml_event_not_called(): @pytest.fixture(scope="module", autouse=True, params=["protobuf", "json"]) def otlp_content_encoding(request): - if six.PY2 and request.param == "protobuf": - pytest.skip("OTLP protos are not compatible with Python 2.") - _settings = global_settings() prev = _settings.debug.otlp_content_encoding _settings.debug.otlp_content_encoding = request.param diff --git a/tests/agent_features/test_web_transaction.py b/tests/agent_features/test_web_transaction.py index 66cf258587..5c3609ae9d 100644 --- a/tests/agent_features/test_web_transaction.py +++ b/tests/agent_features/test_web_transaction.py @@ -27,7 +27,6 @@ from newrelic.api.application import application_instance from newrelic.api.web_transaction import WebTransaction from newrelic.api.wsgi_application import wsgi_application -from newrelic.packages import six application = webtest.TestApp(simple_app) @@ -118,10 +117,7 @@ def test_base_web_transaction(use_bytes): for name, value in request_headers.items(): name = name.encode("utf-8") - try: - value = value.encode("utf-8") - except UnicodeDecodeError: - assert six.PY2 + value = value.encode("utf-8") byte_headers[name] = value request_headers = byte_headers diff --git a/tests/agent_streaming/test_infinite_tracing.py 
b/tests/agent_streaming/test_infinite_tracing.py index 63efe8d50c..2fc273b42c 100644 --- a/tests/agent_streaming/test_infinite_tracing.py +++ b/tests/agent_streaming/test_infinite_tracing.py @@ -17,7 +17,7 @@ import pytest from testing_support.fixtures import override_generic_settings -from testing_support.util import conditional_decorator, retry +from testing_support.util import retry from testing_support.validators.validate_metric_payload import validate_metric_payload from newrelic.common.streaming_utils import StreamBuffer @@ -25,7 +25,6 @@ from newrelic.core.application import Application from newrelic.core.config import global_settings from newrelic.core.infinite_tracing_pb2 import AttributeValue, Span -from newrelic.packages import six settings = global_settings() @@ -329,9 +328,6 @@ def connect_complete(): _test() -@conditional_decorator( - condition=six.PY2, decorator=pytest.mark.xfail(reason="Test frequently times out on Py2.", strict=False) -) def test_no_data_loss_on_reconnect(mock_grpc_server, app, buffer_empty_event, batching, spans_processed_event): """ Test for data loss when channel is closed by the server while waiting for more data in a request iterator. diff --git a/tests/agent_unittests/conftest.py b/tests/agent_unittests/conftest.py index fd5630f81e..9397456586 100644 --- a/tests/agent_unittests/conftest.py +++ b/tests/agent_unittests/conftest.py @@ -15,6 +15,8 @@ import sys import tempfile +from importlib import reload + import pytest from testing_support.fixtures import ( # noqa: F401; pylint: disable=W0611 collector_agent_registration_fixture, @@ -39,14 +41,6 @@ ) -try: - # python 2.x - reload -except NameError: - # python 3.x - from importlib import reload - - class FakeProtos(object): Span = object() SpanBatch = object() diff --git a/tests/agent_unittests/test_agent_protocol.py b/tests/agent_unittests/test_agent_protocol.py index fde10ad294..85395c6cdb 100644 --- a/tests/agent_unittests/test_agent_protocol.py +++ b/tests/agent_unittests/test_agent_protocol.py @@ -35,7 +35,6 @@ NetworkInterfaceException, RetryDataForRequest, ) -from newrelic.packages import six Request = namedtuple("Request", ("method", "path", "params", "headers", "payload")) @@ -486,7 +485,7 @@ def test_connect(with_aws, with_pcf, with_gcp, with_azure, with_docker, with_kub assert agent_settings_payload["proxy_host"] == "None" assert agent_settings_payload["attributes.include"] == "[]" assert agent_settings_payload["feature_flag"] == str(set()) - assert isinstance(agent_settings_payload["attribute_filter"], six.string_types) + assert isinstance(agent_settings_payload["attribute_filter"], str) # Verify that the connection is closed assert HttpClientRecorder.STATE == 0 diff --git a/tests/agent_unittests/test_http_client.py b/tests/agent_unittests/test_http_client.py index df409f9323..8f876c63e6 100644 --- a/tests/agent_unittests/test_http_client.py +++ b/tests/agent_unittests/test_http_client.py @@ -18,11 +18,12 @@ import ssl import zlib +from io import StringIO + import pytest -from testing_support.mock_external_http_server import ( - BaseHTTPServer, - MockExternalHTTPServer, -) + +from http.server import BaseHTTPRequestHandler, HTTPServer +from testing_support.mock_external_http_server import MockExternalHTTPServer from newrelic.common import certs from newrelic.common.agent_http import ( @@ -39,11 +40,6 @@ from newrelic.network.exceptions import NetworkInterfaceException from newrelic.packages.urllib3.util import Url -try: - from StringIO import StringIO -except ImportError: - from io import 
StringIO - SERVER_CERT = os.path.join(os.path.dirname(__file__), "cert.pem") @@ -81,12 +77,12 @@ def do_CONNECT(self): handler = type( "ResponseHandler", ( - BaseHTTPServer.BaseHTTPRequestHandler, + BaseHTTPRequestHandler, object, ), {"do_GET": handler, "do_POST": handler, "do_CONNECT": do_CONNECT}, ) - self.httpd = BaseHTTPServer.HTTPServer(("localhost", self.port), handler) + self.httpd = HTTPServer(("localhost", self.port), handler) self.httpd.connections = [] self.httpd.connect_host = None self.httpd.connect_port = None diff --git a/tests/agent_unittests/test_import_hook.py b/tests/agent_unittests/test_import_hook.py index fa414484c2..65ad885d35 100644 --- a/tests/agent_unittests/test_import_hook.py +++ b/tests/agent_unittests/test_import_hook.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - import newrelic.api.import_hook as import_hook -import newrelic.packages.six as six import pytest from newrelic.config import _module_function_glob @@ -41,25 +38,19 @@ def test_import_hook_finder(monkeypatch): } monkeypatch.setattr(import_hook, "_import_hooks", registered_hooks) - # Finding a module that does not exist and is not registered returns None. - module = finder.find_module("module_does_not_exist") + # Finding a module that does not exist returns None, whether or not it is registered. + module = finder.find_spec("module_does_not_exist") assert module is None - # Finding a module that does not exist and is registered behaves - # differently on python 2 vs python 3. - if six.PY2: - with pytest.raises(ImportError): - module = finder.find_module("registered_but_does_not_exist") - else: - module = finder.find_module("registered_but_does_not_exist") - assert module is None + module = finder.find_spec("registered_but_does_not_exist") + assert module is None # Finding a module that exists, but is not registered returns None. - module = finder.find_module("newrelic") + module = finder.find_spec("newrelic") assert module is None # Finding a module that exists, and is registered, finds that module. 
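# A hedged sketch of the stdlib analogue of the finder behavior asserted in
# this test: importlib.util.find_spec returns None for a missing top-level
# module instead of raising, which is why the registered-but-nonexistent case
# no longer needs a Python 2 ImportError branch. (MetaPathFinder.find_module
# was deprecated in Python 3.4 and removed in 3.12.)
import importlib.util

assert importlib.util.find_spec("module_does_not_exist_xyz") is None  # missing -> None
assert importlib.util.find_spec("json") is not None  # found -> a ModuleSpec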
- module = finder.find_module("newrelic.api") + module = finder.find_spec("newrelic.api") assert module is not None diff --git a/tests/agent_unittests/test_package_version_utils.py b/tests/agent_unittests/test_package_version_utils.py index ccfef670b6..6d0e1e4a73 100644 --- a/tests/agent_unittests/test_package_version_utils.py +++ b/tests/agent_unittests/test_package_version_utils.py @@ -25,7 +25,6 @@ get_package_version, get_package_version_tuple, ) -from newrelic.packages import six # Notes: # importlib.metadata was a provisional addition to the std library in PY38 and PY39 @@ -137,7 +136,6 @@ def _getattr_deprecation_warning(attr): raise NotImplementedError() -@pytest.mark.skipif(six.PY2, reason="Can't add Deprecation in __version__ in Python 2.") def test_deprecation_warning_suppression(monkeypatch, recwarn): # Add fake module to be deleted later monkeypatch.setattr(pytest, "__getattr__", _getattr_deprecation_warning, raising=False) diff --git a/tests/agent_unittests/test_sampler_metrics.py b/tests/agent_unittests/test_sampler_metrics.py index dba1a285a0..cf5e030dfe 100644 --- a/tests/agent_unittests/test_sampler_metrics.py +++ b/tests/agent_unittests/test_sampler_metrics.py @@ -20,7 +20,6 @@ from testing_support.fixtures import override_generic_settings from newrelic.core.config import global_settings -from newrelic.packages import six from newrelic.samplers.cpu_usage import cpu_usage_data_source from newrelic.samplers.gc_data import garbage_collector_data_source from newrelic.samplers.memory_usage import memory_usage_data_source @@ -52,36 +51,28 @@ def memory_data_source(): PID = os.getpid() -if six.PY2: - EXPECTED_GC_METRICS = ( - "GC/objects/%d/all" % PID, - "GC/objects/%d/generation/0" % PID, - "GC/objects/%d/generation/1" % PID, - "GC/objects/%d/generation/2" % PID, - ) -else: - EXPECTED_GC_METRICS = ( - "GC/objects/%d/all" % PID, - "GC/objects/%d/generation/0" % PID, - "GC/objects/%d/generation/1" % PID, - "GC/objects/%d/generation/2" % PID, - "GC/collections/%d/all" % PID, - "GC/collections/%d/0" % PID, - "GC/collections/%d/1" % PID, - "GC/collections/%d/2" % PID, - "GC/collected/%d/all" % PID, - "GC/collected/%d/0" % PID, - "GC/collected/%d/1" % PID, - "GC/collected/%d/2" % PID, - "GC/uncollectable/%d/all" % PID, - "GC/uncollectable/%d/0" % PID, - "GC/uncollectable/%d/1" % PID, - "GC/uncollectable/%d/2" % PID, - "GC/time/%d/all" % PID, - "GC/time/%d/0" % PID, - "GC/time/%d/1" % PID, - "GC/time/%d/2" % PID, - ) +EXPECTED_GC_METRICS = ( + "GC/objects/%d/all" % PID, + "GC/objects/%d/generation/0" % PID, + "GC/objects/%d/generation/1" % PID, + "GC/objects/%d/generation/2" % PID, + "GC/collections/%d/all" % PID, + "GC/collections/%d/0" % PID, + "GC/collections/%d/1" % PID, + "GC/collections/%d/2" % PID, + "GC/collected/%d/all" % PID, + "GC/collected/%d/0" % PID, + "GC/collected/%d/1" % PID, + "GC/collected/%d/2" % PID, + "GC/uncollectable/%d/all" % PID, + "GC/uncollectable/%d/0" % PID, + "GC/uncollectable/%d/1" % PID, + "GC/uncollectable/%d/2" % PID, + "GC/time/%d/all" % PID, + "GC/time/%d/0" % PID, + "GC/time/%d/1" % PID, + "GC/time/%d/2" % PID, +) @pytest.mark.xfail( diff --git a/tests/agent_unittests/test_utilization_settings.py b/tests/agent_unittests/test_utilization_settings.py index 69cc8baedb..33eaa63a52 100644 --- a/tests/agent_unittests/test_utilization_settings.py +++ b/tests/agent_unittests/test_utilization_settings.py @@ -15,6 +15,8 @@ import os import tempfile +from importlib import reload + import pytest # these will be reloaded for each test @@ -34,12 +36,6 @@ 
global_settings, ) -try: - # python 2.x - reload -except NameError: - # python 3.x - from importlib import reload INI_FILE_WITHOUT_UTIL_CONF = b""" [newrelic] diff --git a/tests/application_gearman/test_gearman.py b/tests/application_gearman/test_gearman.py index 5dda4ef47e..72ffde5219 100644 --- a/tests/application_gearman/test_gearman.py +++ b/tests/application_gearman/test_gearman.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function +# ============================================================================================================= +# This framework has not had recent releases and falls outside our support window, so these tests are disabled. +# ============================================================================================================= import os import threading diff --git a/tests/component_djangorestframework/test_application.py b/tests/component_djangorestframework/test_application.py index 29861dca82..0adef819da 100644 --- a/tests/component_djangorestframework/test_application.py +++ b/tests/component_djangorestframework/test_application.py @@ -27,7 +27,6 @@ ) from newrelic.core.config import global_settings -from newrelic.packages import six DJANGO_VERSION = tuple(map(int, django.get_version().split(".")[:2])) @@ -149,7 +148,7 @@ def _test(): @validate_transaction_errors(errors=[]) @validate_transaction_metrics(_test_api_view_view_name_get, scoped_metrics=_test_api_view_scoped_metrics_get) -@validate_code_level_metrics("urls.WrappedAPIView", "wrapped_view", py2_namespace="urls") +@validate_code_level_metrics("urls.WrappedAPIView", "wrapped_view") def test_api_view_get(target_application): response = target_application.get("/api_view/") response.mustcontain("wrapped_view response") diff --git a/tests/component_flask_rest/test_application.py b/tests/component_flask_rest/test_application.py index 67d4825a1b..f2ecfab7bd 100644 --- a/tests/component_flask_rest/test_application.py +++ b/tests/component_flask_rest/test_application.py @@ -29,7 +29,6 @@ from newrelic.common.object_names import callable_name from newrelic.core.config import global_settings -from newrelic.packages import six @pytest.fixture(params=["flask_restful", "flask_restx"]) @@ -60,7 +59,7 @@ def application(request): ] -@validate_code_level_metrics("_test_application.create_app..IndexResource", "get", py2_namespace="_test_application.IndexResource") +@validate_code_level_metrics("_test_application.create_app..IndexResource", "get") @validate_transaction_errors(errors=[]) @validate_transaction_metrics("_test_application:index", scoped_metrics=_test_application_index_scoped_metrics) def test_application_index(application): @@ -86,7 +85,7 @@ def test_application_index(application): ], ) def test_application_raises(exception, status_code, ignore_status_code, propagate_exceptions, application): - @validate_code_level_metrics("_test_application.create_app..ExceptionResource", "get", py2_namespace="_test_application.ExceptionResource") + @validate_code_level_metrics("_test_application.create_app..ExceptionResource", "get") @validate_transaction_metrics("_test_application:exception", scoped_metrics=_test_application_raises_scoped_metrics) def _test(): try: diff --git a/tests/component_tastypie/test_application.py b/tests/component_tastypie/test_application.py index b16dd44f61..d15b4c6d21 100644 --- a/tests/component_tastypie/test_application.py +++ 
b/tests/component_tastypie/test_application.py @@ -29,7 +29,6 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import end_of_transaction -from newrelic.packages import six test_application = webtest.TestApp(application) @@ -74,20 +73,10 @@ def __exit__(self, *args, **kwargs): ("Python/WSGI/Application", 1), ("Python/WSGI/Response", 1), ("Python/WSGI/Finalize", 1), + ("Function/tastypie.resources:Resource.wrap_view..wrapper", 1), + ("Function/django.urls.resolvers:URLResolver.resolve", 1), ] -if six.PY3: - _test_api_base_scoped_metrics.append(("Function/tastypie.resources:Resource.wrap_view..wrapper", 1)) -else: - _test_api_base_scoped_metrics.append(("Function/tastypie.resources:wrapper", 1)) - -# django < 1.12 used the RegexURLResolver class and this was updated to URLResolver in later versions -if VERSION <= (0, 14, 3) and not six.PY3: - _test_api_base_scoped_metrics.append(("Function/django.urls.resolvers:RegexURLResolver.resolve", 1)) -else: - _test_api_base_scoped_metrics.append(("Function/django.urls.resolvers:URLResolver.resolve", 1)) - - _test_application_not_found_scoped_metrics = list(_test_api_base_scoped_metrics) @@ -135,13 +124,7 @@ def test_object_does_not_exist(api_version, tastypie_full_debug): test_application.get("/api/%s/simple/ObjectDoesNotExist/" % api_version, status=404) -_test_application_raises_zerodivision = list(_test_api_base_scoped_metrics) -_test_application_raises_zerodivision_exceptions = [] - -if six.PY3: - _test_application_raises_zerodivision_exceptions.append("builtins:ZeroDivisionError") -else: - _test_application_raises_zerodivision_exceptions.append("exceptions:ZeroDivisionError") +_test_application_raises_zerodivision_exceptions = ["builtins:ZeroDivisionError"] @pytest.mark.parametrize("api_version", ["v1", "v2"]) @@ -149,10 +132,9 @@ def test_object_does_not_exist(api_version, tastypie_full_debug): @validate_transaction_errors(errors=_test_application_raises_zerodivision_exceptions) def test_raises_zerodivision(api_version, tastypie_full_debug): _test_application_raises_zerodivision = list(_test_api_base_scoped_metrics) - if tastypie_full_debug: _test_application_raises_zerodivision.append( - (("Function/django.core.handlers.exception:" "handle_uncaught_exception"), 1) + (("Function/django.core.handlers.exception:handle_uncaught_exception"), 1) ) else: _test_application_raises_zerodivision.append(("Function/tastypie.http:HttpApplicationError.close", 1)) diff --git a/tests/cross_agent/test_cat_map.py b/tests/cross_agent/test_cat_map.py index ea011990a8..d7ba6ec4f0 100644 --- a/tests/cross_agent/test_cat_map.py +++ b/tests/cross_agent/test_cat_map.py @@ -24,10 +24,7 @@ import pytest import webtest -try: - from urllib2 import urlopen # Py2.X -except ImportError: - from urllib.request import urlopen # Py3.X +from urllib.request import urlopen from testing_support.fixtures import ( make_cross_agent_headers, @@ -49,7 +46,6 @@ ) from newrelic.api.wsgi_application import wsgi_application from newrelic.common.encoding_utils import json_encode, obfuscate -from newrelic.packages import six ENCODING_KEY = "1234567890123456789012345678901234567890" CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -94,10 +90,7 @@ def load_tests(): def target_wsgi_application(environ, start_response): status = "200 OK" - txn_name = environ.get("txn") - if six.PY2: - txn_name = txn_name.decode("UTF-8") - txn_name = txn_name.split("/", 3) + txn_name = environ.get("txn").split("/", 3) guid = environ.get("guid") old_cat = 
environ.get("old_cat") == "True" @@ -192,12 +185,8 @@ def test_cat_map( @override_application_settings(_custom_settings) @override_application_name(appName) def run_cat_test(): - if six.PY2: - txn_name = transactionName.encode("UTF-8") - guid = transactionGuid.encode("UTF-8") - else: - txn_name = transactionName - guid = transactionGuid + txn_name = transactionName + guid = transactionGuid # Only generate old cat style headers. This will test to make sure we # are properly ignoring these headers when the agent is using better diff --git a/tests/cross_agent/test_collector_hostname.py b/tests/cross_agent/test_collector_hostname.py index 2ce39a1ec8..714959a369 100644 --- a/tests/cross_agent/test_collector_hostname.py +++ b/tests/cross_agent/test_collector_hostname.py @@ -18,14 +18,9 @@ import sys import tempfile -import pytest +from importlib import reload -try: - # python 2.x - reload -except NameError: - # python 3.x - from importlib import reload +import pytest CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/cross_agent/test_utilization_configs.py b/tests/cross_agent/test_utilization_configs.py index 4a4adb4859..30ba192b9a 100644 --- a/tests/cross_agent/test_utilization_configs.py +++ b/tests/cross_agent/test_utilization_configs.py @@ -17,6 +17,8 @@ import sys import tempfile +from importlib import reload + import pytest # NOTE: the test_utilization_settings_from_env_vars test mocks several of the @@ -29,12 +31,6 @@ from newrelic.common.utilization import CommonUtilization from newrelic.core.agent_protocol import AgentProtocol -try: - # python 2.x - reload -except NameError: - # python 3.x - from importlib import reload INITIAL_ENV = os.environ diff --git a/tests/cross_agent/test_w3c_trace_context.py b/tests/cross_agent/test_w3c_trace_context.py index 893274ce44..e897528c5c 100644 --- a/tests/cross_agent/test_w3c_trace_context.py +++ b/tests/cross_agent/test_w3c_trace_context.py @@ -34,7 +34,6 @@ from newrelic.api.wsgi_application import wsgi_application from newrelic.common.encoding_utils import W3CTraceState from newrelic.common.object_wrapper import transient_function_wrapper -from newrelic.packages import six CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) JSON_DIR = os.path.normpath(os.path.join(CURRENT_DIR, "fixtures", "distributed_tracing")) @@ -231,8 +230,6 @@ def test_trace_context( if transport_type != "HTTP": extra_environ[".inbound_headers"] = inbound_headers inbound_headers = None - elif six.PY2 and inbound_headers: - inbound_headers = {k.encode("utf-8"): v.encode("utf-8") for k, v in inbound_headers.items()} @validate_transaction_metrics( test_name, group="Uri", rollup_metrics=expected_metrics, background_task=not web_transaction diff --git a/tests/datastore_psycopg2/test_cursor.py b/tests/datastore_psycopg2/test_cursor.py index d66d73ff84..8f4dcbd1d9 100644 --- a/tests/datastore_psycopg2/test_cursor.py +++ b/tests/datastore_psycopg2/test_cursor.py @@ -162,9 +162,7 @@ def _exercise_db(cursor_factory=None, use_cur_context=False, row_type=tuple, _test_matrix[1].append((str, True)) # Composable SQL is expected to be available in versions 2.7 and up -assert sql, ( - "Composable sql (from psycopg2 import sql) is expected to load" - "but is not loading") +assert sql, "Composable sql (from psycopg2 import sql) is expected to load but is not loading" # exercise with regular SQL wrapper _test_matrix[1].append((sql.SQL, True)) diff --git a/tests/datastore_pymongo/test_pymongo.py b/tests/datastore_pymongo/test_pymongo.py index 
4649062cee..a91bf09b56 100644 --- a/tests/datastore_pymongo/test_pymongo.py +++ b/tests/datastore_pymongo/test_pymongo.py @@ -22,7 +22,6 @@ from testing_support.validators.validate_transaction_errors import validate_transaction_errors from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task -from newrelic.packages import six DB_SETTINGS = mongodb_settings()[0] MONGODB_HOST = DB_SETTINGS["host"] @@ -268,11 +267,9 @@ def test_mongodb_client_operation(): _test_pymongo_client_scoped_metrics = _test_pymongo_scoped_metrics_v4 _test_pymongo_client_rollup_metrics = _test_pymongo_rollup_metrics_v4 - txn_name = "test_pymongo:test_mongodb_client_operation.._test" if six.PY3 else "test_pymongo:_test" - @validate_transaction_errors(errors=[]) @validate_transaction_metrics( - txn_name, + "test_pymongo:test_mongodb_client_operation.._test", scoped_metrics=_test_pymongo_client_scoped_metrics, rollup_metrics=_test_pymongo_client_rollup_metrics, background_task=True, diff --git a/tests/datastore_redis/test_rb.py b/tests/datastore_redis/test_rb.py deleted file mode 100644 index 0c11493484..0000000000 --- a/tests/datastore_redis/test_rb.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" The purpose of these tests is to confirm that we will record -record instance info for Redis Blaster commands that go through -redis.Connection:send_command(). Commands that don't use send_command, -like the one that use the fanout client, won't have instance info. -""" - -import pytest -from testing_support.db_settings import redis_settings -from testing_support.fixtures import override_application_settings -from testing_support.util import instance_hostname -from testing_support.validators.validate_transaction_metrics import ( - validate_transaction_metrics, -) - -from newrelic.api.background_task import background_task -from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six - -DB_SETTINGS = redis_settings()[0] -REDIS_PY_VERSION = get_package_version_tuple("redis") - - -# Settings - -_enable_instance_settings = { - "datastore_tracer.instance_reporting.enabled": True, -} -_disable_instance_settings = { - "datastore_tracer.instance_reporting.enabled": False, -} - -# Metrics - -# We don't record instance metrics when using redis blaster, -# so we just check for base metrics. 
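# A minimal sketch (illustrative values only) of the instance metric these
# deleted Redis Blaster tests toggled: the name is built from host and port,
# and with instance reporting disabled its expected count below is None,
# meaning the metric must be absent.
_host, _port = "localhost", 6379  # hypothetical stand-ins for DB_SETTINGS values
assert "Datastore/instance/Redis/%s/%s" % (_host, _port) == "Datastore/instance/Redis/localhost/6379"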
- -_base_scoped_metrics = ( - ("Datastore/operation/Redis/get", 1), - ("Datastore/operation/Redis/set", 1), -) - -_base_rollup_metrics = ( - ("Datastore/all", 2), - ("Datastore/allOther", 2), - ("Datastore/Redis/all", 2), - ("Datastore/Redis/allOther", 2), - ("Datastore/operation/Redis/get", 1), - ("Datastore/operation/Redis/set", 1), -) - -_disable_rollup_metrics = list(_base_rollup_metrics) -_enable_rollup_metrics = list(_base_rollup_metrics) - -_host = instance_hostname(DB_SETTINGS["host"]) -_port = DB_SETTINGS["port"] - -_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) - -_enable_rollup_metrics.append((_instance_metric_name, 2)) - -_disable_rollup_metrics.append((_instance_metric_name, None)) - - -# Operations -def exercise_redis(routing_client): - routing_client.set("key", "value") - routing_client.get("key") - - -def exercise_fanout(cluster): - with cluster.fanout(hosts="all") as client: - client.execute_command("CLIENT", "LIST") - - -# Tests -@pytest.mark.skipif(six.PY3, reason="Redis Blaster is Python 2 only.") -@pytest.mark.skipif(REDIS_PY_VERSION < (2, 10, 2), reason="Redis Blaster requires redis>=2.10.2") -@override_application_settings(_enable_instance_settings) -@validate_transaction_metrics( - "test_rb:test_redis_blaster_operation_enable_instance", - scoped_metrics=_base_scoped_metrics, - rollup_metrics=_enable_rollup_metrics, - background_task=True, -) -@background_task() -def test_redis_blaster_operation_enable_instance(): - from rb import Cluster - - cluster = Cluster(hosts={0: {"port": DB_SETTINGS["port"]}}, host_defaults={"host": DB_SETTINGS["host"]}) - exercise_fanout(cluster) - - client = cluster.get_routing_client() - exercise_redis(client) - - -@pytest.mark.skipif(six.PY3, reason="Redis Blaster is Python 2 only.") -@pytest.mark.skipif(REDIS_PY_VERSION < (2, 10, 2), reason="Redis Blaster requires redis>=2.10.2") -@override_application_settings(_disable_instance_settings) -@validate_transaction_metrics( - "test_rb:test_redis_blaster_operation_disable_instance", - scoped_metrics=_base_scoped_metrics, - rollup_metrics=_disable_rollup_metrics, - background_task=True, -) -@background_task() -def test_redis_blaster_operation_disable_instance(): - from rb import Cluster - - cluster = Cluster(hosts={0: {"port": DB_SETTINGS["port"]}}, host_defaults={"host": DB_SETTINGS["host"]}) - exercise_fanout(cluster) - - client = cluster.get_routing_client() - exercise_redis(client) diff --git a/tests/external_http/test_http.py b/tests/external_http/test_http.py index 673a4a031e..fd607ca39f 100644 --- a/tests/external_http/test_http.py +++ b/tests/external_http/test_http.py @@ -29,20 +29,13 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six -if six.PY2: - import httplib -else: - import http.client as httplib +import http.client @pytest.fixture(scope="session") def metrics(server): - if six.PY2: - _external_metric = "External/localhost:%s/httplib/" % server.port - else: - _external_metric = "External/localhost:%s/http/" % server.port + _external_metric = "External/localhost:%s/http/" % server.port scoped = [(_external_metric, 1)] @@ -62,7 +55,7 @@ def test_http_http_request(server, metrics): ) @background_task(name="test_http:test_http_http_request") def _test(): - connection = httplib.HTTPConnection("localhost", server.port) + connection = http.client.HTTPConnection("localhost", server.port) connection.request("GET", "/") response = connection.getresponse() response.read() @@ -77,7 +70,7 @@ def 
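With the six.PY2 branch gone, these tests always import http.client, and the expected external metric segment is unconditionally "http" (the agent derives that segment from the client library's module name, which was "httplib" on Python 2). A minimal sketch of the request pattern the tests exercise; the endpoint is a placeholder:

    import http.client

    connection = http.client.HTTPConnection("localhost", 8989)  # placeholder endpoint
    connection.request("GET", "/")
    response = connection.getresponse()
    response.read()
    connection.close()
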
test_http_https_request(server, metrics): ) @background_task(name="test_http:test_http_https_request") def _test(): - connection = httplib.HTTPSConnection("localhost", server.port) + connection = http.client.HTTPSConnection("localhost", server.port) try: connection.request("GET", "/") except Exception: @@ -100,7 +93,7 @@ def test_http_cross_process_request(distributed_tracing, span_events, server): @cache_outgoing_headers @validate_cross_process_headers def _test(): - connection = httplib.HTTPConnection("localhost", server.port) + connection = http.client.HTTPConnection("localhost", server.port) connection.request("GET", "/") response = connection.getresponse() response.read() @@ -145,7 +138,7 @@ def test_http_cross_process_response(server): @validate_external_node_params(params=_test_http_cross_process_response_external_node_params) @background_task(name="test_http:test_http_cross_process_response") def _test(): - connection = httplib.HTTPConnection("localhost", server.port) + connection = http.client.HTTPConnection("localhost", server.port) connection.request("GET", "/") response = connection.getresponse() response.read() diff --git a/tests/external_httplib/test_httplib.py b/tests/external_httplib/test_httplib.py index f67e68dc29..0c2c2b94d7 100644 --- a/tests/external_httplib/test_httplib.py +++ b/tests/external_httplib/test_httplib.py @@ -14,10 +14,7 @@ import pytest -try: - import http.client as httplib -except ImportError: - import httplib +import http.client as httplib from testing_support.external_fixtures import ( cache_outgoing_headers, @@ -40,29 +37,18 @@ from newrelic.api.background_task import background_task from newrelic.common.encoding_utils import DistributedTracePayload -from newrelic.packages import six - - -def select_python_version(py2, py3): - return six.PY3 and py3 or py2 def test_httplib_http_request(server): scoped = [ - select_python_version( - py2=("External/localhost:%d/httplib/" % server.port, 1), - py3=("External/localhost:%d/http/" % server.port, 1), - ) + ("External/localhost:%d/http/" % server.port, 1), ] rollup = [ ("External/all", 1), ("External/allOther", 1), ("External/localhost:%d/all" % server.port, 1), - select_python_version( - py2=("External/localhost:%d/httplib/" % server.port, 1), - py3=("External/localhost:%d/http/" % server.port, 1), - ), + ("External/localhost:%d/http/" % server.port, 1), ] @validate_transaction_metrics( @@ -81,20 +67,14 @@ def _test(): def test_httplib_https_request(server): _test_httplib_https_request_scoped_metrics = [ - select_python_version( - py2=("External/localhost:%d/httplib/" % server.port, 1), - py3=("External/localhost:%d/http/" % server.port, 1), - ) + ("External/localhost:%d/http/" % server.port, 1), ] _test_httplib_https_request_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), ("External/localhost:%d/all" % server.port, 1), - select_python_version( - py2=("External/localhost:%d/httplib/" % server.port, 1), - py3=("External/localhost:%d/http/" % server.port, 1), - ), + ("External/localhost:%d/http/" % server.port, 1), ] @validate_transaction_metrics( @@ -121,20 +101,14 @@ def _test(): def test_httplib_http_with_port_request(server): scoped = [ - select_python_version( - py2=("External/localhost:%d/httplib/" % server.port, 1), - py3=("External/localhost:%d/http/" % server.port, 1), - ) + ("External/localhost:%d/http/" % server.port, 1), ] rollup = [ ("External/all", 1), ("External/allOther", 1), ("External/localhost:%d/all" % server.port, 1), - select_python_version( - 
py2=("External/localhost:%d/httplib/" % server.port, 1), - py3=("External/localhost:%d/http/" % server.port, 1), - ), + ("External/localhost:%d/http/" % server.port, 1), ] @validate_transaction_metrics( @@ -329,14 +303,12 @@ def test_span_events(server): uri = "http://localhost:%d" % server.port exact_intrinsics = { - "name": select_python_version( - py2="External/localhost:%d/httplib/" % server.port, py3="External/localhost:%d/http/" % server.port - ), + "name": "External/localhost:%d/http/" % server.port, "type": "Span", "sampled": True, "category": "http", "span.kind": "client", - "component": select_python_version(py2="httplib", py3="http"), + "component": "http", } exact_agents = { "http.url": uri, diff --git a/tests/external_httplib/test_urllib2.py b/tests/external_httplib/test_urllib2.py index 62ed230745..44e0f41d8d 100644 --- a/tests/external_httplib/test_urllib2.py +++ b/tests/external_httplib/test_urllib2.py @@ -16,10 +16,7 @@ import pytest -try: - import urllib.request as urllib2 -except: - import urllib2 +import urllib.request as urllib2 from testing_support.external_fixtures import ( cache_outgoing_headers, diff --git a/tests/framework_ariadne/conftest.py b/tests/framework_ariadne/conftest.py index e9de51e7da..cbb4448cd5 100644 --- a/tests/framework_ariadne/conftest.py +++ b/tests/framework_ariadne/conftest.py @@ -17,7 +17,6 @@ collector_available_fixture, ) -from newrelic.packages import six _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. @@ -32,7 +31,3 @@ app_name="Python Agent Test (framework_ariadne)", default_settings=_default_settings, ) - - -if six.PY2: - collect_ignore = ["test_application_async.py"] diff --git a/tests/framework_bottle/test_application.py b/tests/framework_bottle/test_application.py index cdcd90e3f3..21a748e010 100644 --- a/tests/framework_bottle/test_application.py +++ b/tests/framework_bottle/test_application.py @@ -32,7 +32,6 @@ ) from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six version = list(get_package_version_tuple("bottle")) @@ -85,11 +84,7 @@ def test_application_index(target_application): _test_application_error_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) _test_application_error_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] - -if six.PY3: - _test_application_error_errors = ["builtins:RuntimeError"] -else: - _test_application_error_errors = ["exceptions:RuntimeError"] +_test_application_error_errors = ["builtins:RuntimeError"] @validate_code_level_metrics("_target_application", "error_page") @@ -181,9 +176,7 @@ def test_application_auth_basic_fail(target_application): custom_metrics=_test_application_auth_basic_okay_custom_metrics, ) def test_application_auth_basic_okay(target_application): - authorization_value = base64.b64encode(b"user:password") - if six.PY3: - authorization_value = authorization_value.decode("Latin-1") + authorization_value = base64.b64encode(b"user:password").decode("Latin-1") environ = {"HTTP_AUTHORIZATION": "Basic " + authorization_value} response = target_application.get("/auth", extra_environ=environ) response.mustcontain("AUTH OKAY") diff --git a/tests/framework_cherrypy/test_application.py b/tests/framework_cherrypy/test_application.py index dd4595c0b8..0448ce7127 100644 --- a/tests/framework_cherrypy/test_application.py +++ b/tests/framework_cherrypy/test_application.py @@ -26,7 +26,6 @@ validate_transaction_errors, ) -from 
newrelic.packages import six CHERRYPY_VERSION = tuple(int(v) for v in cherrypy.__version__.split(".")) @@ -106,13 +105,7 @@ def test_application_missing(): test_application.get("/missing", status=404) -if six.PY3: - _test_application_unexpected_exception_errors = ["builtins:RuntimeError"] -else: - _test_application_unexpected_exception_errors = ["exceptions:RuntimeError"] - - -@validate_transaction_errors(errors=_test_application_unexpected_exception_errors) +@validate_transaction_errors(errors=["builtins:RuntimeError"]) def test_application_unexpected_exception(): test_application.get("/error", status=500) diff --git a/tests/framework_cherrypy/test_dispatch.py b/tests/framework_cherrypy/test_dispatch.py index 64dccb2146..7e4358146e 100644 --- a/tests/framework_cherrypy/test_dispatch.py +++ b/tests/framework_cherrypy/test_dispatch.py @@ -15,7 +15,6 @@ import pytest import webtest -from newrelic.packages import six from testing_support.validators.validate_transaction_errors import validate_transaction_errors @@ -40,12 +39,8 @@ def _cp_dispatch(self, vpath): application = cherrypy.Application(Resource(), '/', conf) test_application = webtest.TestApp(application) -if six.PY3: - _test_dispatch_exception_errors = ['builtins:RuntimeError'] -else: - _test_dispatch_exception_errors = ['exceptions:RuntimeError'] @requires_cherrypy32 -@validate_transaction_errors(errors=_test_dispatch_exception_errors) +@validate_transaction_errors(errors=['builtins:RuntimeError']) def test_dispatch_exception(): response = test_application.get('/sub/a/b', status=500) diff --git a/tests/framework_flask/_test_compress.py b/tests/framework_flask/_test_compress.py index 1fbf207689..8ca66c09c2 100644 --- a/tests/framework_flask/_test_compress.py +++ b/tests/framework_flask/_test_compress.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - from io import BytesIO as IO -except ImportError: - import StringIO as IO +from io import BytesIO import webtest from flask import Flask, Response, send_file @@ -81,7 +78,7 @@ def html_insertion_named_attachment_header(): @application.route("/html_served_from_file") def html_served_from_file(): - file = IO() + file = BytesIO() contents = b""" Some header

<h1>My First Heading</h1>

<p>My first paragraph.</p>
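This fixture now builds its in-memory file with io.BytesIO unconditionally, since the Python 2 StringIO fallback is gone and Flask serves bytes. A minimal sketch of the serve-from-memory pattern, with a hypothetical route (not the fixture itself):

    from io import BytesIO

    from flask import Flask, send_file

    app = Flask(__name__)  # hypothetical app for illustration


    @app.route("/served_from_file")
    def served_from_file():
        file = BytesIO()
        file.write(b"<p>My first paragraph.</p>")
        file.seek(0)  # rewind so send_file streams from the start
        return send_file(file, mimetype="text/html")
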

@@ -94,7 +91,7 @@ def html_served_from_file(): @application.route("/text_served_from_file") def text_served_from_file(): - file = IO() + file = BytesIO() contents = b""" Some header

<h1>My First Heading</h1>

<p>My first paragraph.</p>
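The Flask changes below drop a stale comment about view handler names differing between Python 2 and 3. The difference it referred to is __qualname__: on Python 3 a function defined inside another function carries its enclosing scope in its qualified name, which is why expected transaction names throughout this patch take the module:outer.<locals>.inner form. A minimal illustration:

    def make_handler():
        def on_message():
            pass

        return on_message


    handler = make_handler()
    assert handler.__name__ == "on_message"
    assert handler.__qualname__ == "make_handler.<locals>.on_message"
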

diff --git a/tests/framework_flask/test_application.py b/tests/framework_flask/test_application.py index 508fb68934..b1250828dd 100644 --- a/tests/framework_flask/test_application.py +++ b/tests/framework_flask/test_application.py @@ -28,7 +28,6 @@ validate_transaction_metrics, ) -from newrelic.packages import six try: # The __version__ attribute was only added in 0.7.0. @@ -53,10 +52,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. if not async_handler_support: from _test_application import _test_application @@ -179,13 +175,7 @@ def test_application_endpoint(): ] -if six.PY3: - _test_application_error_errors = ["builtins:RuntimeError"] -else: - _test_application_error_errors = ["exceptions:RuntimeError"] - - -@validate_transaction_errors(errors=_test_application_error_errors) +@validate_transaction_errors(errors=["builtins:RuntimeError"]) @validate_transaction_metrics("_test_application:error_page", scoped_metrics=_test_application_error_scoped_metrics) @validate_code_level_metrics("_test_application", "error_page") def test_application_error(): diff --git a/tests/framework_flask/test_blueprints.py b/tests/framework_flask/test_blueprints.py index 97d4fc463b..32a450d5f7 100644 --- a/tests/framework_flask/test_blueprints.py +++ b/tests/framework_flask/test_blueprints.py @@ -34,10 +34,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. from _test_blueprints import _test_application diff --git a/tests/framework_flask/test_compress.py b/tests/framework_flask/test_compress.py index 00ec4ed223..743bf49703 100644 --- a/tests/framework_flask/test_compress.py +++ b/tests/framework_flask/test_compress.py @@ -29,10 +29,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. from _test_compress import _test_application diff --git a/tests/framework_flask/test_middleware.py b/tests/framework_flask/test_middleware.py index 3e45cf6a47..41de7b258f 100644 --- a/tests/framework_flask/test_middleware.py +++ b/tests/framework_flask/test_middleware.py @@ -28,10 +28,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. 
Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. from _test_middleware import _test_application diff --git a/tests/framework_flask/test_not_found.py b/tests/framework_flask/test_not_found.py index c1c55475ea..62e6fecfcc 100644 --- a/tests/framework_flask/test_not_found.py +++ b/tests/framework_flask/test_not_found.py @@ -22,10 +22,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. from _test_not_found import _test_application return _test_application diff --git a/tests/framework_flask/test_user_exceptions.py b/tests/framework_flask/test_user_exceptions.py index 844b4b9ef6..5e2a31736d 100644 --- a/tests/framework_flask/test_user_exceptions.py +++ b/tests/framework_flask/test_user_exceptions.py @@ -22,10 +22,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. from _test_user_exceptions import _test_application return _test_application diff --git a/tests/framework_flask/test_views.py b/tests/framework_flask/test_views.py index 0338169c7f..39cd99de27 100644 --- a/tests/framework_flask/test_views.py +++ b/tests/framework_flask/test_views.py @@ -43,10 +43,7 @@ def target_application(): # issues whereby the agent needs to be initialised before Flask is # imported and the routes configured. Normally pytest only runs the # global fixture which will initialise the agent after each test - # file is imported, which is too late. We also can't do application - # creation within a function as we will then get view handler - # functions are different between Python 2 and 3, with the latter - # showing scope in path. + # file is imported, which is too late. if not async_handler_support: from _test_views import _test_application diff --git a/tests/framework_graphene/conftest.py b/tests/framework_graphene/conftest.py index 12f69f5a35..176768cc75 100644 --- a/tests/framework_graphene/conftest.py +++ b/tests/framework_graphene/conftest.py @@ -18,7 +18,6 @@ collector_available_fixture, ) -from newrelic.packages import six _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. 
@@ -40,7 +39,3 @@ def app(): from _target_application import _target_application return _target_application - - -if six.PY2: - collect_ignore = ["test_application_async.py"] diff --git a/tests/framework_graphql/conftest.py b/tests/framework_graphql/conftest.py index fdf1bf5cf8..53d90d364f 100644 --- a/tests/framework_graphql/conftest.py +++ b/tests/framework_graphql/conftest.py @@ -18,7 +18,6 @@ collector_available_fixture, ) -from newrelic.packages import six _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. @@ -45,7 +44,3 @@ def target_application(request): return return "GraphQL", None, app, True, request.param.split("-")[1], 0 - - -if six.PY2: - collect_ignore = ["test_application_async.py"] diff --git a/tests/framework_grpc/conftest.py b/tests/framework_grpc/conftest.py index 879f06602f..27498b363f 100644 --- a/tests/framework_grpc/conftest.py +++ b/tests/framework_grpc/conftest.py @@ -22,7 +22,6 @@ ) from testing_support.mock_external_grpc_server import MockExternalgRPCServer -import newrelic.packages.six as six _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. @@ -56,29 +55,6 @@ def mock_grpc_server(grpc_app_server): return port -@pytest.fixture(scope="function", autouse=True) -def gc_garbage_empty(): - yield - - # Python 2 fails to collect objects with finalizers that participate in a reference cycle. - # These assertions are made with that assumption in mind. - # If there's a failure on py2, it's applicable to py3 as well. - # If PY3 has a reference cycle (which it shouldn't), but PY2 does not, it will be GCed - if six.PY2: - # garbage collect until everything is reachable - while gc.collect(): - pass - - from grpc._channel import _Rendezvous - - rendezvous_stored = sum(1 for o in gc.get_objects() if hasattr(o, "__class__") and isinstance(o, _Rendezvous)) - - assert rendezvous_stored == 0 - - # make sure that even python knows there isn't any garbage remaining - assert not gc.garbage - - @pytest.fixture(scope="session") def stub(stub_and_channel): return stub_and_channel[0] diff --git a/tests/framework_grpc/test_clients.py b/tests/framework_grpc/test_clients.py index a27f5fad15..67f8f9daeb 100644 --- a/tests/framework_grpc/test_clients.py +++ b/tests/framework_grpc/test_clients.py @@ -23,7 +23,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six _test_matrix = [ ("service_method_type,service_method_method_name,raises_exception," "message_count,cancel"), @@ -81,11 +80,6 @@ def test_client( ("External/all", 1), ] - if six.PY2: - _test_transaction_name = "test_clients:_test_client" - else: - _test_transaction_name = "test_clients:test_client.._test_client" - _errors = [] if not streaming_response and cancel: _errors.append("grpc:FutureCancelledError") @@ -94,7 +88,7 @@ def test_client( @validate_transaction_errors(errors=_errors) @validate_transaction_metrics( - _test_transaction_name, + "test_clients:test_client.._test_client", scoped_metrics=_test_scoped_metrics, rollup_metrics=_test_rollup_metrics, background_task=True, @@ -178,14 +172,9 @@ def test_future_timeout_error(service_method_type, service_method_method_name, f ("External/all", 1), ] - if six.PY2: - _test_transaction_name = "test_clients:_test_future_timeout_error" - else: - _test_transaction_name = "test_clients:test_future_timeout_error.." 
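The gc_garbage_empty fixture deleted from the gRPC conftest existed because Python 2's collector could not free objects whose finalizers participated in a reference cycle; Python 3 handles those (PEP 442), so the guard is obsolete. For reference, a minimal sketch of the collect-until-stable check it performed, with an illustrative stand-in class:

    import gc


    class Rendezvous:  # stand-in for grpc._channel._Rendezvous
        pass


    def assert_no_instances_remain():
        while gc.collect():  # collect until a pass frees nothing further
            pass
        remaining = sum(1 for o in gc.get_objects() if isinstance(o, Rendezvous))
        assert remaining == 0
        assert not gc.garbage  # nothing left that the collector could not free
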
"_test_future_timeout_error" - @validate_transaction_errors(errors=[]) @validate_transaction_metrics( - _test_transaction_name, + "test_clients:test_future_timeout_error.." "_test_future_timeout_error", scoped_metrics=_test_scoped_metrics, rollup_metrics=_test_rollup_metrics, background_task=True, @@ -231,14 +220,9 @@ def test_repeated_result(service_method_type, service_method_method_name, mock_g ("External/all", 1), ] - if six.PY2: - _test_transaction_name = "test_clients:_test_repeated_result" - else: - _test_transaction_name = "test_clients:" "test_repeated_result.._test_repeated_result" - @validate_transaction_errors(errors=[]) @validate_transaction_metrics( - _test_transaction_name, + "test_clients:" "test_repeated_result.._test_repeated_result", scoped_metrics=_test_scoped_metrics, rollup_metrics=_test_rollup_metrics, background_task=True, @@ -286,14 +270,9 @@ def test_future_cancel(service_method_type, service_method_method_name, future_r ("External/all", 1), ] - if six.PY2: - _test_transaction_name = "test_clients:_test_future_cancel" - else: - _test_transaction_name = "test_clients:test_future_cancel.." "_test_future_cancel" - @validate_transaction_errors(errors=[]) @validate_transaction_metrics( - _test_transaction_name, + "test_clients:test_future_cancel.." "_test_future_cancel", scoped_metrics=_test_scoped_metrics, rollup_metrics=_test_rollup_metrics, background_task=True, diff --git a/tests/framework_grpc/test_server.py b/tests/framework_grpc/test_server.py index 1d8783a8c0..8b057eda3d 100644 --- a/tests/framework_grpc/test_server.py +++ b/tests/framework_grpc/test_server.py @@ -35,17 +35,12 @@ ) from newrelic.core.config import global_settings -from newrelic.packages import six +from newrelic.common.package_version_utils import get_package_version -def select_python_version(py2, py3): - return six.PY3 and py3 or py2 +GRPC_VERSION = get_package_version("grpc") -if hasattr(grpc, "__version__"): - GRPC_VERSION = tuple(int(v) for v in grpc.__version__.split(".")) -else: - GRPC_VERSION = None _test_matrix = [ "method_name,streaming_request", @@ -97,7 +92,7 @@ def test_raises_response_status(method_name, streaming_request, mock_grpc_server @validate_code_level_metrics("sample_application.SampleApplicationServicer", method_name) @validate_transaction_errors( - errors=[select_python_version(py2="exceptions:AssertionError", py3="builtins:AssertionError")] + errors=["builtins:AssertionError"] ) @validate_transaction_metrics(_transaction_name) @override_application_settings({"attributes.include": ["request.*"]}) @@ -128,7 +123,7 @@ def test_abort(method_name, streaming_request, mock_grpc_server, stub): method = getattr(stub, method_name) @validate_code_level_metrics("sample_application.SampleApplicationServicer", method_name) - @validate_transaction_errors(errors=[select_python_version(py2="exceptions:Exception", py3="builtins:Exception")]) + @validate_transaction_errors(errors=["builtins:Exception"]) @wait_for_transaction_completion def _doit(): with pytest.raises(grpc.RpcError) as error: @@ -149,7 +144,7 @@ def test_abort_with_status(method_name, streaming_request, mock_grpc_server, stu method = getattr(stub, method_name) @validate_code_level_metrics("sample_application.SampleApplicationServicer", method_name) - @validate_transaction_errors(errors=[select_python_version(py2="exceptions:Exception", py3="builtins:Exception")]) + @validate_transaction_errors(errors=["builtins:Exception"]) @wait_for_transaction_completion def _doit(): with pytest.raises(grpc.RpcError) as error: diff 
--git a/tests/framework_pyramid/test_application.py b/tests/framework_pyramid/test_application.py index 572f8706e4..0547d37b9e 100644 --- a/tests/framework_pyramid/test_application.py +++ b/tests/framework_pyramid/test_application.py @@ -24,7 +24,6 @@ validate_transaction_metrics, ) -from newrelic.packages import six def target_application(with_tweens=False, tweens_explicit=False): @@ -40,12 +39,6 @@ def target_application(with_tweens=False, tweens_explicit=False): return _app(with_tweens, tweens_explicit) - -if six.PY3: - tween_name = "Function/_test_application:" "simple_tween_factory..simple_tween" -else: - tween_name = "Function/_test_application:simple_tween" - _test_application_index_scoped_metrics = [ ("Python/WSGI/Application", 1), ("Python/WSGI/Response", 1), @@ -68,7 +61,7 @@ def test_application_index(with_tweens, tweens_explicit): metrics = list(_test_application_index_scoped_metrics) if with_tweens: - metrics.append((tween_name, 1)) + metrics.append(("Function/_test_application:" "simple_tween_factory..simple_tween", 1)) @validate_code_level_metrics("_test_application", "home_view") @validate_transaction_errors(errors=[]) @@ -143,13 +136,8 @@ def test_application_not_found_returns_NotFound(): ("Function/_test_application:error", 1), ] -if six.PY3: - _test_unexpected_exception_errors = ["builtins:RuntimeError"] -else: - _test_unexpected_exception_errors = ["exceptions:RuntimeError"] - -@validate_transaction_errors(errors=_test_unexpected_exception_errors) +@validate_transaction_errors(errors=["builtins:RuntimeError"]) @validate_transaction_metrics("_test_application:error", scoped_metrics=_test_unexpected_exception_scoped_metrics) def test_application_unexpected_exception(): application = target_application() diff --git a/tests/framework_pyramid/test_cornice.py b/tests/framework_pyramid/test_cornice.py index fe36831e00..f272f34e22 100644 --- a/tests/framework_pyramid/test_cornice.py +++ b/tests/framework_pyramid/test_cornice.py @@ -18,7 +18,6 @@ from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from testing_support.validators.validate_transaction_errors import validate_transaction_errors -from newrelic.packages import six @pytest.fixture(autouse=True, scope="module") @@ -89,13 +88,8 @@ def test_cornice_resource_get(): ('Function/cornice.pyramidhook:handle_exceptions', 1), ('Function/_test_application:cornice_error_get_info', 1)] -if six.PY3: - _test_cornice_error_errors = ['builtins:RuntimeError'] -else: - _test_cornice_error_errors = ['exceptions:RuntimeError'] - @validate_code_level_metrics("_test_application", "cornice_error_get_info") -@validate_transaction_errors(errors=_test_cornice_error_errors) +@validate_transaction_errors(errors=['builtins:RuntimeError']) @validate_transaction_metrics('_test_application:cornice_error_get_info', scoped_metrics=_test_cornice_error_scoped_metrics) def test_cornice_error(): diff --git a/tests/logger_logging/test_attributes.py b/tests/logger_logging/test_attributes.py index 63a7e94f70..0329c2bb0b 100644 --- a/tests/logger_logging/test_attributes.py +++ b/tests/logger_logging/test_attributes.py @@ -17,7 +17,6 @@ from testing_support.validators.validate_log_events import validate_log_events from newrelic.api.background_task import background_task -from newrelic.packages import six @validate_log_events( @@ -68,7 +67,6 @@ def test_logging_exc_info_context_attributes(logger): logger.error("exc_info", exc_info=True) -@pytest.mark.skipif(six.PY2, reason="stack_info on log messages not available in 
Python 2.") @validate_log_events([{"message": "stack_info"}], required_attrs=["context.stack_info"]) @validate_log_event_count(1) @background_task() diff --git a/tests/logger_logging/test_metrics.py b/tests/logger_logging/test_metrics.py index f5a1c5e8da..5ac2fefc51 100644 --- a/tests/logger_logging/test_metrics.py +++ b/tests/logger_logging/test_metrics.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from newrelic.packages import six from newrelic.api.background_task import background_task from testing_support.fixtures import reset_core_stats_engine from testing_support.validators.validate_custom_metrics_outside_transaction import validate_custom_metrics_outside_transaction @@ -37,9 +36,8 @@ def exercise_logging(logger): @reset_core_stats_engine() def test_logging_metrics_inside_transaction(logger): - txn_name = "test_metrics:test_logging_metrics_inside_transaction..test" if six.PY3 else "test_metrics:test" @validate_transaction_metrics( - txn_name, + "test_metrics:test_logging_metrics_inside_transaction..test", custom_metrics=_test_logging_unscoped_metrics, background_task=True, ) diff --git a/tests/logger_logging/test_settings.py b/tests/logger_logging/test_settings.py index d6d49513d8..63d7f62e52 100644 --- a/tests/logger_logging/test_settings.py +++ b/tests/logger_logging/test_settings.py @@ -14,7 +14,6 @@ import pytest -from newrelic.packages import six from newrelic.api.background_task import background_task from testing_support.fixtures import reset_core_stats_engine from testing_support.validators.validate_log_event_count import validate_log_event_count @@ -73,14 +72,13 @@ def test(): @reset_core_stats_engine() def test_log_metrics_settings(logger, feature_setting, subfeature_setting, expected): metric_count = 1 if expected else None - txn_name = "test_settings:test_log_metrics_settings..test" if six.PY3 else "test_settings:test" @override_application_settings({ "application_logging.enabled": feature_setting, "application_logging.metrics.enabled": subfeature_setting, }) @validate_transaction_metrics( - txn_name, + "test_settings:test_log_metrics_settings..test", custom_metrics=[ ("Logging/lines", metric_count), ("Logging/lines/WARNING", metric_count), diff --git a/tests/logger_structlog/test_metrics.py b/tests/logger_structlog/test_metrics.py index 48f7204e87..dbcd5e5c54 100644 --- a/tests/logger_structlog/test_metrics.py +++ b/tests/logger_structlog/test_metrics.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from newrelic.packages import six from newrelic.api.background_task import background_task from testing_support.fixtures import reset_core_stats_engine from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -29,9 +28,8 @@ @reset_core_stats_engine() def test_logging_metrics_inside_transaction(exercise_logging_multiple_lines): - txn_name = "test_metrics:test_logging_metrics_inside_transaction..test" if six.PY3 else "test_metrics:test" @validate_transaction_metrics( - txn_name, + "test_metrics:test_logging_metrics_inside_transaction..test", custom_metrics=_test_logging_unscoped_metrics, background_task=True, ) @@ -60,9 +58,8 @@ def test(): @reset_core_stats_engine() def test_filtering_logging_metrics_inside_transaction(exercise_filtering_logging_multiple_lines): - txn_name = "test_metrics:test_filtering_logging_metrics_inside_transaction..test" if six.PY3 else "test_metrics:test" @validate_transaction_metrics( - txn_name, + "test_metrics:test_filtering_logging_metrics_inside_transaction..test", custom_metrics=_test_logging_unscoped_filtering_metrics, background_task=True, ) diff --git a/tests/messagebroker_confluentkafka/test_consumer.py b/tests/messagebroker_confluentkafka/test_consumer.py index 31f9478b30..cf276f46aa 100644 --- a/tests/messagebroker_confluentkafka/test_consumer.py +++ b/tests/messagebroker_confluentkafka/test_consumer.py @@ -34,7 +34,6 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import end_of_transaction from newrelic.common.object_names import callable_name -from newrelic.packages import six def test_custom_metrics(get_consumer_record, topic): @@ -68,12 +67,8 @@ def _test(): def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): from confluent_kafka import __version__ as version - transaction_name = ( - "test_consumer:test_custom_metrics_on_existing_transaction.._test" if six.PY3 else "test_consumer:_test" - ) - @validate_transaction_metrics( - transaction_name, + "test_consumer:test_custom_metrics_on_existing_transaction.._test", custom_metrics=[ ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, 1), ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, 1), @@ -90,12 +85,8 @@ def _test(): def test_custom_metrics_inactive_transaction(get_consumer_record, topic): - transaction_name = ( - "test_consumer:test_custom_metrics_inactive_transaction.._test" if six.PY3 else "test_consumer:_test" - ) - @validate_transaction_metrics( - transaction_name, + "test_consumer:test_custom_metrics_inactive_transaction.._test", custom_metrics=[ ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, None), ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, None), diff --git a/tests/messagebroker_confluentkafka/test_producer.py b/tests/messagebroker_confluentkafka/test_producer.py index fe33794fa7..a3fc04a50f 100644 --- a/tests/messagebroker_confluentkafka/test_producer.py +++ b/tests/messagebroker_confluentkafka/test_producer.py @@ -30,7 +30,6 @@ from newrelic.api.background_task import background_task from newrelic.api.function_trace import FunctionTrace from newrelic.common.object_names import callable_name -from newrelic.packages import six @pytest.mark.parametrize( @@ -102,10 +101,9 @@ def test_trace_metrics(topic, send_producer_message): scoped_metrics = [("MessageBroker/Kafka/Topic/Produce/Named/%s" % topic, 1)] unscoped_metrics = scoped_metrics - txn_name = "test_producer:test_trace_metrics..test" if six.PY3 else "test_producer:test" 
@validate_transaction_metrics( - txn_name, + "test_producer:test_trace_metrics..test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, custom_metrics=[("Python/MessageBroker/Confluent-Kafka/%s" % version, 1)], @@ -119,10 +117,8 @@ def test(): def test_distributed_tracing_headers(topic, send_producer_message): - txn_name = "test_producer:test_distributed_tracing_headers..test" if six.PY3 else "test_producer:test" - @validate_transaction_metrics( - txn_name, + "test_producer:test_distributed_tracing_headers..test", rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), diff --git a/tests/messagebroker_confluentkafka/test_serialization.py b/tests/messagebroker_confluentkafka/test_serialization.py index 0b8b41d52a..91a19b916d 100644 --- a/tests/messagebroker_confluentkafka/test_serialization.py +++ b/tests/messagebroker_confluentkafka/test_serialization.py @@ -22,19 +22,16 @@ from newrelic.api.background_task import background_task from newrelic.common.object_names import callable_name -from newrelic.packages import six def test_serialization_metrics(skip_if_not_serializing, topic, send_producer_message): - txn_name = "test_serialization:test_serialization_metrics..test" if six.PY3 else "test_serialization:test" - _metrics = [ ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Value" % topic, 1), ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Key" % topic, 1), ] @validate_transaction_metrics( - txn_name, + "test_serialization:test_serialization_metrics..test", scoped_metrics=_metrics, rollup_metrics=_metrics, background_task=True, diff --git a/tests/messagebroker_kafkapython/test_consumer.py b/tests/messagebroker_kafkapython/test_consumer.py index 78ba086c6e..2d62e038e1 100644 --- a/tests/messagebroker_kafkapython/test_consumer.py +++ b/tests/messagebroker_kafkapython/test_consumer.py @@ -34,7 +34,6 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import end_of_transaction from newrelic.common.object_names import callable_name -from newrelic.packages import six def test_custom_metrics(get_consumer_record, topic): @@ -65,12 +64,8 @@ def _test(): def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): from kafka.version import __version__ as version - transaction_name = ( - "test_consumer:test_custom_metrics_on_existing_transaction.._test" if six.PY3 else "test_consumer:_test" - ) - @validate_transaction_metrics( - transaction_name, + "test_consumer:test_custom_metrics_on_existing_transaction.._test", custom_metrics=[ ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, 1), ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, 1), @@ -87,12 +82,8 @@ def _test(): def test_custom_metrics_inactive_transaction(get_consumer_record, topic): - transaction_name = ( - "test_consumer:test_custom_metrics_inactive_transaction.._test" if six.PY3 else "test_consumer:_test" - ) - @validate_transaction_metrics( - transaction_name, + "test_consumer:test_custom_metrics_inactive_transaction.._test", custom_metrics=[ ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, None), ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, None), diff --git a/tests/messagebroker_kafkapython/test_producer.py b/tests/messagebroker_kafkapython/test_producer.py index 418ea4b408..168e80a739 100644 --- a/tests/messagebroker_kafkapython/test_producer.py +++ b/tests/messagebroker_kafkapython/test_producer.py @@ -27,7 +27,6 @@ from newrelic.api.background_task 
import background_task from newrelic.api.function_trace import FunctionTrace from newrelic.common.object_names import callable_name -from newrelic.packages import six def test_trace_metrics(topic, send_producer_message): @@ -35,10 +34,9 @@ def test_trace_metrics(topic, send_producer_message): scoped_metrics = [("MessageBroker/Kafka/Topic/Produce/Named/%s" % topic, 1)] unscoped_metrics = scoped_metrics - txn_name = "test_producer:test_trace_metrics..test" if six.PY3 else "test_producer:test" @validate_transaction_metrics( - txn_name, + "test_producer:test_trace_metrics..test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, custom_metrics=[("Python/MessageBroker/Kafka-Python/%s" % version, 1)], @@ -52,10 +50,8 @@ def test(): def test_distributed_tracing_headers(topic, send_producer_message): - txn_name = "test_producer:test_distributed_tracing_headers..test" if six.PY3 else "test_producer:test" - @validate_transaction_metrics( - txn_name, + "test_producer:test_distributed_tracing_headers..test", rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), diff --git a/tests/messagebroker_kafkapython/test_serialization.py b/tests/messagebroker_kafkapython/test_serialization.py index 0b2bee74df..4c9096284d 100644 --- a/tests/messagebroker_kafkapython/test_serialization.py +++ b/tests/messagebroker_kafkapython/test_serialization.py @@ -28,19 +28,16 @@ from newrelic.api.background_task import background_task from newrelic.common.object_names import callable_name -from newrelic.packages import six def test_serialization_metrics(skip_if_not_serializing, topic, send_producer_message): - txn_name = "test_serialization:test_serialization_metrics..test" if six.PY3 else "test_serialization:test" - _metrics = [ ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Value" % topic, 1), ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Key" % topic, 1), ] @validate_transaction_metrics( - txn_name, + "test_serialization:test_serialization_metrics..test", scoped_metrics=_metrics, rollup_metrics=_metrics, background_task=True, @@ -79,7 +76,7 @@ def test(): ), ) def test_deserialization_errors(skip_if_not_serializing, monkeypatch, topic, producer, consumer, key, value): - error_cls = json.decoder.JSONDecodeError if six.PY3 else ValueError + error_cls = json.decoder.JSONDecodeError # Remove serializers to cause intentional issues monkeypatch.setitem(producer.config, "value_serializer", None) diff --git a/tests/messagebroker_pika/compat.py b/tests/messagebroker_pika/compat.py index b19e3e712e..91151a21ec 100644 --- a/tests/messagebroker_pika/compat.py +++ b/tests/messagebroker_pika/compat.py @@ -16,14 +16,14 @@ def basic_consume(channel, queue, callback, auto_ack=None): - kwargs = {'queue': queue} + kwargs = {"queue": queue} if pika_version_info[0] < 1: - kwargs['consumer_callback'] = callback + kwargs["consumer_callback"] = callback if auto_ack is not None: - kwargs['no_ack'] = not auto_ack + kwargs["no_ack"] = not auto_ack else: - kwargs['on_message_callback'] = callback + kwargs["on_message_callback"] = callback if auto_ack is not None: - kwargs['auto_ack'] = auto_ack + kwargs["auto_ack"] = auto_ack return channel.basic_consume(**kwargs) diff --git a/tests/messagebroker_pika/minversion.py b/tests/messagebroker_pika/minversion.py index 0c2148fe30..df034bff80 100644 --- a/tests/messagebroker_pika/minversion.py +++ b/tests/messagebroker_pika/minversion.py @@ -16,15 +16,16 @@ import pytest import pika -pika_version_info = 
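The deserialization-error change above no longer branches on interpreter: on Python 3, malformed JSON always raises json.decoder.JSONDecodeError, which remains a subclass of the ValueError that Python 2 raised. A minimal illustration:

    import json

    try:
        json.loads("not json")
    except json.decoder.JSONDecodeError as exc:
        assert isinstance(exc, ValueError)  # JSONDecodeError subclasses ValueError
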
tuple(int(num) for num in pika.__version__.split('.')[:2]) +pika_version_info = tuple(int(num) for num in pika.__version__.split(".")[:2]) new_pika_xfail = pytest.mark.xfail( - condition=pika_version_info[0] > 0, strict=True, - reason='test fails if pika version is 1.x or greater') + condition=pika_version_info[0] > 0, strict=True, reason="test fails if pika version is 1.x or greater" +) new_pika_xfail_py37 = pytest.mark.xfail( - condition=pika_version_info[0] > 0 and sys.version_info >= (3, 7), - strict=True, - reason='test fails if pika version is 1.x or greater') + condition=pika_version_info[0] > 0 and sys.version_info >= (3, 7), + strict=True, + reason="test fails if pika version is 1.x or greater", +) new_pika_skip = pytest.mark.skipif( - condition=pika_version_info[0] > 0, - reason='test hangs if pika version is 1.x or greater') + condition=pika_version_info[0] > 0, reason="test hangs if pika version is 1.x or greater" +) diff --git a/tests/messagebroker_pika/test_cat.py b/tests/messagebroker_pika/test_cat.py index 6ba9730477..57085501f8 100644 --- a/tests/messagebroker_pika/test_cat.py +++ b/tests/messagebroker_pika/test_cat.py @@ -26,7 +26,6 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import current_transaction -from newrelic.packages import six DB_SETTINGS = rabbitmq_settings()[0] @@ -55,14 +54,10 @@ def do_basic_publish(channel, QUEUE, properties=None): ] _test_cat_basic_consume_rollup_metrics = list(_test_cat_basic_consume_scoped_metrics) _test_cat_basic_consume_rollup_metrics.append(("ClientApplication/1#1/all", 1)) -if six.PY3: - _txn_name = "test_cat:test_basic_consume_cat_headers..on_receive" -else: - _txn_name = "test_cat:on_receive" @validate_transaction_metrics( - _txn_name, + "test_cat:test_basic_consume_cat_headers..on_receive", scoped_metrics=_test_cat_basic_consume_scoped_metrics, rollup_metrics=_test_cat_basic_consume_rollup_metrics, background_task=True, diff --git a/tests/messagebroker_pika/test_distributed_tracing.py b/tests/messagebroker_pika/test_distributed_tracing.py index 6387ecdbcb..9de1fe1897 100644 --- a/tests/messagebroker_pika/test_distributed_tracing.py +++ b/tests/messagebroker_pika/test_distributed_tracing.py @@ -26,7 +26,6 @@ from newrelic.api.function_trace import FunctionTrace from newrelic.api.transaction import current_transaction from newrelic.common.encoding_utils import DistributedTracePayload -from newrelic.packages import six DB_SETTINGS = rabbitmq_settings()[0] @@ -73,16 +72,9 @@ def do_basic_publish(channel, QUEUE, properties=None): ("TransportDuration/App/33/12345/AMQP/allOther", 1), ] -if six.PY3: - _consume_txn_name = ( - "test_distributed_tracing:" "test_basic_consume_distributed_tracing_headers." 
".on_receive" - ) -else: - _consume_txn_name = "test_distributed_tracing:on_receive" - @validate_transaction_metrics( - _consume_txn_name, + "test_distributed_tracing:test_basic_consume_distributed_tracing_headers..on_receive", rollup_metrics=_test_distributed_tracing_basic_consume_rollup_metrics, background_task=True, group="Message/RabbitMQ/Exchange/Default", diff --git a/tests/messagebroker_pika/test_memory_leak.py b/tests/messagebroker_pika/test_memory_leak.py index 8ad2e2c5af..78f81744d8 100644 --- a/tests/messagebroker_pika/test_memory_leak.py +++ b/tests/messagebroker_pika/test_memory_leak.py @@ -24,14 +24,12 @@ @background_task() def test_memory_leak(): - params = pika.ConnectionParameters( - DB_SETTINGS['host'], DB_SETTINGS['port']) + params = pika.ConnectionParameters(DB_SETTINGS["host"], DB_SETTINGS["port"]) # create 2 unreferenced blocking channels with pika.BlockingConnection(params) as connection: for _ in range(2): - connection.channel().basic_publish( - exchange='', routing_key='memory_leak_test', body='test') + connection.channel().basic_publish(exchange="", routing_key="memory_leak_test", body="test") # garbage collect until everything is reachable while gc.collect(): @@ -39,7 +37,7 @@ def test_memory_leak(): # the number of channel objects stored should be 0 from pika.adapters.blocking_connection import BlockingChannel - channel_objects_stored = sum(1 for o in gc.get_objects() - if isinstance(o, BlockingChannel)) + + channel_objects_stored = sum(1 for o in gc.get_objects() if isinstance(o, BlockingChannel)) assert channel_objects_stored == 0 diff --git a/tests/messagebroker_pika/test_pika_async_connection_consume.py b/tests/messagebroker_pika/test_pika_async_connection_consume.py index 88d7b9f34b..0ff26c4d0f 100644 --- a/tests/messagebroker_pika/test_pika_async_connection_consume.py +++ b/tests/messagebroker_pika/test_pika_async_connection_consume.py @@ -49,7 +49,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six DB_SETTINGS = rabbitmq_settings()[0] @@ -81,22 +80,9 @@ def handle_callback_exception(self, *args, **kwargs): _test_select_conn_basic_get_inside_txn_metrics = [ ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, 1), + ("Function/test_pika_async_connection_consume:test_async_connection_basic_get_inside_txn..on_message", 1), ] -if six.PY3: - _test_select_conn_basic_get_inside_txn_metrics.append( - ( - ( - "Function/test_pika_async_connection_consume:" - "test_async_connection_basic_get_inside_txn." 
- ".on_message" - ), - 1, - ) - ) -else: - _test_select_conn_basic_get_inside_txn_metrics.append(("Function/test_pika_async_connection_consume:on_message", 1)) - @parametrized_connection @pytest.mark.parametrize("callback_as_partial", [True, False]) @@ -104,7 +90,6 @@ def handle_callback_exception(self, *args, **kwargs): @validate_code_level_metrics( "test_pika_async_connection_consume.test_async_connection_basic_get_inside_txn.", "on_message", - py2_namespace="test_pika_async_connection_consume", ) @validate_span_events( count=1, @@ -274,22 +259,12 @@ def on_open_connection(connection): _test_select_conn_basic_consume_in_txn_metrics = [ ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + ( + "Function/test_pika_async_connection_consume:test_async_connection_basic_consume_inside_txn..on_message", + 1, + ), ] -if six.PY3: - _test_select_conn_basic_consume_in_txn_metrics.append( - ( - ( - "Function/test_pika_async_connection_consume:" - "test_async_connection_basic_consume_inside_txn." - ".on_message" - ), - 1, - ) - ) -else: - _test_select_conn_basic_consume_in_txn_metrics.append(("Function/test_pika_async_connection_consume:on_message", 1)) - @parametrized_connection @validate_transaction_metrics( @@ -301,7 +276,6 @@ def on_open_connection(connection): @validate_code_level_metrics( "test_pika_async_connection_consume.test_async_connection_basic_consume_inside_txn.", "on_message", - py2_namespace="test_pika_async_connection_consume", ) @validate_tt_collector_json(message_broker_params=_message_broker_tt_params) @background_task() @@ -335,37 +309,16 @@ def on_open_connection(connection): ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE_2, None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE_2, None), + ( + "Function/test_pika_async_connection_consume:test_async_connection_basic_consume_two_exchanges..on_message_1", + 1, + ), + ( + "Function/test_pika_async_connection_consume:test_async_connection_basic_consume_two_exchanges..on_message_2", + 1, + ), ] -if six.PY3: - _test_select_conn_basic_consume_two_exchanges.append( - ( - ( - "Function/test_pika_async_connection_consume:" - "test_async_connection_basic_consume_two_exchanges." - ".on_message_1" - ), - 1, - ) - ) - _test_select_conn_basic_consume_two_exchanges.append( - ( - ( - "Function/test_pika_async_connection_consume:" - "test_async_connection_basic_consume_two_exchanges." 
- ".on_message_2" - ), - 1, - ) - ) -else: - _test_select_conn_basic_consume_two_exchanges.append( - ("Function/test_pika_async_connection_consume:on_message_1", 1) - ) - _test_select_conn_basic_consume_two_exchanges.append( - ("Function/test_pika_async_connection_consume:on_message_2", 1) - ) - @parametrized_connection @validate_transaction_metrics( @@ -377,12 +330,10 @@ def on_open_connection(connection): @validate_code_level_metrics( "test_pika_async_connection_consume.test_async_connection_basic_consume_two_exchanges.", "on_message_1", - py2_namespace="test_pika_async_connection_consume", ) @validate_code_level_metrics( "test_pika_async_connection_consume.test_async_connection_basic_consume_two_exchanges.", "on_message_2", - py2_namespace="test_pika_async_connection_consume", ) @background_task() def test_async_connection_basic_consume_two_exchanges(producer, producer_2, ConnectionClass): @@ -460,32 +411,17 @@ def on_open_connection(connection): raise -if six.PY3: - _txn_name = ( - "test_pika_async_connection_consume:" - "test_select_connection_basic_consume_outside_transaction." - ".on_message" - ) - _test_select_connection_consume_outside_txn_metrics = [ - ( - ( - "Function/test_pika_async_connection_consume:" - "test_select_connection_basic_consume_outside_transaction." - ".on_message" - ), - None, - ) - ] -else: - _txn_name = "test_pika_async_connection_consume:on_message" - _test_select_connection_consume_outside_txn_metrics = [ - ("Function/test_pika_async_connection_consume:on_message", None) - ] +_test_select_connection_consume_outside_txn_metrics = [ + ( + "Function/test_pika_async_connection_consume:test_select_connection_basic_consume_outside_transaction..on_message", + None, + ), +] # This should create a transaction @validate_transaction_metrics( - _txn_name, + "test_pika_async_connection_consume:test_select_connection_basic_consume_outside_transaction..on_message", scoped_metrics=_test_select_connection_consume_outside_txn_metrics, rollup_metrics=_test_select_connection_consume_outside_txn_metrics, background_task=True, @@ -494,7 +430,6 @@ def on_open_connection(connection): @validate_code_level_metrics( "test_pika_async_connection_consume.test_select_connection_basic_consume_outside_transaction.", "on_message", - py2_namespace="test_pika_async_connection_consume", ) def test_select_connection_basic_consume_outside_transaction(producer): def on_message(channel, method_frame, header_frame, body): diff --git a/tests/messagebroker_pika/test_pika_blocking_connection_consume.py b/tests/messagebroker_pika/test_pika_blocking_connection_consume.py index 07de09f201..3d6a453bf4 100644 --- a/tests/messagebroker_pika/test_pika_blocking_connection_consume.py +++ b/tests/messagebroker_pika/test_pika_blocking_connection_consume.py @@ -34,7 +34,6 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import end_of_transaction -from newrelic.packages import six DB_SETTINGS = rabbitmq_settings()[0] @@ -125,37 +124,15 @@ def test_basic_get(): _test_blocking_conn_basic_consume_no_txn_metrics = [ ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + ("Function/test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_outside_transaction..on_message", None), ] - -if six.PY3: - _txn_name = ( - "test_pika_blocking_connection_consume:" - "test_blocking_connection_basic_consume_outside_transaction." 
- ".on_message" - ) - _test_blocking_conn_basic_consume_no_txn_metrics.append( - ( - ( - "Function/test_pika_blocking_connection_consume:" - "test_blocking_connection_basic_consume_outside_transaction." - ".on_message" - ), - None, - ) - ) -else: - _txn_name = "test_pika_blocking_connection_consume:on_message" - _test_blocking_conn_basic_consume_no_txn_metrics.append( - ("Function/test_pika_blocking_connection_consume:on_message", None) - ) - +_txn_name = "test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_outside_transaction..on_message" @pytest.mark.parametrize("as_partial", [True, False]) @dt_enabled @validate_code_level_metrics( "test_pika_blocking_connection_consume.test_blocking_connection_basic_consume_outside_transaction.", "on_message", - py2_namespace="test_pika_blocking_connection_consume", ) @validate_transaction_metrics( _txn_name, @@ -193,31 +170,14 @@ def on_message(channel, method_frame, header_frame, body): _test_blocking_conn_basic_consume_in_txn_metrics = [ ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + ("Function/test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_inside_txn..on_message", 1), ] -if six.PY3: - _test_blocking_conn_basic_consume_in_txn_metrics.append( - ( - ( - "Function/test_pika_blocking_connection_consume:" - "test_blocking_connection_basic_consume_inside_txn." - ".on_message" - ), - 1, - ) - ) -else: - _test_blocking_conn_basic_consume_in_txn_metrics.append( - ("Function/test_pika_blocking_connection_consume:on_message", 1) - ) - - @pytest.mark.parametrize("as_partial", [True, False]) @dt_enabled @validate_code_level_metrics( "test_pika_blocking_connection_consume.test_blocking_connection_basic_consume_inside_txn.", "on_message", - py2_namespace="test_pika_blocking_connection_consume", ) @validate_transaction_metrics( "test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_inside_txn", @@ -250,25 +210,9 @@ def on_message(channel, method_frame, header_frame, body): ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), ("OtherTransaction/Message/RabbitMQ/Exchange/Named/%s" % EXCHANGE, None), + ("Function/test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_stopped_txn..on_message", None), ] -if six.PY3: - _test_blocking_conn_basic_consume_stopped_txn_metrics.append( - ( - ( - "Function/test_pika_blocking_connection_consume:" - "test_blocking_connection_basic_consume_stopped_txn." 
- ".on_message" - ), - None, - ) - ) -else: - _test_blocking_conn_basic_consume_stopped_txn_metrics.append( - ("Function/test_pika_blocking_connection_consume:on_message", None) - ) - - @pytest.mark.parametrize("as_partial", [True, False]) @validate_transaction_metrics( "test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_stopped_txn", diff --git a/tests/messagebroker_pika/test_pika_supportability.py b/tests/messagebroker_pika/test_pika_supportability.py index dacd68ca9f..487b1a3e4b 100644 --- a/tests/messagebroker_pika/test_pika_supportability.py +++ b/tests/messagebroker_pika/test_pika_supportability.py @@ -21,7 +21,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six DB_SETTINGS = rabbitmq_settings()[0] @@ -91,14 +90,8 @@ def on_open_connection(connection): raise -if six.PY3: - _txn_name = "test_pika_supportability:" "test_select_connection_supportability_outside_txn." ".on_message" -else: - _txn_name = "test_pika_supportability:on_message" - - @validate_transaction_metrics( - _txn_name, + "test_pika_supportability:test_select_connection_supportability_outside_txn..on_message", scoped_metrics=(), rollup_metrics=_test_select_connection_supportability_metrics, background_task=True, diff --git a/tests/mlmodel_sklearn/test_calibration_models.py b/tests/mlmodel_sklearn/test_calibration_models.py index 39ac34cb23..e294c75ce5 100644 --- a/tests/mlmodel_sklearn/test_calibration_models.py +++ b/tests/mlmodel_sklearn/test_calibration_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six def test_model_methods_wrapped_in_function_trace(calibration_model_name, run_calibration_model): @@ -30,14 +29,8 @@ def test_model_methods_wrapped_in_function_trace(calibration_model_name, run_cal ], } - expected_transaction_name = "test_calibration_models:_test" - if six.PY3: - expected_transaction_name = ( - "test_calibration_models:test_model_methods_wrapped_in_function_trace.._test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_calibration_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[calibration_model_name], rollup_metrics=expected_scoped_metrics[calibration_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_cluster_models.py b/tests/mlmodel_sklearn/test_cluster_models.py index 17471700ee..bbef2a9f46 100644 --- a/tests/mlmodel_sklearn/test_cluster_models.py +++ b/tests/mlmodel_sklearn/test_cluster_models.py @@ -19,7 +19,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -95,14 +94,9 @@ def test_below_v1_1_model_methods_wrapped_in_function_trace(cluster_model_name, ("Function/MLModel/Sklearn/Named/SpectralClustering.fit_predict", 1), ], } - expected_transaction_name = "test_cluster_models:_test" - if six.PY3: - expected_transaction_name = ( - "test_cluster_models:test_below_v1_1_model_methods_wrapped_in_function_trace.._test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_cluster_models:test_below_v1_1_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[cluster_model_name], rollup_metrics=expected_scoped_metrics[cluster_model_name], background_task=True, @@ -134,14 +128,9 @@ def 
test_above_v1_1_model_methods_wrapped_in_function_trace(cluster_model_name, ("Function/MLModel/Sklearn/Named/OPTICS.fit_predict", 1), ], } - expected_transaction_name = "test_cluster_models:_test" - if six.PY3: - expected_transaction_name = ( - "test_cluster_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_cluster_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[cluster_model_name], rollup_metrics=expected_scoped_metrics[cluster_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_compose_models.py b/tests/mlmodel_sklearn/test_compose_models.py index eab076fc3c..7e497004c6 100644 --- a/tests/mlmodel_sklearn/test_compose_models.py +++ b/tests/mlmodel_sklearn/test_compose_models.py @@ -20,7 +20,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -42,14 +41,8 @@ def test_model_methods_wrapped_in_function_trace(compose_model_name, run_compose ], } - expected_transaction_name = ( - "test_compose_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_compose_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_compose_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[compose_model_name], rollup_metrics=expected_scoped_metrics[compose_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_covariance_models.py b/tests/mlmodel_sklearn/test_covariance_models.py index afa5c31c20..e56574c3b9 100644 --- a/tests/mlmodel_sklearn/test_covariance_models.py +++ b/tests/mlmodel_sklearn/test_covariance_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -64,14 +63,9 @@ def test_model_methods_wrapped_in_function_trace(covariance_model_name, run_cova ("Function/MLModel/Sklearn/Named/OAS.fit", 1), ], } - expected_transaction_name = ( - "test_covariance_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_covariance_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_covariance_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[covariance_model_name], rollup_metrics=expected_scoped_metrics[covariance_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_cross_decomposition_models.py b/tests/mlmodel_sklearn/test_cross_decomposition_models.py index 6a053350f7..0bbbd39fd2 100644 --- a/tests/mlmodel_sklearn/test_cross_decomposition_models.py +++ b/tests/mlmodel_sklearn/test_cross_decomposition_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -38,14 +37,9 @@ def test_model_methods_wrapped_in_function_trace(cross_decomposition_model_name, ("Function/MLModel/Sklearn/Named/PLSSVD.transform", 1), ], } - expected_transaction_name = ( - "test_cross_decomposition_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_cross_decomposition_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_cross_decomposition_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[cross_decomposition_model_name], 
rollup_metrics=expected_scoped_metrics[cross_decomposition_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_discriminant_analysis_models.py b/tests/mlmodel_sklearn/test_discriminant_analysis_models.py index de11826962..0ef654d27d 100644 --- a/tests/mlmodel_sklearn/test_discriminant_analysis_models.py +++ b/tests/mlmodel_sklearn/test_discriminant_analysis_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -44,14 +43,8 @@ def test_model_methods_wrapped_in_function_trace(discriminant_analysis_model_nam ], } - expected_transaction_name = ( - "test_discriminant_analysis_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_discriminant_analysis_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_discriminant_analysis_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[discriminant_analysis_model_name], rollup_metrics=expected_scoped_metrics[discriminant_analysis_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_dummy_models.py b/tests/mlmodel_sklearn/test_dummy_models.py index a85b799b22..eeae372070 100644 --- a/tests/mlmodel_sklearn/test_dummy_models.py +++ b/tests/mlmodel_sklearn/test_dummy_models.py @@ -19,7 +19,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -47,14 +46,8 @@ def test_model_methods_wrapped_in_function_trace(dummy_model_name, run_dummy_mod ], } - expected_transaction_name = ( - "test_dummy_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_dummy_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_dummy_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[dummy_model_name], rollup_metrics=expected_scoped_metrics[dummy_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_ensemble_models.py b/tests/mlmodel_sklearn/test_ensemble_models.py index 69c10f1b28..a0eac05957 100644 --- a/tests/mlmodel_sklearn/test_ensemble_models.py +++ b/tests/mlmodel_sklearn/test_ensemble_models.py @@ -20,7 +20,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -122,14 +121,8 @@ def test_below_v1_0_model_methods_wrapped_in_function_trace(ensemble_model_name, ], } - expected_transaction_name = ( - "test_ensemble_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_ensemble_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_ensemble_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[ensemble_model_name], rollup_metrics=expected_scoped_metrics[ensemble_model_name], background_task=True, @@ -178,14 +171,9 @@ def test_between_v1_0_and_v1_1_model_methods_wrapped_in_function_trace(ensemble_ ("Function/MLModel/Sklearn/Named/VotingRegressor.transform", 1), ], } - expected_transaction_name = ( - "test_ensemble_models:test_between_v1_0_and_v1_1_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else 
"test_ensemble_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_ensemble_models:test_between_v1_0_and_v1_1_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[ensemble_model_name], rollup_metrics=expected_scoped_metrics[ensemble_model_name], background_task=True, @@ -239,14 +227,9 @@ def test_above_v1_1_model_methods_wrapped_in_function_trace(ensemble_model_name, ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.score", 1), ], } - expected_transaction_name = ( - "test_ensemble_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_ensemble_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_ensemble_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[ensemble_model_name], rollup_metrics=expected_scoped_metrics[ensemble_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_feature_selection_models.py b/tests/mlmodel_sklearn/test_feature_selection_models.py index f625461116..c3b41685a0 100644 --- a/tests/mlmodel_sklearn/test_feature_selection_models.py +++ b/tests/mlmodel_sklearn/test_feature_selection_models.py @@ -20,7 +20,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -54,14 +53,8 @@ def test_below_v1_0_model_methods_wrapped_in_function_trace(feature_selection_mo ], } - expected_transaction_name = ( - "test_feature_selection_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_feature_selection_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_feature_selection_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[feature_selection_model_name], rollup_metrics=expected_scoped_metrics[feature_selection_model_name], background_task=True, @@ -86,14 +79,9 @@ def test_above_v1_0_model_methods_wrapped_in_function_trace(feature_selection_mo ("Function/MLModel/Sklearn/Named/SequentialFeatureSelector.fit", 1), ], } - expected_transaction_name = ( - "test_feature_selection_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_feature_selection_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_feature_selection_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[feature_selection_model_name], rollup_metrics=expected_scoped_metrics[feature_selection_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_gaussian_process_models.py b/tests/mlmodel_sklearn/test_gaussian_process_models.py index 7a78fc7031..77cce342a8 100644 --- a/tests/mlmodel_sklearn/test_gaussian_process_models.py +++ b/tests/mlmodel_sklearn/test_gaussian_process_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -41,14 +40,8 @@ def test_model_methods_wrapped_in_function_trace(gaussian_process_model_name, ru ], } - expected_transaction_name = ( - "test_gaussian_process_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_gaussian_process_models:_test" - ) - @validate_transaction_metrics( - 
expected_transaction_name, + "test_gaussian_process_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[gaussian_process_model_name], rollup_metrics=expected_scoped_metrics[gaussian_process_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_kernel_ridge_models.py b/tests/mlmodel_sklearn/test_kernel_ridge_models.py index 1cbdddc31b..f3c29f7ef6 100644 --- a/tests/mlmodel_sklearn/test_kernel_ridge_models.py +++ b/tests/mlmodel_sklearn/test_kernel_ridge_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -35,14 +34,8 @@ def test_model_methods_wrapped_in_function_trace(kernel_ridge_model_name, run_ke ], } - expected_transaction_name = ( - "test_kernel_ridge_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_kernel_ridge_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_kernel_ridge_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[kernel_ridge_model_name], rollup_metrics=expected_scoped_metrics[kernel_ridge_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_linear_models.py b/tests/mlmodel_sklearn/test_linear_models.py index d5077bbb5f..347c9e6acc 100644 --- a/tests/mlmodel_sklearn/test_linear_models.py +++ b/tests/mlmodel_sklearn/test_linear_models.py @@ -19,7 +19,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") SCIPY_VERSION = get_package_version_tuple("scipy") @@ -213,12 +212,9 @@ def test_model_methods_wrapped_in_function_trace(linear_model_name, run_linear_m ("Function/MLModel/Sklearn/Named/RANSACRegressor.score", 1), ], } - expected_transaction_name = "test_linear_models:_test" - if six.PY3: - expected_transaction_name = "test_linear_models:test_model_methods_wrapped_in_function_trace.._test" @validate_transaction_metrics( - expected_transaction_name, + "test_linear_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[linear_model_name], rollup_metrics=expected_scoped_metrics[linear_model_name], background_task=True, @@ -280,14 +276,9 @@ def test_above_v1_1_model_methods_wrapped_in_function_trace(linear_model_name, r ("Function/MLModel/Sklearn/Named/SGDOneClassSVM.predict", 1), ], } - expected_transaction_name = "test_linear_models:_test" - if six.PY3: - expected_transaction_name = ( - "test_linear_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_linear_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[linear_model_name], rollup_metrics=expected_scoped_metrics[linear_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_mixture_models.py b/tests/mlmodel_sklearn/test_mixture_models.py index 7ef8381265..4c02c05a1c 100644 --- a/tests/mlmodel_sklearn/test_mixture_models.py +++ b/tests/mlmodel_sklearn/test_mixture_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -44,14 +43,8 @@ def test_model_methods_wrapped_in_function_trace(mixture_model_name, run_mixture ], } - 
expected_transaction_name = ( - "test_mixture_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_mixture_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_mixture_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[mixture_model_name], rollup_metrics=expected_scoped_metrics[mixture_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_model_selection_models.py b/tests/mlmodel_sklearn/test_model_selection_models.py index cadf7e64ca..2af8d1ab99 100644 --- a/tests/mlmodel_sklearn/test_model_selection_models.py +++ b/tests/mlmodel_sklearn/test_model_selection_models.py @@ -19,7 +19,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -47,14 +46,8 @@ def test_model_methods_wrapped_in_function_trace(model_selection_model_name, run ], } - expected_transaction_name = ( - "test_model_selection_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_model_selection_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_model_selection_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[model_selection_model_name], rollup_metrics=expected_scoped_metrics[model_selection_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_multiclass_models.py b/tests/mlmodel_sklearn/test_multiclass_models.py index dd10d76f16..21faac1ad6 100644 --- a/tests/mlmodel_sklearn/test_multiclass_models.py +++ b/tests/mlmodel_sklearn/test_multiclass_models.py @@ -19,7 +19,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six @pytest.mark.parametrize( @@ -47,14 +46,8 @@ def test_model_methods_wrapped_in_function_trace(multiclass_model_name, run_mult ], } - expected_transaction_name = ( - "test_multiclass_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_multiclass_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_multiclass_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[multiclass_model_name], rollup_metrics=expected_scoped_metrics[multiclass_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_multioutput_models.py b/tests/mlmodel_sklearn/test_multioutput_models.py index 49bb7d005a..3726898aa4 100644 --- a/tests/mlmodel_sklearn/test_multioutput_models.py +++ b/tests/mlmodel_sklearn/test_multioutput_models.py @@ -21,14 +21,11 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") -# Python 2 will not allow instantiation of abstract class -# (abstract method is __init__ here) -@pytest.mark.skipif(SKLEARN_VERSION >= (1, 0, 0) or six.PY2, reason="Requires sklearn < 1.0 and Python3") +@pytest.mark.skipif(SKLEARN_VERSION >= (1, 0, 0), reason="Requires sklearn < 1.0") @pytest.mark.parametrize( "multioutput_model_name", [ @@ -42,14 +39,9 @@ def test_below_v1_0_model_methods_wrapped_in_function_trace(multioutput_model_na ("Function/MLModel/Sklearn/Named/MultiOutputEstimator.predict", 2), ], } - expected_transaction_name = ( - "test_multioutput_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else 
"test_multioutput_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_multioutput_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[multioutput_model_name], rollup_metrics=expected_scoped_metrics[multioutput_model_name], background_task=True, @@ -84,14 +76,9 @@ def test_above_v1_0_model_methods_wrapped_in_function_trace(multioutput_model_na ("Function/MLModel/Sklearn/Named/RegressorChain.fit", 1), ], } - expected_transaction_name = ( - "test_multioutput_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_multioutput_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_multioutput_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[multioutput_model_name], rollup_metrics=expected_scoped_metrics[multioutput_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_naive_bayes_models.py b/tests/mlmodel_sklearn/test_naive_bayes_models.py index 4f3208e952..7c9014d339 100644 --- a/tests/mlmodel_sklearn/test_naive_bayes_models.py +++ b/tests/mlmodel_sklearn/test_naive_bayes_models.py @@ -20,7 +20,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -41,14 +40,9 @@ def test_above_v1_0_model_methods_wrapped_in_function_trace(naive_bayes_model_na ("Function/MLModel/Sklearn/Named/CategoricalNB.predict_proba", 1), ], } - expected_transaction_name = ( - "test_naive_bayes_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_naive_bayes_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_naive_bayes_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[naive_bayes_model_name], rollup_metrics=expected_scoped_metrics[naive_bayes_model_name], background_task=True, @@ -97,14 +91,8 @@ def test_model_methods_wrapped_in_function_trace(naive_bayes_model_name, run_nai ], } - expected_transaction_name = ( - "test_naive_bayes_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_naive_bayes_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_naive_bayes_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[naive_bayes_model_name], rollup_metrics=expected_scoped_metrics[naive_bayes_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_neighbors_models.py b/tests/mlmodel_sklearn/test_neighbors_models.py index 11bccf66d7..67a34c28cd 100644 --- a/tests/mlmodel_sklearn/test_neighbors_models.py +++ b/tests/mlmodel_sklearn/test_neighbors_models.py @@ -20,7 +20,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -74,14 +73,8 @@ def test_model_methods_wrapped_in_function_trace(neighbors_model_name, run_neigh ], } - expected_transaction_name = ( - "test_neighbors_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_neighbors_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + 
"test_neighbors_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[neighbors_model_name], rollup_metrics=expected_scoped_metrics[neighbors_model_name], background_task=True, @@ -123,14 +116,9 @@ def test_above_v1_0_model_methods_wrapped_in_function_trace(neighbors_model_name ("Function/MLModel/Sklearn/Named/RadiusNeighborsClassifier.predict_proba", 3), # Added in v1.0 ], } - expected_transaction_name = ( - "test_neighbors_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_neighbors_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_neighbors_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[neighbors_model_name], rollup_metrics=expected_scoped_metrics[neighbors_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_neural_network_models.py b/tests/mlmodel_sklearn/test_neural_network_models.py index a5d8f1b89c..921c3c3c84 100644 --- a/tests/mlmodel_sklearn/test_neural_network_models.py +++ b/tests/mlmodel_sklearn/test_neural_network_models.py @@ -19,7 +19,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -50,14 +49,8 @@ def test_model_methods_wrapped_in_function_trace(neural_network_model_name, run_ ], } - expected_transaction_name = ( - "test_neural_network_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_neural_network_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_neural_network_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[neural_network_model_name], rollup_metrics=expected_scoped_metrics[neural_network_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_pipeline_models.py b/tests/mlmodel_sklearn/test_pipeline_models.py index 769022d920..a1ba525b50 100644 --- a/tests/mlmodel_sklearn/test_pipeline_models.py +++ b/tests/mlmodel_sklearn/test_pipeline_models.py @@ -22,7 +22,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -47,14 +46,8 @@ def test_model_methods_wrapped_in_function_trace(pipeline_model_name, run_pipeli ], } - expected_transaction_name = ( - "test_pipeline_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_pipeline_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_pipeline_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[pipeline_model_name], rollup_metrics=expected_scoped_metrics[pipeline_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_semi_supervised_models.py b/tests/mlmodel_sklearn/test_semi_supervised_models.py index 531202eb72..e047aa72ad 100644 --- a/tests/mlmodel_sklearn/test_semi_supervised_models.py +++ b/tests/mlmodel_sklearn/test_semi_supervised_models.py @@ -20,7 +20,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -46,14 
+45,8 @@ def test_model_methods_wrapped_in_function_trace(semi_supervised_model_name, run ], } - expected_transaction_name = ( - "test_semi_supervised_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_semi_supervised_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_semi_supervised_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[semi_supervised_model_name], rollup_metrics=expected_scoped_metrics[semi_supervised_model_name], background_task=True, @@ -82,14 +75,9 @@ def test_above_v1_0_model_methods_wrapped_in_function_trace(semi_supervised_mode ("Function/MLModel/Sklearn/Named/SelfTrainingClassifier.predict_proba", 1), ], } - expected_transaction_name = ( - "test_semi_supervised_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_semi_supervised_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_semi_supervised_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[semi_supervised_model_name], rollup_metrics=expected_scoped_metrics[semi_supervised_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_svm_models.py b/tests/mlmodel_sklearn/test_svm_models.py index a887c9e851..90236c0ca6 100644 --- a/tests/mlmodel_sklearn/test_svm_models.py +++ b/tests/mlmodel_sklearn/test_svm_models.py @@ -19,7 +19,6 @@ from newrelic.api.background_task import background_task from newrelic.common.package_version_utils import get_package_version_tuple -from newrelic.packages import six SKLEARN_VERSION = get_package_version_tuple("sklearn") @@ -68,14 +67,8 @@ def test_model_methods_wrapped_in_function_trace(svm_model_name, run_svm_model): ], } - expected_transaction_name = ( - "test_svm_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_svm_models:_test" - ) - @validate_transaction_metrics( - expected_transaction_name, + "test_svm_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[svm_model_name], rollup_metrics=expected_scoped_metrics[svm_model_name], background_task=True, diff --git a/tests/mlmodel_sklearn/test_tree_models.py b/tests/mlmodel_sklearn/test_tree_models.py index b30b7e2eac..775f817d05 100644 --- a/tests/mlmodel_sklearn/test_tree_models.py +++ b/tests/mlmodel_sklearn/test_tree_models.py @@ -18,7 +18,6 @@ ) from newrelic.api.background_task import background_task -from newrelic.packages import six def test_model_methods_wrapped_in_function_trace(tree_model_name, run_tree_model): @@ -51,14 +50,9 @@ def test_model_methods_wrapped_in_function_trace(tree_model_name, run_tree_model ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.score", 1), ], } - expected_transaction_name = ( - "test_tree_models:test_model_methods_wrapped_in_function_trace.._test" - if six.PY3 - else "test_tree_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_tree_models:test_model_methods_wrapped_in_function_trace.._test", scoped_metrics=expected_scoped_metrics[tree_model_name], rollup_metrics=expected_scoped_metrics[tree_model_name], background_task=True, @@ -100,12 +94,9 @@ def test_multiple_calls_to_model_methods(tree_model_name, run_tree_model): ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.score", 2), ], } - expected_transaction_name = ( - "test_tree_models:test_multiple_calls_to_model_methods.._test" if six.PY3 else 
"test_tree_models:_test" - ) @validate_transaction_metrics( - expected_transaction_name, + "test_tree_models:test_multiple_calls_to_model_methods.._test", scoped_metrics=expected_scoped_metrics[tree_model_name], rollup_metrics=expected_scoped_metrics[tree_model_name], background_task=True, diff --git a/tests/testing_support/external_fixtures.py b/tests/testing_support/external_fixtures.py index 52b57d4010..d28411b2c6 100644 --- a/tests/testing_support/external_fixtures.py +++ b/tests/testing_support/external_fixtures.py @@ -12,11 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import http.client as httplib -except ImportError: - import httplib - +import http.client as httplib from newrelic.api.external_trace import ExternalTrace from newrelic.api.transaction import current_transaction diff --git a/tests/testing_support/fixture/event_loop.py b/tests/testing_support/fixture/event_loop.py index 7a8eecc4e5..06b386c2cb 100644 --- a/tests/testing_support/fixture/event_loop.py +++ b/tests/testing_support/fixture/event_loop.py @@ -14,17 +14,11 @@ import pytest -from newrelic.packages import six -# Guard against Python 2 crashes -if six.PY2: - event_loop = None -else: +@pytest.fixture(scope="session") +def event_loop(): + from asyncio import new_event_loop, set_event_loop - @pytest.fixture(scope="session") - def event_loop(): - from asyncio import new_event_loop, set_event_loop - - loop = new_event_loop() - set_event_loop(loop) - yield loop + loop = new_event_loop() + set_event_loop(loop) + yield loop diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py index b5565a2706..1670bb0667 100644 --- a/tests/testing_support/fixtures.py +++ b/tests/testing_support/fixtures.py @@ -21,12 +21,9 @@ import threading import time -import pytest +from queue import Queue -try: - from Queue import Queue -except ImportError: - from queue import Queue +import pytest from testing_support.sample_applications import ( error_user_params_added, @@ -59,7 +56,6 @@ ) from newrelic.core.config import apply_config_setting, flatten_settings, global_settings from newrelic.network.exceptions import RetryDataForRequest -from newrelic.packages import six _logger = logging.getLogger("newrelic.tests") @@ -322,7 +318,11 @@ def _raise_background_exceptions(wrapped, instance, args, kwargs): assert done, "Timeout waiting for background task to finish." if exc_info is not None: - six.reraise(*exc_info) + # Reraise exception + if exc_info[1] is not None: + raise exc_info[1] + else: + raise exc_info[0]() return result @@ -1202,38 +1202,6 @@ def error_is_saved(error, app_name=None): return error_name in [e.type for e in errors if e.type == error_name] -def set_default_encoding(encoding): - """Changes the default encoding of the global environment. Only works in - Python 2, will cause an error in Python 3 - """ - - # If using this with other decorators/fixtures that depend on the system - # default encoding, this decorator must be on wrapped on top of them. - - @function_wrapper - def _set_default_encoding(wrapped, instance, args, kwargs): - # This technique of reloading the sys module is necessary because the - # method is removed during initialization of Python. Doing this is - # highly frowned upon, but it is the only way to test how our agent - # behaves when different sys encodings are used. 
diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py
index b5565a2706..1670bb0667 100644
--- a/tests/testing_support/fixtures.py
+++ b/tests/testing_support/fixtures.py
@@ -21,12 +21,9 @@
 import threading
 import time

-import pytest
+from queue import Queue

-try:
-    from Queue import Queue
-except ImportError:
-    from queue import Queue
+import pytest

 from testing_support.sample_applications import (
     error_user_params_added,
@@ -59,7 +56,6 @@
 )
 from newrelic.core.config import apply_config_setting, flatten_settings, global_settings
 from newrelic.network.exceptions import RetryDataForRequest
-from newrelic.packages import six

 _logger = logging.getLogger("newrelic.tests")

@@ -322,7 +318,11 @@ def _raise_background_exceptions(wrapped, instance, args, kwargs):
         assert done, "Timeout waiting for background task to finish."

         if exc_info is not None:
-            six.reraise(*exc_info)
+            # Reraise exception
+            if exc_info[1] is not None:
+                raise exc_info[1]
+            else:
+                raise exc_info[0]()

         return result

@@ -1202,38 +1202,6 @@ def error_is_saved(error, app_name=None):
     return error_name in [e.type for e in errors if e.type == error_name]


-def set_default_encoding(encoding):
-    """Changes the default encoding of the global environment. Only works in
-    Python 2, will cause an error in Python 3
-    """
-
-    # If using this with other decorators/fixtures that depend on the system
-    # default encoding, this decorator must be on wrapped on top of them.
-
-    @function_wrapper
-    def _set_default_encoding(wrapped, instance, args, kwargs):
-        # This technique of reloading the sys module is necessary because the
-        # method is removed during initialization of Python. Doing this is
-        # highly frowned upon, but it is the only way to test how our agent
-        # behaves when different sys encodings are used. For more information,
-        # see this Stack Overflow post: http://bit.ly/1xBNxRc
-
-        six.moves.reload_module(sys)  # pylint: disable=E1101
-        original_encoding = sys.getdefaultencoding()
-        sys.setdefaultencoding(encoding)  # pylint: disable=E1101
-
-        try:
-            result = wrapped(*args, **kwargs)
-        except:
-            raise
-        finally:
-            sys.setdefaultencoding(original_encoding)  # pylint: disable=E1101
-
-        return result
-
-    return _set_default_encoding
-
-
 def function_not_called(module, name):
     """Verify that a function is not called.
diff --git a/tests/testing_support/mock_external_http_server.py b/tests/testing_support/mock_external_http_server.py
index 0a2a65b4be..d8858816a6 100644
--- a/tests/testing_support/mock_external_http_server.py
+++ b/tests/testing_support/mock_external_http_server.py
@@ -15,7 +15,7 @@
 import socket
 import threading

-from newrelic.packages.six.moves import BaseHTTPServer
+from http.server import BaseHTTPRequestHandler, HTTPServer

 # This defines an external server test apps can make requests to (instead of
 # www.google.com for example). This provides 3 features:
@@ -46,7 +46,7 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs):
         super(MockExternalHTTPServer, self).__init__(*args, **kwargs)
         self.daemon = True
         handler = type('ResponseHandler',
-                (BaseHTTPServer.BaseHTTPRequestHandler, object,),
+                (BaseHTTPRequestHandler, object,),
                 {
                     'do_GET': handler,
                     'do_OPTIONS': handler,
@@ -58,7 +58,7 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs):
                 })

         if port:
-            self.httpd = BaseHTTPServer.HTTPServer(('localhost', port), handler)
+            self.httpd = HTTPServer(('localhost', port), handler)
             self.port = port
         else:
             # If port not set, try to bind to a port until successful
@@ -70,7 +70,7 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs):
                     # Obtain random open port
                     port = self.get_open_port()
                     # Attempt to bind to port
-                    self.httpd = BaseHTTPServer.HTTPServer(('localhost', port), handler)
+                    self.httpd = HTTPServer(('localhost', port), handler)
                     self.port = port
                 except OSError as exc:
                     # Reraise errors other than port already in use
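A note on the six.reraise() replacement in tests/testing_support/fixtures.py above: in Python 3 the traceback is attached to the exception object itself, so re-raising the stored value is usually sufficient. The closest one-to-one substitute for six.reraise(*exc_info), assuming a (type, value, traceback) triple captured with sys.exc_info(), is a sketch like:

    import sys

    def capture():
        try:
            1 / 0
        except ZeroDivisionError:
            return sys.exc_info()  # (type, value, traceback)

    exc_type, exc_value, exc_tb = capture()
    try:
        # Python 3 equivalent of six.reraise(exc_type, exc_value, exc_tb).
        raise exc_value.with_traceback(exc_tb)
    except ZeroDivisionError:
        pass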
diff --git a/tests/testing_support/mock_http_client.py b/tests/testing_support/mock_http_client.py
index de8566fdd6..e4c801e6bd 100644
--- a/tests/testing_support/mock_http_client.py
+++ b/tests/testing_support/mock_http_client.py
@@ -12,12 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import newrelic.packages.urllib3 as urllib3
+from urllib.parse import urlencode

-try:
-    from urllib import urlencode
-except ImportError:
-    from urllib.parse import urlencode
+import newrelic.packages.urllib3 as urllib3

 from newrelic.common.agent_http import BaseClient
diff --git a/tests/testing_support/sample_applications.py b/tests/testing_support/sample_applications.py
index 14032b23cb..ed0b83b89e 100644
--- a/tests/testing_support/sample_applications.py
+++ b/tests/testing_support/sample_applications.py
@@ -14,10 +14,7 @@

 import logging

-try:
-    from urllib2 import urlopen  # Py2.X
-except ImportError:
-    from urllib.request import urlopen  # Py3.X
+from urllib.request import urlopen

 import sqlite3 as db
diff --git a/tests/testing_support/validators/validate_code_level_metrics.py b/tests/testing_support/validators/validate_code_level_metrics.py
index c3a880b356..d27e800a3a 100644
--- a/tests/testing_support/validators/validate_code_level_metrics.py
+++ b/tests/testing_support/validators/validate_code_level_metrics.py
@@ -12,18 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from newrelic.packages import six
 from testing_support.validators.validate_span_events import validate_span_events
 from testing_support.fixtures import dt_enabled

 from newrelic.common.object_wrapper import function_wrapper


-def validate_code_level_metrics(namespace, function, py2_namespace=None, builtin=False, count=1, index=-1):
+def validate_code_level_metrics(namespace, function, builtin=False, count=1, index=-1):
     """Verify that code level metrics are generated for a callable."""

-    if six.PY2 and py2_namespace is not None:
-        namespace = py2_namespace
-
     if builtin:
         validator = validate_span_events(
             exact_agents={"code.function": function, "code.namespace": namespace, "code.filepath": "<builtin>"},
@@ -43,4 +39,4 @@ def wrapper(wrapped, instance, args, kwargs):
         validator(dt_enabled(wrapped))(*args, **kwargs)

-    return wrapper
\ No newline at end of file
+    return wrapper
diff --git a/tests/testing_support/validators/validate_custom_events.py b/tests/testing_support/validators/validate_custom_events.py
index 206ce08f1a..bc746e785e 100644
--- a/tests/testing_support/validators/validate_custom_events.py
+++ b/tests/testing_support/validators/validate_custom_events.py
@@ -18,7 +18,6 @@
 from testing_support.fixtures import catch_background_exceptions

 from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper
-from newrelic.packages import six


 def validate_custom_events(events):
@@ -76,15 +75,15 @@ def _check_event_attributes(expected, captured, mismatches):
             mismatches.append("key: timestamp, value:<%s>" % intrinsics["timestamp"])
             return False

-        captured_keys = set(six.iterkeys(captured[1]))
-        expected_keys = set(six.iterkeys(expected[1]))
+        captured_keys = set(captured[1].keys())
+        expected_keys = set(expected[1].keys())

         extra_keys = captured_keys - expected_keys
         if extra_keys:
             mismatches.append("extra_keys: %s" % str(tuple(extra_keys)))
             return False

-        for key, value in six.iteritems(expected[1]):
+        for key, value in expected[1].items():
             if key in captured[1]:
                 captured_value = captured[1].get(key, None)
             else:
diff --git a/tests/testing_support/validators/validate_error_trace_collector_json.py b/tests/testing_support/validators/validate_error_trace_collector_json.py
index e4765597a0..b74c3acb24 100644
--- a/tests/testing_support/validators/validate_error_trace_collector_json.py
+++ b/tests/testing_support/validators/validate_error_trace_collector_json.py
@@ -16,7 +16,6 @@

 from newrelic.common.encoding_utils import json_encode
 from newrelic.common.object_wrapper import transient_function_wrapper
-from newrelic.packages import six


 def validate_error_trace_collector_json():
@@ -41,9 +40,9 @@ def _validate_error_trace_collector_json(wrapped, instance, args, kwargs):
             err = decoded_json[1][0]
             assert len(err) == 5
             assert isinstance(err[0], (int, float))
-            assert isinstance(err[1], six.string_types)  # path
-            assert isinstance(err[2], six.string_types)  # error message
-            assert isinstance(err[3], six.string_types)  # exception name
+            assert isinstance(err[1], str)  # path
+            assert isinstance(err[2], str)  # error message
+            assert isinstance(err[3], str)  # exception name

             parameters = err[4]
             parameter_fields = ["userAttributes", "stack_trace", "agentAttributes", "intrinsics"]
diff --git a/tests/testing_support/validators/validate_log_events.py b/tests/testing_support/validators/validate_log_events.py
index 4008d0e52d..c62413f3da 100644
--- a/tests/testing_support/validators/validate_log_events.py
+++ b/tests/testing_support/validators/validate_log_events.py
@@ -17,7 +17,6 @@
 from testing_support.fixtures import catch_background_exceptions

 from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper
-from newrelic.packages import six


 def validate_log_events(events=None, required_attrs=None, forgone_attrs=None):
@@ -65,7 +64,7 @@ def _validate_log_events(wrapped, instance, args, kwargs):
         return val

     def _check_log_attributes(expected, required_attrs, forgone_attrs, captured, mismatches):
-        for key, value in six.iteritems(expected):
+        for key, value in expected.items():
             if hasattr(captured, key):
                 captured_value = getattr(captured, key, None)
             elif key in captured.attributes:
diff --git a/tests/testing_support/validators/validate_log_events_outside_transaction.py b/tests/testing_support/validators/validate_log_events_outside_transaction.py
index 6c17d089b5..053c137186 100644
--- a/tests/testing_support/validators/validate_log_events_outside_transaction.py
+++ b/tests/testing_support/validators/validate_log_events_outside_transaction.py
@@ -17,7 +17,6 @@
 from testing_support.fixtures import catch_background_exceptions

 from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper
-from newrelic.packages import six


 def validate_log_events_outside_transaction(events=None, required_attrs=None, forgone_attrs=None):
@@ -62,7 +61,7 @@ def _validate_log_events_outside_transaction(wrapped, instance, args, kwargs):
         return val

     def _check_log_attributes(expected, required_attrs, forgone_attrs, captured, mismatches):
-        for key, value in six.iteritems(expected):
+        for key, value in expected.items():
             if hasattr(captured, key):
                 captured_value = getattr(captured, key, None)
             elif key in captured.attributes:
diff --git a/tests/testing_support/validators/validate_ml_events.py b/tests/testing_support/validators/validate_ml_events.py
index 275a9b2e1b..37830a0851 100644
--- a/tests/testing_support/validators/validate_ml_events.py
+++ b/tests/testing_support/validators/validate_ml_events.py
@@ -18,7 +18,6 @@
 from testing_support.fixtures import catch_background_exceptions

 from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper
-from newrelic.packages import six


 def validate_ml_events(events):
@@ -76,15 +75,15 @@ def _check_event_attributes(expected, captured, mismatches):
             mismatches.append("key: timestamp, value:<%s>" % intrinsics["timestamp"])
             return False

-        captured_keys = set(six.iterkeys(captured[1]))
-        expected_keys = set(six.iterkeys(expected[1]))
+        captured_keys = set(captured[1].keys())
+        expected_keys = set(expected[1].keys())

         extra_keys = captured_keys - expected_keys
         if extra_keys:
             mismatches.append("extra_keys: %s" % str(tuple(extra_keys)))
             return False

-        for key, value in six.iteritems(expected[1]):
+        for key, value in expected[1].items():
             if key in captured[1]:
                 captured_value = captured[1].get(key, None)
             else:
diff --git a/tests/testing_support/validators/validate_slow_sql_collector_json.py b/tests/testing_support/validators/validate_slow_sql_collector_json.py
index 40cef39d2c..19a580846d 100644
--- a/tests/testing_support/validators/validate_slow_sql_collector_json.py
+++ b/tests/testing_support/validators/validate_slow_sql_collector_json.py
@@ -17,7 +17,6 @@
 from newrelic.common.object_wrapper import transient_function_wrapper
 from newrelic.common.system_info import LOCALHOST_EQUIVALENTS
 from newrelic.core.database_utils import SQLConnections
-from newrelic.packages import six


 def validate_slow_sql_collector_json(required_params=set(),
@@ -52,16 +51,16 @@ def _validate_slow_sql_collector_json(wrapped, instance, args, kwargs):
             slow_sql_list = instance.slow_sql_data(connections)

             for slow_sql in slow_sql_list:
-                assert isinstance(slow_sql[0], six.string_types)  # txn_name
-                assert isinstance(slow_sql[1], six.string_types)  # txn_url
+                assert isinstance(slow_sql[0], str)  # txn_name
+                assert isinstance(slow_sql[1], str)  # txn_url
                 assert isinstance(slow_sql[2], int)  # sql_id
-                assert isinstance(slow_sql[3], six.string_types)  # sql
-                assert isinstance(slow_sql[4], six.string_types)  # metric_name
+                assert isinstance(slow_sql[3], str)  # sql
+                assert isinstance(slow_sql[4], str)  # metric_name
                 assert isinstance(slow_sql[5], int)  # count
                 assert isinstance(slow_sql[6], float)  # total
                 assert isinstance(slow_sql[7], float)  # min
                 assert isinstance(slow_sql[8], float)  # max
-                assert isinstance(slow_sql[9], six.string_types)  # params
+                assert isinstance(slow_sql[9], str)  # params

                 params = slow_sql[9]
                 data = unpack_field(params)
assert_isinstance(intrinsics["parentId"], six.string_types) - assert_isinstance(intrinsics["transactionId"], six.string_types) + assert_isinstance(intrinsics["parentId"], str) + assert_isinstance(intrinsics["transactionId"], str) intrinsics["sampled"] is True assert_isinstance(intrinsics["priority"], float) assert_isinstance(intrinsics["timestamp"], int) @@ -177,7 +176,7 @@ def _check_span_intrinsics(intrinsics): ts = ts.double_value assert ts <= int(time.time() * 1000) assert_isinstance(intrinsics["duration"], float) - assert_isinstance(intrinsics["name"], six.string_types) - assert_isinstance(intrinsics["category"], six.string_types) + assert_isinstance(intrinsics["name"], str) + assert_isinstance(intrinsics["category"], str) if "nr.entryPoint" in intrinsics: assert check_value_equals(intrinsics, "nr.entryPoint", True) diff --git a/tests/testing_support/validators/validate_tt_collector_json.py b/tests/testing_support/validators/validate_tt_collector_json.py index 28c9e93a39..e411dd928f 100644 --- a/tests/testing_support/validators/validate_tt_collector_json.py +++ b/tests/testing_support/validators/validate_tt_collector_json.py @@ -16,7 +16,6 @@ from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper from newrelic.common.system_info import LOCALHOST_EQUIVALENTS from newrelic.core.database_utils import SQLConnections -from newrelic.packages import six def _lookup_string_table(name, string_table, default=None): @@ -68,12 +67,12 @@ def _validate_trace(trace): assert isinstance(trace[0], float) # absolute start time (ms) assert isinstance(trace[1], float) # duration (ms) assert trace[0] > 0 # absolute time (ms) - assert isinstance(trace[2], six.string_types) # transaction name + assert isinstance(trace[2], str) # transaction name if trace[2].startswith("WebTransaction"): if exclude_request_uri: assert trace[3] is None # request url else: - assert isinstance(trace[3], six.string_types) + assert isinstance(trace[3], str) # query parameters should not be captured assert "?" not in trace[3] @@ -110,7 +109,7 @@ def _validate_trace(trace): trace_segment = children[0] assert isinstance(trace_segment[0], float) # entry timestamp assert isinstance(trace_segment[1], float) # exit timestamp - assert isinstance(trace_segment[2], six.string_types) # scope + assert isinstance(trace_segment[2], str) # scope assert isinstance(trace_segment[3], dict) # request params assert isinstance(trace_segment[4], list) # children @@ -158,7 +157,7 @@ def _check_params_and_start_time(node): assert "userAttributes" in attributes assert "agentAttributes" in attributes - assert isinstance(trace[5], six.string_types) # GUID + assert isinstance(trace[5], str) # GUID assert trace[6] is None # reserved for future use assert trace[7] is False # deprecated force persist flag @@ -168,11 +167,11 @@ def _check_params_and_start_time(node): # Synthetics ID - assert trace[9] is None or isinstance(trace[9], six.string_types) + assert trace[9] is None or isinstance(trace[9], str) assert isinstance(string_table, list) for name in string_table: - assert isinstance(name, six.string_types) # metric name + assert isinstance(name, str) # metric name _new_wrapper = _validate_tt_collector_json(wrapped) val = _new_wrapper(*args, **kwargs) diff --git a/tox.ini b/tox.ini index 923a2cf0a1..83140cce92 100644 --- a/tox.ini +++ b/tox.ini @@ -16,7 +16,7 @@ ; framework_aiohttp-aiohttp01: aiohttp<2 ; framework_aiohttp-aiohttp0202: aiohttp<2.3 ; 3. Python version required. Uses the standard tox definitions. 
diff --git a/tox.ini b/tox.ini
index 923a2cf0a1..83140cce92 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,7 +16,7 @@
 ; framework_aiohttp-aiohttp01: aiohttp<2
 ; framework_aiohttp-aiohttp0202: aiohttp<2.3
 ; 3. Python version required. Uses the standard tox definitions. (https://tox.readthedocs.io/en/latest/config.html#tox-environments)
-; Examples: py27,py37,py38,py39,pypy27,pypy310
+; Examples: py37,py38,py39,pypy310
 ; 4. Library and version (Optional). Used when testing multiple versions of the library, and may be omitted when only testing a single version.
 ; Versions should be specified with 2 digits per version number, so <3 becomes 02 and <3.5 becomes 0304. latest and master are also acceptable versions.
 ; Examples: uvicorn03, CherryPy0302, uvicornlatest
@@ -37,48 +37,44 @@
 ; Full Examples:
 ;   - memcached-datastore_bmemcached-py37-memcached030
 ;   - python-agent_unittests-py38-with_extensions
-;   - python-adapter_gevent-py27
+;   - python-adapter_gevent-py39

 [tox]
-requires = virtualenv<20.22.0
 setupdir = {toxinidir}
 ; Fail tests when interpreters are missing.
 skip_missing_interpreters = false
 envlist =
-    elasticsearchserver07-datastore_elasticsearch-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}-elasticsearch07,
+    elasticsearchserver07-datastore_elasticsearch-{py37,py38,py39,py310,py311,py312,pypy310}-elasticsearch07,
     elasticsearchserver08-datastore_elasticsearch-{py37,py38,py39,py310,py311,py312,pypy310}-elasticsearch08,
     firestore-datastore_firestore-{py37,py38,py39,py310,py311,py312},
     grpc-framework_grpc-{py37,py38,py39,py310,py311,py312}-grpclatest,
-    grpc-framework_grpc-py27-grpc0125,
-    kafka-messagebroker_confluentkafka-{py27,py39}-confluentkafka{0107,0106},
+    kafka-messagebroker_confluentkafka-py39-confluentkafka{0107,0106},
     kafka-messagebroker_confluentkafka-{py37,py38,py39,py310,py311,py312}-confluentkafkalatest,
-    ; confluent-kafka had a bug in 1.8.2's setup.py file which was incompatible with 2.7.
-    kafka-messagebroker_confluentkafka-{py39}-confluentkafka{0108},
-    kafka-messagebroker_kafkapython-{py27,py38}-kafkapython{020001,020000},
-    kafka-messagebroker_kafkapython-{pypy27,py27,py37,py38,pypy310}-kafkapythonlatest,
-    memcached-datastore_bmemcached-{pypy27,py27,py37,py38,py39,py310,py311,py312}-memcached030,
+    kafka-messagebroker_confluentkafka-py39-confluentkafka{0108},
+    kafka-messagebroker_kafkapython-py38-kafkapython{020001,020000},
+    kafka-messagebroker_kafkapython-{py37,py38,pypy310}-kafkapythonlatest,
+    memcached-datastore_bmemcached-{py37,py38,py39,py310,py311,py312}-memcached030,
     memcached-datastore_aiomcache-{py38,py39,py310,py311,py312}-memcached030,
     memcached-datastore_memcache-{py37,py38,py39,py310,py311,py312,pypy310}-memcached01,
-    memcached-datastore_pylibmc-{py27,py37},
-    memcached-datastore_pymemcache-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310},
-    mongodb-datastore_pymongo-{py27,py37,py38,py39,py310,py311,py312,pypy27}-pymongo03,
-    mongodb-datastore_pymongo-{py37,py38,py39,py310,py311,py312,pypy27,pypy310}-pymongo04,
+    memcached-datastore_pylibmc-py37,
+    memcached-datastore_pymemcache-{py37,py38,py39,py310,py311,py312,pypy310},
+    mongodb-datastore_pymongo-{py37,py38,py39,py310,py311,py312}-pymongo03,
+    mongodb-datastore_pymongo-{py37,py38,py39,py310,py311,py312,pypy310}-pymongo04,
     mssql-datastore_pymssql-{py37,py38,py39,py310,py311,py312},
-    mysql-datastore_mysql-mysql080023-py27,
     mysql-datastore_mysql-mysqllatest-{py37,py38,py39,py310,py311,py312},
-    mysql-datastore_pymysql-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310},
+    mysql-datastore_pymysql-{py37,py38,py39,py310,py311,py312,pypy310},
     postgres16-datastore_asyncpg-{py37,py38,py39,py310,py311,py312},
     postgres16-datastore_psycopg-{py38,py39,py310,py311,py312,pypy310}-psycopglatest,
    postgres16-datastore_psycopg-py312-psycopg_{purepython,binary,compiled}0301,
-    postgres16-datastore_psycopg2-{py27,py37,py38,py39,py310,py311,py312}-psycopg2latest,
-    postgres16-datastore_psycopg2cffi-{py27,pypy27,py37,py38,py39,py310,py311,py312}-psycopg2cffilatest,
-    postgres16-datastore_pyodbc-{py27,py37,py38,py39,py310,py311,py312}-pyodbclatest,
+    postgres16-datastore_psycopg2-{py37,py38,py39,py310,py311,py312}-psycopg2latest,
+    postgres16-datastore_psycopg2cffi-{py37,py38,py39,py310,py311,py312}-psycopg2cffilatest,
+    postgres16-datastore_pyodbc-{py37,py38,py39,py310,py311,py312}-pyodbclatest,
     postgres9-datastore_postgresql-{py37,py38,py39},
     python-adapter_asgiref-{py37,py38,py39,py310,py311,py312,pypy310}-asgireflatest,
     python-adapter_asgiref-py310-asgiref{0303,0304,0305,0306,0307},
-    python-adapter_cheroot-{py27,py37,py38,py39,py310,py311,py312},
+    python-adapter_cheroot-{py37,py38,py39,py310,py311,py312},
     python-adapter_daphne-{py37,py38,py39,py310,py311,py312}-daphnelatest,
-    python-adapter_gevent-{py27,py37,py38,py310,py311,py312},
+    python-adapter_gevent-{py37,py38,py310,py311,py312},
     python-adapter_gunicorn-{py37,py38,py39,py310,py311,py312}-aiohttp03-gunicornlatest,
     python-adapter_hypercorn-{py38,py39,py310,py311,py312}-hypercornlatest,
     python-adapter_hypercorn-py38-hypercorn{0010,0011,0012,0013},
@@ -87,47 +83,45 @@ envlist =
     python-adapter_waitress-{py37,py38,py39,py310,py311,py312}-waitresslatest,
     python-adapter_waitress-{py37,py38,py39,py310}-waitress02,
     python-adapter_waitress-{py37,py38,py39}-waitress010404,
-    python-agent_features-{py27,py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
-    python-agent_features-{pypy27,pypy310}-without_extensions,
+    python-agent_features-{py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
+    python-agent_features-pypy310-without_extensions,
     python-agent_streaming-{py37,py38,py39,py310,py311,py312}-protobuf04-{with,without}_extensions,
-    python-agent_streaming-py27-grpc0125-{with,without}_extensions,
     python-agent_streaming-py39-protobuf{03,0319}-{with,without}_extensions,
-    python-agent_unittests-{py27,py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
-    python-agent_unittests-{pypy27,pypy310}-without_extensions,
+    python-agent_unittests-{py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
+    python-agent_unittests-pypy310-without_extensions,
     python-application_celery-{py37,py38,py39,py310,py311,py312,pypy310}-celerylatest,
     python-application_celery-py311-celery{0503,0502,0501},
     python-component_djangorestframework-{py37,py38,py39,py310,py311,py312}-djangorestframeworklatest,
     python-component_flask_rest-{py38,py39,py310,py311,py312,pypy310}-flaskrestxlatest,
-    python-component_flask_rest-{py27,pypy27}-flaskrestx051,
     python-component_flask_rest-py37-flaskrestx110,
     python-component_graphqlserver-{py37,py38,py39,py310,py311,py312},
     python-component_tastypie-{py37,py38,py39,py310,py311,py312,pypy310}-tastypielatest,
     python-coroutines_asyncio-{py37,py38,py39,py310,py311,py312,pypy310},
-    python-cross_agent-{py27,py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
-    python-cross_agent-pypy27-without_extensions,
-    python-datastore_sqlite-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310},
+    python-cross_agent-{py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
+    python-datastore_sqlite-{py37,py38,py39,py310,py311,py312,pypy310},
     python-external_aiobotocore-{py38,py39,py310,py311,py312}-aiobotocorelatest,
     python-external_botocore-{py38,py39,py310,py311,py312}-botocorelatest,
python-external_botocore-{py311}-botocorelatest-langchain, python-external_botocore-py310-botocore0125, python-external_botocore-py311-botocore128, - python-external_feedparser-py27-feedparser{05,06}, - python-external_http-{py27,py37,py38,py39,py310,py311,py312,pypy27}, - python-external_httplib-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}, - python-external_httplib2-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}, + python-external_feedparser-{py37,py38,py39,py310,py311,py312}-feedparser06, + python-external_http-{py37,py38,py39,py310,py311,py312}, + python-external_httplib-{py37,py38,py39,py310,py311,py312,pypy310}, + python-external_httplib2-{py37,py38,py39,py310,py311,py312,pypy310}, python-external_httpx-{py37,py38,py39,py310,py311,py312}, - python-external_requests-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}, - python-external_urllib3-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}-urllib3latest, - python-external_urllib3-{py27,py37,pypy27}-urllib3{0109}, + python-external_requests-{py37,py38,py39,py310,py311,py312,pypy310}, + python-external_urllib3-{py37,py38,py39,py310,py311,py312,pypy310}-urllib3latest, + python-external_urllib3-py37-urllib3{0109}, python-framework_aiohttp-{py37,py38,py39,py310,py311,py312,pypy310}-aiohttp03, python-framework_ariadne-{py37,py38,py39,py310,py311,py312}-ariadnelatest, python-framework_ariadne-py37-ariadne{0011,0012,0013}, - python-framework_bottle-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}-bottle0012, + python-framework_bottle-{py37,py38,py39,py310,py311,py312,pypy310}-bottle0012, python-framework_cherrypy-{py37,py38,py39,py310,py311,py312,pypy310}-CherryPylatest, python-framework_django-{py37,py38,py39,py310,py311,py312}-Djangolatest, python-framework_django-{py39}-Django{0202,0300,0301,0302,0401}, python-framework_falcon-{py37,py312}-falcon0300, - python-framework_falcon-{py37,py38,py39,py310,py311,py312,pypy310}-falcon{latest,master}, + python-framework_falcon-{py37,py38,py39,py310,py311,py312,pypy310}-falconlatest, + python-framework_falcon-{py38,py39,py310,py311,py312,pypy310}-falconmaster, python-framework_fastapi-{py37,py38,py39,py310,py311,py312}, python-framework_flask-py37-flask020205, python-framework_flask-{py38,py39,py310,py311,py312,pypy310}-flask{020205,latest,master}, @@ -145,7 +139,7 @@ envlist = python-framework_strawberry-{py38,py39,py310,py311,py312}-strawberry02352, python-framework_strawberry-{py37,py38,py39,py310,py311,py312}-strawberrylatest, python-framework_tornado-{py38,py39,py310,py311,py312}-tornado{latest,master}, - python-logger_logging-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}, + python-logger_logging-{py37,py38,py39,py310,py311,py312,pypy310}, python-logger_loguru-{py37,py38,py39,py310,py311,py312,pypy310}-logurulatest, python-logger_loguru-py39-loguru{06,05}, python-logger_structlog-{py37,py38,py39,py310,py311,py312,pypy310}-structloglatest, @@ -156,21 +150,20 @@ envlist = python-mlmodel_openai-openailatest-{py37,py38,py39,py310,py311,py312}, python-mlmodel_sklearn-{py37}-scikitlearn0101, python-mlmodel_sklearn-{py38,py39,py310,py311,py312}-scikitlearnlatest, - python-template_genshi-{py27,py37,py38,py39,py310,py311,py312}-genshilatest, + python-template_genshi-{py37,py38,py39,py310,py311,py312}-genshilatest, python-template_jinja2-{py38,py39,py310,py311,py312}-jinja2latest, python-template_jinja2-py37-jinja2030103, - python-template_mako-{py27,py37,py38,py39,py310,py311,py312}, + python-template_mako-{py37,py38,py39,py310,py311,py312}, 
rabbitmq-messagebroker_pika-{py37,py38,py39,py310,py311,py312,pypy310}-pikalatest, redis-datastore_redis-{py37,py38,py39,py310,py311,py312,pypy310}-redis{0400,latest}, rediscluster-datastore_rediscluster-{py37,py311,py312,pypy310}-redislatest, - solr-datastore_pysolr-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310}, + solr-datastore_pysolr-{py37,py38,py39,py310,py311,py312,pypy310}, [testenv] deps = # Base Dependencies {py38,py39,py310,py311,py312,pypy310}: pytest==8.2.1 py37: pytest==7.4.4 - {py27,pypy27}: pytest==4.6.11 iniconfig coverage WebTest==2.0.35 @@ -204,8 +197,7 @@ deps = adapter_waitress-waitress02: waitress<2.1 adapter_waitress-waitresslatest: waitress agent_features: beautifulsoup4 - agent_features-{py37,py38,py39,py310,py311,py312,pypy310}: protobuf - agent_features-{py27,pypy27}: protobuf<3.18.0 + agent_features: protobuf application_celery-celerylatest: celery[pytest] application_celery-celery0503: celery[pytest]<5.4 application_celery-celery0502: celery[pytest]<5.3 @@ -282,7 +274,6 @@ deps = external_botocore-botocore128: botocore<1.29 external_botocore-botocore0125: botocore<1.26 external_botocore-{py38,py39,py310,py311,py312}: moto - external_feedparser-feedparser05: feedparser<6 external_feedparser-feedparser06: feedparser<7 external_httplib2: httplib2<1.0 external_httpx: httpx<0.17 @@ -388,7 +379,6 @@ deps = logger_structlog-structloglatest: structlog messagebroker_pika-pikalatest: pika messagebroker_pika: tornado<5 - messagebroker_pika-{py27,pypy27}: enum34 messagebroker_confluentkafka-confluentkafkalatest: confluent-kafka messagebroker_confluentkafka-confluentkafka0108: confluent-kafka<1.9 messagebroker_confluentkafka-confluentkafka0107: confluent-kafka<1.8 @@ -410,9 +400,9 @@ setenv = without_extensions: NEW_RELIC_EXTENSIONS = false agent_features: NEW_RELIC_APDEX_T = 1000 framework_grpc: PYTHONPATH={toxinidir}/tests/:{toxinidir}/tests/framework_grpc/sample_application - framework_tornado-{py38,py39,py310,py311,py312}: PYCURL_SSL_LIBRARY=openssl - framework_tornado-{py38,py39,py310,py311,py312}: LDFLAGS=-L/usr/local/opt/openssl/lib - framework_tornado-{py38,py39,py310,py311,py312}: CPPFLAGS=-I/usr/local/opt/openssl/include + framework_tornado: PYCURL_SSL_LIBRARY=openssl + framework_tornado: LDFLAGS=-L/usr/local/opt/openssl/lib + framework_tornado: CPPFLAGS=-I/usr/local/opt/openssl/include passenv = NEW_RELIC_DEVELOPER_MODE @@ -427,7 +417,7 @@ commands = framework_grpc: --grpc_python_out={toxinidir}/tests/framework_grpc/sample_application \ framework_grpc: /{toxinidir}/tests/framework_grpc/sample_application/sample_application.proto - framework_tornado-{py38,py39,py310,py311,py312}: pip install --ignore-installed --config-settings="--build-option=--with-openssl" pycurl + framework_tornado: pip install --ignore-installed --config-settings="--build-option=--with-openssl" pycurl coverage run -m pytest -v [] allowlist_externals={toxinidir}/.github/scripts/* From 7603533711cfebe7e43da88f89f71ecc792b61a2 Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon, 26 Aug 2024 09:16:41 -0700 Subject: [PATCH 2/7] Replace old style classes with new style (#1201) --- newrelic/api/application.py | 2 +- newrelic/api/asgi_application.py | 2 +- newrelic/api/cat_header_mixin.py | 2 +- newrelic/api/error_trace.py | 2 +- newrelic/api/function_profile.py | 4 ++-- newrelic/api/profile_trace.py | 2 +- newrelic/api/solr_trace.py | 2 +- newrelic/api/time_trace.py | 2 +- newrelic/api/transaction.py | 4 ++-- newrelic/api/web_transaction.py 
| 2 +- newrelic/api/wsgi_application.py | 6 +++--- newrelic/common/agent_http.py | 6 +++--- newrelic/common/async_proxy.py | 4 ++-- newrelic/common/stopwatch.py | 2 +- newrelic/common/streaming_utils.py | 4 ++-- newrelic/common/utilization.py | 2 +- newrelic/console.py | 4 ++-- newrelic/core/adaptive_sampler.py | 2 +- newrelic/core/agent.py | 2 +- newrelic/core/agent_protocol.py | 2 +- newrelic/core/agent_streaming.py | 2 +- newrelic/core/application.py | 2 +- newrelic/core/attribute_filter.py | 4 ++-- newrelic/core/config.py | 2 +- newrelic/core/context.py | 2 +- newrelic/core/data_collector.py | 2 +- newrelic/core/database_utils.py | 8 ++++---- newrelic/core/graphql_utils.py | 2 +- newrelic/core/internal_metrics.py | 6 +++--- newrelic/core/node_mixin.py | 2 +- newrelic/core/profile_sessions.py | 8 ++++---- newrelic/core/rules_engine.py | 4 ++-- newrelic/core/stats_engine.py | 8 ++++---- newrelic/core/string_table.py | 2 +- newrelic/core/thread_utilization.py | 2 +- newrelic/core/trace_cache.py | 2 +- newrelic/hooks/component_piston.py | 4 ++-- newrelic/hooks/database_asyncpg.py | 2 +- newrelic/hooks/external_feedparser.py | 2 +- newrelic/hooks/framework_pylons.py | 2 +- newrelic/hooks/framework_tornado.py | 2 +- newrelic/hooks/framework_web2py.py | 2 +- newrelic/hooks/template_genshi.py | 4 ++-- newrelic/hooks/template_mako.py | 2 +- newrelic/samplers/cpu_usage.py | 2 +- newrelic/samplers/data_sampler.py | 2 +- newrelic/samplers/gc_data.py | 2 +- tests/adapter_gunicorn/worker.py | 2 +- tests/agent_features/_test_code_level_metrics.py | 4 ++-- tests/agent_features/test_asgi_distributed_tracing.py | 2 +- tests/agent_features/test_attribute.py | 6 +++--- tests/agent_features/test_lambda_handler.py | 2 +- tests/agent_features/test_log_events.py | 4 ++-- tests/agent_features/test_logs_in_context.py | 4 ++-- tests/agent_features/test_serverless_mode.py | 2 +- tests/agent_features/test_span_events.py | 2 +- tests/agent_unittests/_test_import_hook.py | 4 ++-- tests/agent_unittests/conftest.py | 2 +- tests/agent_unittests/test_agent.py | 2 +- tests/agent_unittests/test_agent_protocol.py | 2 +- tests/agent_unittests/test_environment.py | 4 ++-- tests/agent_unittests/test_http_client.py | 2 +- tests/agent_unittests/test_trace_cache.py | 2 +- tests/agent_unittests/test_utilization_settings.py | 2 +- tests/component_tastypie/test_application.py | 2 +- tests/cross_agent/test_boot_id_utilization_data.py | 2 +- tests/cross_agent/test_lambda_event_source.py | 2 +- tests/cross_agent/test_pcf_utilization_data.py | 4 ++-- tests/cross_agent/test_sql_obfuscation.py | 2 +- tests/cross_agent/test_utilization_configs.py | 2 +- tests/datastore_aioredis/test_custom_conn_pool.py | 2 +- tests/datastore_aredis/test_custom_conn_pool.py | 2 +- tests/datastore_psycopg/test_forward_compat.py | 2 +- tests/datastore_psycopg2/test_forward_compat.py | 2 +- tests/datastore_redis/test_custom_conn_pool.py | 2 +- tests/framework_cherrypy/test_application.py | 2 +- tests/framework_cherrypy/test_dispatch.py | 2 +- tests/framework_cherrypy/test_resource.py | 2 +- tests/framework_cherrypy/test_routes.py | 2 +- tests/framework_django/middleware.py | 2 +- tests/framework_falcon/_target_application.py | 4 ++-- tests/framework_grpc/sample_application/__init__.py | 2 +- tests/framework_pyramid/_test_application.py | 2 +- tests/framework_sanic/conftest.py | 2 +- tests/framework_starlette/_test_application.py | 2 +- tests/messagebroker_kafkapython/conftest.py | 4 ++-- tests/testing_support/asgi_testing.py | 4 ++-- 
tests/testing_support/external_fixtures.py | 2 +- tests/testing_support/fixtures.py | 2 +- tests/testing_support/mock_external_grpc_server.py | 2 +- 90 files changed, 125 insertions(+), 125 deletions(-) diff --git a/newrelic/api/application.py b/newrelic/api/application.py index cc613a4dd9..e6f2832e5d 100644 --- a/newrelic/api/application.py +++ b/newrelic/api/application.py @@ -20,7 +20,7 @@ import newrelic.core.config -class Application(object): +class Application(): _lock = threading.Lock() _instances = {} diff --git a/newrelic/api/asgi_application.py b/newrelic/api/asgi_application.py index 72e2da1a07..6650923cce 100644 --- a/newrelic/api/asgi_application.py +++ b/newrelic/api/asgi_application.py @@ -49,7 +49,7 @@ def double_to_single_callable(wrapped, instance, args, kwargs): return coro_function_wrapper(coro_function, receive, send) -class ASGIBrowserMiddleware(object): +class ASGIBrowserMiddleware(): def __init__(self, app, transaction=None, search_maximum=64 * 1024): self.app = app self.send = None diff --git a/newrelic/api/cat_header_mixin.py b/newrelic/api/cat_header_mixin.py index b8251fdca1..1455cba288 100644 --- a/newrelic/api/cat_header_mixin.py +++ b/newrelic/api/cat_header_mixin.py @@ -17,7 +17,7 @@ # CatHeaderMixin assumes the mixin class also inherits from TimeTrace -class CatHeaderMixin(object): +class CatHeaderMixin(): cat_id_key = 'X-NewRelic-ID' cat_transaction_key = 'X-NewRelic-Transaction' cat_appdata_key = 'X-NewRelic-App-Data' diff --git a/newrelic/api/error_trace.py b/newrelic/api/error_trace.py index a6fedeced5..8b2c8d4ef7 100644 --- a/newrelic/api/error_trace.py +++ b/newrelic/api/error_trace.py @@ -19,7 +19,7 @@ from newrelic.common.object_wrapper import FunctionWrapper, wrap_object -class ErrorTrace(object): +class ErrorTrace(): def __init__( self, ignore_errors=[], diff --git a/newrelic/api/function_profile.py b/newrelic/api/function_profile.py index ae691f0c99..127b25072d 100644 --- a/newrelic/api/function_profile.py +++ b/newrelic/api/function_profile.py @@ -20,7 +20,7 @@ from newrelic.common.object_wrapper import FunctionWrapper, wrap_object -class FunctionProfile(object): +class FunctionProfile(): def __init__(self, profile): self.profile = profile @@ -33,7 +33,7 @@ def __exit__(self, exc, value, tb): self.profile.disable() pass -class FunctionProfileSession(object): +class FunctionProfileSession(): def __init__(self, filename, delay=1.0, checkpoint=30): self.filename = filename % { 'pid': os.getpid() } diff --git a/newrelic/api/profile_trace.py b/newrelic/api/profile_trace.py index 98a321cb92..5714d7e822 100644 --- a/newrelic/api/profile_trace.py +++ b/newrelic/api/profile_trace.py @@ -25,7 +25,7 @@ AGENT_PACKAGE_DIRECTORY = os.path.dirname(AGENT_PACKAGE_FILE) + "/" -class ProfileTrace(object): +class ProfileTrace(): def __init__(self, depth): self.function_traces = [] self.maximum_depth = depth diff --git a/newrelic/api/solr_trace.py b/newrelic/api/solr_trace.py index 6907f20f8b..c249b5a0d0 100644 --- a/newrelic/api/solr_trace.py +++ b/newrelic/api/solr_trace.py @@ -55,7 +55,7 @@ def create_node(self): ) -class SolrTraceWrapper(object): +class SolrTraceWrapper(): def __init__(self, wrapped, library, command): if isinstance(wrapped, tuple): (instance, wrapped) = wrapped diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 5abbc0bb6e..4531907c08 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -33,7 +33,7 @@ _logger = logging.getLogger(__name__) -class TimeTrace(object): +class TimeTrace(): def 
__init__(self, parent=None, source=None): self.parent = parent self.root = None diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index 94d3f79cde..a80812b25b 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -145,7 +145,7 @@ def root(self, value): pass -class CachedPath(object): +class CachedPath(): def __init__(self, transaction): self._name = None self.transaction = weakref.ref(transaction) @@ -161,7 +161,7 @@ def path(self): return "Unknown" -class Transaction(object): +class Transaction(): STATE_PENDING = 0 STATE_RUNNING = 1 STATE_STOPPED = 2 diff --git a/newrelic/api/web_transaction.py b/newrelic/api/web_transaction.py index 66ee81015a..60e16d0897 100644 --- a/newrelic/api/web_transaction.py +++ b/newrelic/api/web_transaction.py @@ -526,7 +526,7 @@ def browser_monitoring_intrinsics(self, obfuscation_key): return intrinsics -class WSGIHeaderProxy(object): +class WSGIHeaderProxy(): def __init__(self, environ): self.environ = environ self.length = None diff --git a/newrelic/api/wsgi_application.py b/newrelic/api/wsgi_application.py index f5f234a2e4..874665e047 100644 --- a/newrelic/api/wsgi_application.py +++ b/newrelic/api/wsgi_application.py @@ -29,7 +29,7 @@ _logger = logging.getLogger(__name__) -class _WSGIApplicationIterable(object): +class _WSGIApplicationIterable(): def __init__(self, transaction, generator): self.transaction = transaction self.generator = generator @@ -95,7 +95,7 @@ def close(self): self.closed = True -class _WSGIInputWrapper(object): +class _WSGIInputWrapper(): def __init__(self, transaction, input): self.__transaction = transaction self.__input = input @@ -150,7 +150,7 @@ def readlines(self, *args, **kwargs): return lines -class _WSGIApplicationMiddleware(object): +class _WSGIApplicationMiddleware(): # This is a WSGI middleware for automatically inserting RUM into # HTML responses. It only works for where a WSGI application is # returning response content via a iterable/generator. 
It does not diff --git a/newrelic/common/agent_http.py b/newrelic/common/agent_http.py index 7b4c85fb55..8a71daa81a 100644 --- a/newrelic/common/agent_http.py +++ b/newrelic/common/agent_http.py @@ -35,7 +35,7 @@ from ssl import get_default_verify_paths except ImportError: - class _DEFAULT_CERT_PATH(object): + class _DEFAULT_CERT_PATH(): cafile = None capath = None @@ -77,7 +77,7 @@ def _urllib3_ssl_recursion_workaround(wrapped, instance, args, kwargs): return wrapped(*args, **kwargs) -class BaseClient(object): +class BaseClient(): AUDIT_LOG_ID = 0 def __init__( @@ -521,7 +521,7 @@ def __init__( ) -class SupportabilityMixin(object): +class SupportabilityMixin(): @staticmethod def _supportability_request(params, payload, body, compression_time): # ********* diff --git a/newrelic/common/async_proxy.py b/newrelic/common/async_proxy.py index ffc65f0085..481f8df416 100644 --- a/newrelic/common/async_proxy.py +++ b/newrelic/common/async_proxy.py @@ -25,7 +25,7 @@ CancelledError = None -class TransactionContext(object): +class TransactionContext(): def __init__(self, transaction_init): self.enter_time = None self.transaction = None @@ -101,7 +101,7 @@ def __exit__(self, exc, value, tb): self.transaction.__exit__(exc, value, tb) -class LoopContext(object): +class LoopContext(): def __enter__(self): self.enter_time = time.time() diff --git a/newrelic/common/stopwatch.py b/newrelic/common/stopwatch.py index 1f8de2f428..8038f3ec98 100644 --- a/newrelic/common/stopwatch.py +++ b/newrelic/common/stopwatch.py @@ -53,7 +53,7 @@ # wall clock time and duration based on a monotonic clock where # available. -class _Timer(object): +class _Timer(): def __init__(self): self._time_started = time.time() diff --git a/newrelic/common/streaming_utils.py b/newrelic/common/streaming_utils.py index ad1b371dc6..71863374c9 100644 --- a/newrelic/common/streaming_utils.py +++ b/newrelic/common/streaming_utils.py @@ -25,7 +25,7 @@ _logger = logging.getLogger(__name__) -class StreamBuffer(object): +class StreamBuffer(): def __init__(self, maxlen, batching=False): self._queue = collections.deque(maxlen=maxlen) self._notify = self.condition() @@ -80,7 +80,7 @@ def __iter__(self): return StreamBufferIterator(self) -class StreamBufferIterator(object): +class StreamBufferIterator(): MAX_BATCH_SIZE = 100 def __init__(self, stream_buffer): diff --git a/newrelic/common/utilization.py b/newrelic/common/utilization.py index d1c9184995..826ad778ba 100644 --- a/newrelic/common/utilization.py +++ b/newrelic/common/utilization.py @@ -45,7 +45,7 @@ def send_request(self, *args, **kwargs): return super(UtilizationHttpClient, self).send_request(*args, **kwargs) -class CommonUtilization(object): +class CommonUtilization(): METADATA_HOST = "" METADATA_PATH = "" METADATA_QUERY = None diff --git a/newrelic/console.py b/newrelic/console.py index 3559b07735..c813a3073e 100644 --- a/newrelic/console.py +++ b/newrelic/console.py @@ -100,7 +100,7 @@ def setquit(): else: eof = "Ctrl-D (i.e. 
EOF)" - class Quitter(object): + class Quitter(): def __init__(self, name): self.name = name @@ -426,7 +426,7 @@ def do_threads(self): print("\n\n".join(all), file=self.stdout) -class ConnectionManager(object): +class ConnectionManager(): def __init__(self, listener_socket): self.__listener_socket = listener_socket self.__console_initialized = False diff --git a/newrelic/core/adaptive_sampler.py b/newrelic/core/adaptive_sampler.py index 5e93e0e8a4..d79aa0eca9 100644 --- a/newrelic/core/adaptive_sampler.py +++ b/newrelic/core/adaptive_sampler.py @@ -17,7 +17,7 @@ import threading -class AdaptiveSampler(object): +class AdaptiveSampler(): def __init__(self, sampling_target, sampling_period): self.adaptive_target = 0.0 self.period = sampling_period diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index 67d5a4140a..fde6178e1b 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -79,7 +79,7 @@ def check_environment(): ) -class Agent(object): +class Agent(): """Only one instance of the agent should ever exist and that can be obtained using the agent_instance() function. diff --git a/newrelic/core/agent_protocol.py b/newrelic/core/agent_protocol.py index b8ccff2b90..5270982dab 100644 --- a/newrelic/core/agent_protocol.py +++ b/newrelic/core/agent_protocol.py @@ -50,7 +50,7 @@ _logger = logging.getLogger(__name__) -class AgentProtocol(object): +class AgentProtocol(): VERSION = 17 STATUS_CODE_RESPONSE = { diff --git a/newrelic/core/agent_streaming.py b/newrelic/core/agent_streaming.py index b581f5d17a..213e07a5d7 100644 --- a/newrelic/core/agent_streaming.py +++ b/newrelic/core/agent_streaming.py @@ -25,7 +25,7 @@ _logger = logging.getLogger(__name__) -class StreamingRpc(object): +class StreamingRpc(): """Streaming Remote Procedure Call This class keeps a stream_stream RPC alive, retrying after a timeout when diff --git a/newrelic/core/application.py b/newrelic/core/application.py index 3fc4f0f431..0c3c690da2 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -53,7 +53,7 @@ _logger = logging.getLogger(__name__) -class Application(object): +class Application(): """Class which maintains recorded data for a single application.""" diff --git a/newrelic/core/attribute_filter.py b/newrelic/core/attribute_filter.py index 8cd26cb30f..162046ca3a 100644 --- a/newrelic/core/attribute_filter.py +++ b/newrelic/core/attribute_filter.py @@ -25,7 +25,7 @@ DST_LOG_EVENT_CONTEXT_DATA = 1 << 6 -class AttributeFilter(object): +class AttributeFilter(): # Apply filtering rules to attributes. # # Upon initialization, an AttributeFilter object will take all attribute @@ -159,7 +159,7 @@ def apply(self, name, default_destinations): return destinations -class AttributeFilterRule(object): +class AttributeFilterRule(): def __init__(self, name, destinations, is_include): self.name = name.rstrip("*") self.destinations = destinations diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 5873b2374c..0820965151 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -82,7 +82,7 @@ def emit(self, record): # sub categories we don't know about. 
-class Settings(object): +class Settings(): nested = False def __repr__(self): diff --git a/newrelic/core/context.py b/newrelic/core/context.py index 7560855aef..d13d7ed080 100644 --- a/newrelic/core/context.py +++ b/newrelic/core/context.py @@ -24,7 +24,7 @@ _logger = logging.getLogger(__name__) -class ContextOf(object): +class ContextOf(): def __init__(self, trace=None, request=None, trace_cache_id=None, strict=True): self.trace = None self.trace_cache = trace_cache() diff --git a/newrelic/core/data_collector.py b/newrelic/core/data_collector.py index 6012a57cdd..d9ab80065b 100644 --- a/newrelic/core/data_collector.py +++ b/newrelic/core/data_collector.py @@ -35,7 +35,7 @@ _logger = logging.getLogger(__name__) -class Session(object): +class Session(): PROTOCOL = AgentProtocol OTLP_PROTOCOL = OtlpProtocol CLIENT = ApplicationModeClient diff --git a/newrelic/core/database_utils.py b/newrelic/core/database_utils.py index 28bb7f7295..41b87cd533 100644 --- a/newrelic/core/database_utils.py +++ b/newrelic/core/database_utils.py @@ -521,7 +521,7 @@ def _obfuscate_explain_plan(database, columns, rows): return columns, rows -class SQLConnection(object): +class SQLConnection(): def __init__(self, database, connection): self.database = database @@ -561,7 +561,7 @@ def cleanup(self): self.connection.close() -class SQLConnections(object): +class SQLConnections(): def __init__(self, maximum=4): self.connections = [] @@ -767,7 +767,7 @@ def explain_plan(connections, sql_statement, connect_params, cursor_params, # Wrapper for information about a specific database. -class SQLDatabase(object): +class SQLDatabase(): def __init__(self, dbapi2_module): self.dbapi2_module = dbapi2_module @@ -811,7 +811,7 @@ def explain_stmts(self): return result -class SQLStatement(object): +class SQLStatement(): def __init__(self, sql, database=None): self._operation = None diff --git a/newrelic/core/graphql_utils.py b/newrelic/core/graphql_utils.py index f76228c5de..8f377cdcc8 100644 --- a/newrelic/core/graphql_utils.py +++ b/newrelic/core/graphql_utils.py @@ -19,7 +19,7 @@ from newrelic.core.database_utils import SQLStatement -class GraphQLStyle(object): +class GraphQLStyle(): """Helper class to initialize SQLStatement instances.""" quoting_style = "single+double" diff --git a/newrelic/core/internal_metrics.py b/newrelic/core/internal_metrics.py index 090a658c73..0d3a413a49 100644 --- a/newrelic/core/internal_metrics.py +++ b/newrelic/core/internal_metrics.py @@ -21,7 +21,7 @@ _context = threading.local() -class InternalTrace(object): +class InternalTrace(): def __init__(self, name, metrics=None): self.name = name @@ -39,7 +39,7 @@ def __exit__(self, exc, value, tb): if self.metrics is not None: self.metrics.record_custom_metric(self.name, duration) -class InternalTraceWrapper(object): +class InternalTraceWrapper(): def __init__(self, wrapped, name): if type(wrapped) == type(()): @@ -68,7 +68,7 @@ def __call__(self, *args, **kwargs): with InternalTrace(self.__name, metrics): return self.__wrapped(*args, **kwargs) -class InternalTraceContext(object): +class InternalTraceContext(): def __init__(self, metrics): self.previous = None diff --git a/newrelic/core/node_mixin.py b/newrelic/core/node_mixin.py index 9444944390..734d8e998e 100644 --- a/newrelic/core/node_mixin.py +++ b/newrelic/core/node_mixin.py @@ -16,7 +16,7 @@ from newrelic.core.attribute_filter import DST_SPAN_EVENTS, DST_TRANSACTION_SEGMENTS -class GenericNodeMixin(object): +class GenericNodeMixin(): @property def processed_user_attributes(self): if hasattr(self, 
"_processed_user_attributes"): diff --git a/newrelic/core/profile_sessions.py b/newrelic/core/profile_sessions.py index 299d03ddc9..6f67317562 100644 --- a/newrelic/core/profile_sessions.py +++ b/newrelic/core/profile_sessions.py @@ -32,7 +32,7 @@ AGENT_PACKAGE_DIRECTORY = os.path.dirname(newrelic.__file__) + os.sep -class SessionState(object): +class SessionState(): RUNNING = 1 FINISHED = 2 @@ -113,7 +113,7 @@ def collect_stack_traces(include_nr_threads=False): yield thread_category, stack_trace -class ProfileSessionManager(object): +class ProfileSessionManager(): """Singleton class that manages multiple profile sessions. Do NOT instantiate directly from this class. Instead use profile_session_manager() @@ -282,7 +282,7 @@ def shutdown(self, app_name): return True -class ProfileSession(object): +class ProfileSession(): def __init__(self, profile_id, stop_time): self.profile_id = profile_id self.start_time_s = time.time() @@ -431,7 +431,7 @@ def profile_data(self): return profile -class CallTree(object): +class CallTree(): def __init__(self, method_data, call_count=0, depth=1): self.method_data = method_data self.call_count = call_count diff --git a/newrelic/core/rules_engine.py b/newrelic/core/rules_engine.py index 62ecce3fef..ddbf780331 100644 --- a/newrelic/core/rules_engine.py +++ b/newrelic/core/rules_engine.py @@ -54,7 +54,7 @@ def apply(self, string): return self.match_expression_re.subn(self.replacement, string, count) -class RulesEngine(object): +class RulesEngine(): def __init__(self, rules): self.__rules = [] @@ -126,7 +126,7 @@ def normalize(self, string): return (final_string, ignore) -class SegmentCollapseEngine(object): +class SegmentCollapseEngine(): """Segment names in transaction name are collapsed using the rules from the data collector. The collector sends a prefix and list of allowlist terms associated with that prefix. If a transaction name diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index 506f9a15c4..72a72a0243 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -195,7 +195,7 @@ def merge_raw_time_metric(self, duration, exclusive=None): pass -class CustomMetrics(object): +class CustomMetrics(): """Table for collection a set of value metrics.""" @@ -241,7 +241,7 @@ def reset_metric_stats(self): self.__stats_table = {} -class DimensionalMetrics(object): +class DimensionalMetrics(): """Nested dictionary table for collecting a set of metrics broken down by tags.""" @@ -377,7 +377,7 @@ def merge_slow_sql_node(self, node): self[0] += 1 -class SampledDataSet(object): +class SampledDataSet(): def __init__(self, capacity=100): self.pq = [] self.heap = False @@ -501,7 +501,7 @@ def merge(self, other_data_set): self.num_seen += other_data_set.num_seen - other_data_set.num_samples -class StatsEngine(object): +class StatsEngine(): """The stats engine object holds the accumulated transactions metrics, details of errors and slow transactions. There should be one instance diff --git a/newrelic/core/string_table.py b/newrelic/core/string_table.py index 5e37729e48..bbad866c1d 100644 --- a/newrelic/core/string_table.py +++ b/newrelic/core/string_table.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-class StringTable(object): +class StringTable(): def __init__(self): self.__values = [] diff --git a/newrelic/core/thread_utilization.py b/newrelic/core/thread_utilization.py index fd57ba9f86..dad118ec2c 100644 --- a/newrelic/core/thread_utilization.py +++ b/newrelic/core/thread_utilization.py @@ -26,7 +26,7 @@ def utilization_tracker(application): return _utilization_trackers.get(application) -class ThreadUtilizationDataSource(object): +class ThreadUtilizationDataSource(): def __init__(self, application): self._consumer_name = application diff --git a/newrelic/core/trace_cache.py b/newrelic/core/trace_cache.py index bc271959f4..e0703ac82c 100644 --- a/newrelic/core/trace_cache.py +++ b/newrelic/core/trace_cache.py @@ -71,7 +71,7 @@ def get_event_loop(task): return getattr(task, "_loop", None) -class cached_module(object): +class cached_module(): def __init__(self, module_path, name=None): self.module_path = module_path self.name = name or module_path diff --git a/newrelic/hooks/component_piston.py b/newrelic/hooks/component_piston.py index db9da94b7d..54dc170ad3 100644 --- a/newrelic/hooks/component_piston.py +++ b/newrelic/hooks/component_piston.py @@ -20,7 +20,7 @@ import newrelic.api.in_function -class MethodWrapper(object): +class MethodWrapper(): def __init__(self, wrapped, priority=None): self._nr_name = callable_name(wrapped) @@ -46,7 +46,7 @@ def __call__(self, *args, **kwargs): return self._nr_wrapped(*args, **kwargs) -class ResourceInitWrapper(object): +class ResourceInitWrapper(): def __init__(self, wrapped): if isinstance(wrapped, tuple): diff --git a/newrelic/hooks/database_asyncpg.py b/newrelic/hooks/database_asyncpg.py index d6ca62ef35..1abd3a0d01 100644 --- a/newrelic/hooks/database_asyncpg.py +++ b/newrelic/hooks/database_asyncpg.py @@ -17,7 +17,7 @@ from newrelic.common.object_wrapper import ObjectProxy, wrap_function_wrapper -class PostgresApi(object): +class PostgresApi(): @staticmethod def _instance_info(addr, connected_fut, con_params, *args, **kwargs): if isinstance(addr, str): diff --git a/newrelic/hooks/external_feedparser.py b/newrelic/hooks/external_feedparser.py index d505b61d2b..432d292be6 100644 --- a/newrelic/hooks/external_feedparser.py +++ b/newrelic/hooks/external_feedparser.py @@ -22,7 +22,7 @@ import newrelic.api.external_trace -class capture_external_trace(object): +class capture_external_trace(): def __init__(self, wrapped): newrelic.api.object_wrapper.update_wrapper(self, wrapped) self._nr_next_object = wrapped diff --git a/newrelic/hooks/framework_pylons.py b/newrelic/hooks/framework_pylons.py index 2832261668..f43d92de39 100644 --- a/newrelic/hooks/framework_pylons.py +++ b/newrelic/hooks/framework_pylons.py @@ -26,7 +26,7 @@ def name_controller(self, environ, start_response): action = environ['pylons.routes_dict']['action'] return "%s.%s" % (callable_name(self), action) -class capture_error(object): +class capture_error(): def __init__(self, wrapped): if isinstance(wrapped, tuple): (instance, wrapped) = wrapped diff --git a/newrelic/hooks/framework_tornado.py b/newrelic/hooks/framework_tornado.py index bfdd4dba5d..658670a663 100644 --- a/newrelic/hooks/framework_tornado.py +++ b/newrelic/hooks/framework_tornado.py @@ -415,7 +415,7 @@ def instrument_tornado_web(module): track_loop_time) -class TornadoContext(object): +class TornadoContext(): def __init__(self): self.transaction = None diff --git a/newrelic/hooks/framework_web2py.py b/newrelic/hooks/framework_web2py.py index aeb22bd84a..e71bfa5fdd 100644 --- a/newrelic/hooks/framework_web2py.py +++ 
b/newrelic/hooks/framework_web2py.py @@ -111,7 +111,7 @@ def instrument_gluon_main(module): # where we want to name the web transactions as # such, we pick that up later. - class error_serve_controller(object): + class error_serve_controller(): def __init__(self, wrapped): newrelic.api.object_wrapper.update_wrapper(self, wrapped) self._nr_next_object = wrapped diff --git a/newrelic/hooks/template_genshi.py b/newrelic/hooks/template_genshi.py index e4ed9aead6..253e06cc95 100644 --- a/newrelic/hooks/template_genshi.py +++ b/newrelic/hooks/template_genshi.py @@ -17,7 +17,7 @@ import newrelic.api.function_trace -class stream_wrapper(object): +class stream_wrapper(): def __init__(self, stream, filepath): self.__stream = stream self.__filepath = filepath @@ -46,7 +46,7 @@ def __html__(self): return self.__stream.__html__() -class wrap_template(object): +class wrap_template(): def __init__(self, wrapped): if isinstance(wrapped, tuple): (instance, wrapped) = wrapped diff --git a/newrelic/hooks/template_mako.py b/newrelic/hooks/template_mako.py index 1cd5bab16f..4faa5ba9ea 100644 --- a/newrelic/hooks/template_mako.py +++ b/newrelic/hooks/template_mako.py @@ -15,7 +15,7 @@ import newrelic.api.function_trace import newrelic.common.object_wrapper -class TemplateRenderWrapper(object): +class TemplateRenderWrapper(): def __init__(self, wrapped): self.__wrapped = wrapped diff --git a/newrelic/samplers/cpu_usage.py b/newrelic/samplers/cpu_usage.py index f19ccadbfa..5dff15db35 100644 --- a/newrelic/samplers/cpu_usage.py +++ b/newrelic/samplers/cpu_usage.py @@ -25,7 +25,7 @@ from newrelic.samplers.decorators import data_source_factory @data_source_factory(name='CPU Usage') -class _CPUUsageDataSource(object): +class _CPUUsageDataSource(): def __init__(self, settings, environ): self._timer = None diff --git a/newrelic/samplers/data_sampler.py b/newrelic/samplers/data_sampler.py index a7ecb3b4e5..696e6c8147 100644 --- a/newrelic/samplers/data_sampler.py +++ b/newrelic/samplers/data_sampler.py @@ -23,7 +23,7 @@ _logger = logging.getLogger(__name__) -class DataSampler(object): +class DataSampler(): def __init__(self, consumer, source, name, settings, **properties): self.consumer = consumer diff --git a/newrelic/samplers/gc_data.py b/newrelic/samplers/gc_data.py index 169212c05e..0a5dc1ec92 100644 --- a/newrelic/samplers/gc_data.py +++ b/newrelic/samplers/gc_data.py @@ -25,7 +25,7 @@ @data_source_factory(name="Garbage Collector Metrics") -class _GCDataSource(object): +class _GCDataSource(): def __init__(self, settings, environ): self.gc_time_metrics = CustomMetrics() self.start_time = 0.0 diff --git a/tests/adapter_gunicorn/worker.py b/tests/adapter_gunicorn/worker.py index 5d3a984980..f7bd868820 100644 --- a/tests/adapter_gunicorn/worker.py +++ b/tests/adapter_gunicorn/worker.py @@ -17,7 +17,7 @@ from gunicorn.workers.sync import SyncWorker -class WsgiProxy(object): +class WsgiProxy(): def __init__(self, asgi): self.asgi = asgi self.status_code = None diff --git a/tests/agent_features/_test_code_level_metrics.py b/tests/agent_features/_test_code_level_metrics.py index bbe3363f4c..21d193a104 100644 --- a/tests/agent_features/_test_code_level_metrics.py +++ b/tests/agent_features/_test_code_level_metrics.py @@ -18,7 +18,7 @@ def exercise_function(): return -class ExerciseClass(object): +class ExerciseClass(): def exercise_method(self): return @@ -31,7 +31,7 @@ def exercise_class_method(cls): return -class ExerciseClassCallable(object): +class ExerciseClassCallable(): def __call__(self): return diff --git 
a/tests/agent_features/test_asgi_distributed_tracing.py b/tests/agent_features/test_asgi_distributed_tracing.py index f38bbc0c5f..13e52c91f3 100644 --- a/tests/agent_features/test_asgi_distributed_tracing.py +++ b/tests/agent_features/test_asgi_distributed_tracing.py @@ -123,7 +123,7 @@ def test_distributed_tracing_web_transaction(): assert "X-NewRelic-App-Data" not in response.headers -class TestAsgiRequest(object): +class TestAsgiRequest(): scope = { "asgi": {"spec_version": "2.1", "version": "3.0"}, "client": ("127.0.0.1", 54768), diff --git a/tests/agent_features/test_attribute.py b/tests/agent_features/test_attribute.py index 168825173d..f1fc35558f 100644 --- a/tests/agent_features/test_attribute.py +++ b/tests/agent_features/test_attribute.py @@ -479,7 +479,7 @@ def test_sanitize_tuple(): assert sanitize(t) == "('one', 'two', 'three')" -class Foo(object): +class Foo(): pass @@ -488,7 +488,7 @@ def test_sanitize_object(): assert sanitize(f) == str(f) -class TypeErrorString(object): +class TypeErrorString(): def __str__(self): return 42 @@ -498,7 +498,7 @@ def test_str_raises_type_error(): sanitize(TypeErrorString()) -class AttributeErrorString(object): +class AttributeErrorString(): def __str__(self): raise AttributeError() diff --git a/tests/agent_features/test_lambda_handler.py b/tests/agent_features/test_lambda_handler.py index 69b05fbf8d..8658a0bca3 100644 --- a/tests/agent_features/test_lambda_handler.py +++ b/tests/agent_features/test_lambda_handler.py @@ -92,7 +92,7 @@ def handler(event, context): } -class Context(object): +class Context(): aws_request_id = "cookies" invoked_function_arn = "arn" function_name = "cats" diff --git a/tests/agent_features/test_log_events.py b/tests/agent_features/test_log_events.py index 2fc709233d..f1bb6cdf4c 100644 --- a/tests/agent_features/test_log_events.py +++ b/tests/agent_features/test_log_events.py @@ -36,14 +36,14 @@ from newrelic.core.config import _parse_attributes -class NonPrintableObject(object): +class NonPrintableObject(): def __str__(self): raise RuntimeError("Unable to print object.") __repr__ = __str__ -class NonSerializableObject(object): +class NonSerializableObject(): def __str__(self): return "<%s object>" % self.__class__.__name__ diff --git a/tests/agent_features/test_logs_in_context.py b/tests/agent_features/test_logs_in_context.py index c03068e014..931f0fa4e5 100644 --- a/tests/agent_features/test_logs_in_context.py +++ b/tests/agent_features/test_logs_in_context.py @@ -62,14 +62,14 @@ def log_buffer_with_stack_trace(caplog): _logger.removeHandler(_handler) -class NonPrintableObject(object): +class NonPrintableObject(): def __str__(self): raise RuntimeError("Unable to print object.") __repr__ = __str__ -class NonSerializableObject(object): +class NonSerializableObject(): def __str__(self): return "<%s object>" % self.__class__.__name__ diff --git a/tests/agent_features/test_serverless_mode.py b/tests/agent_features/test_serverless_mode.py index 6114102bf6..c7dbf1720e 100644 --- a/tests/agent_features/test_serverless_mode.py +++ b/tests/agent_features/test_serverless_mode.py @@ -167,7 +167,7 @@ def test_payload_metadata_arn(serverless_application, arn_set): settings.aws_lambda_metadata.update({"arn": arn, "function_version": "$LATEST"}) - class Context(object): + class Context(): invoked_function_arn = arn @validate_serverless_metadata(exact_metadata={"arn": arn}) diff --git a/tests/agent_features/test_span_events.py b/tests/agent_features/test_span_events.py index 05e375ff3e..13e725af94 100644 --- 
a/tests/agent_features/test_span_events.py +++ b/tests/agent_features/test_span_events.py @@ -479,7 +479,7 @@ def _test(): _test() -class FakeTrace(object): +class FakeTrace(): def __enter__(self): pass diff --git a/tests/agent_unittests/_test_import_hook.py b/tests/agent_unittests/_test_import_hook.py index 6afbb06374..e5c0a60a03 100644 --- a/tests/agent_unittests/_test_import_hook.py +++ b/tests/agent_unittests/_test_import_hook.py @@ -15,10 +15,10 @@ def run(): pass -class A(object): +class A(): def run(): pass -class B(object): +class B(): def run(): pass diff --git a/tests/agent_unittests/conftest.py b/tests/agent_unittests/conftest.py index 9397456586..4e9bfa5597 100644 --- a/tests/agent_unittests/conftest.py +++ b/tests/agent_unittests/conftest.py @@ -41,7 +41,7 @@ ) -class FakeProtos(object): +class FakeProtos(): Span = object() SpanBatch = object() diff --git a/tests/agent_unittests/test_agent.py b/tests/agent_unittests/test_agent.py index 55eea099cc..e099a2f0bd 100644 --- a/tests/agent_unittests/test_agent.py +++ b/tests/agent_unittests/test_agent.py @@ -18,7 +18,7 @@ from testing_support.fixtures import override_generic_settings -class FakeApplication(object): +class FakeApplication(): name = 'Fake' def __init__(self, *args, **kwargs): diff --git a/tests/agent_unittests/test_agent_protocol.py b/tests/agent_unittests/test_agent_protocol.py index 85395c6cdb..e388c45af0 100644 --- a/tests/agent_unittests/test_agent_protocol.py +++ b/tests/agent_unittests/test_agent_protocol.py @@ -570,7 +570,7 @@ def test_audit_logging(): ) def test_ca_bundle_path(monkeypatch, ca_bundle_path): # Pretend CA certificates are not available - class DefaultVerifyPaths(object): + class DefaultVerifyPaths(): cafile = None capath = None diff --git a/tests/agent_unittests/test_environment.py b/tests/agent_unittests/test_environment.py index 84dd753a9a..fd485cf74f 100644 --- a/tests/agent_unittests/test_environment.py +++ b/tests/agent_unittests/test_environment.py @@ -24,7 +24,7 @@ def module(version): - class Module(object): + class Module(): pass if version: @@ -68,7 +68,7 @@ def test_plugin_list_when_package_reporting_disabled(): assert plugin_list == [] -class NoIteratorDict(object): +class NoIteratorDict(): def __init__(self, d): self.d = d diff --git a/tests/agent_unittests/test_http_client.py b/tests/agent_unittests/test_http_client.py index 8f876c63e6..253e83233c 100644 --- a/tests/agent_unittests/test_http_client.py +++ b/tests/agent_unittests/test_http_client.py @@ -367,7 +367,7 @@ def test_default_cert_path(monkeypatch, system_certs_available): cert_file = None ca_path = None - class DefaultVerifyPaths(object): + class DefaultVerifyPaths(): cafile = cert_file capath = ca_path diff --git a/tests/agent_unittests/test_trace_cache.py b/tests/agent_unittests/test_trace_cache.py index e0f7db84fa..71046efb14 100644 --- a/tests/agent_unittests/test_trace_cache.py +++ b/tests/agent_unittests/test_trace_cache.py @@ -21,7 +21,7 @@ _TEST_CONCURRENT_ITERATION_TC_SIZE = 20 -class DummyTrace(object): +class DummyTrace(): pass diff --git a/tests/agent_unittests/test_utilization_settings.py b/tests/agent_unittests/test_utilization_settings.py index 33eaa63a52..84b370e6b1 100644 --- a/tests/agent_unittests/test_utilization_settings.py +++ b/tests/agent_unittests/test_utilization_settings.py @@ -72,7 +72,7 @@ # Tests for loading settings and testing for values precedence -class Environ(object): +class Environ(): def __init__(self, env_dict): self.env_dict = {} for key in env_dict.keys(): diff --git 
a/tests/component_tastypie/test_application.py b/tests/component_tastypie/test_application.py index d15b4c6d21..9515ffc232 100644 --- a/tests/component_tastypie/test_application.py +++ b/tests/component_tastypie/test_application.py @@ -53,7 +53,7 @@ def test_application_index(): response.mustcontain("INDEX RESPONSE") -class TastyPieFullDebugMode(object): +class TastyPieFullDebugMode(): def __init__(self, tastypie_full_debug): from django.conf import settings diff --git a/tests/cross_agent/test_boot_id_utilization_data.py b/tests/cross_agent/test_boot_id_utilization_data.py index ea5b26a9e7..42f58c12cf 100644 --- a/tests/cross_agent/test_boot_id_utilization_data.py +++ b/tests/cross_agent/test_boot_id_utilization_data.py @@ -48,7 +48,7 @@ def _parametrize_test(test): _boot_id_tests = [_parametrize_test(t) for t in _load_tests()] -class MockedBootIdEndpoint(object): +class MockedBootIdEndpoint(): def __init__(self, boot_id): self.boot_id = boot_id diff --git a/tests/cross_agent/test_lambda_event_source.py b/tests/cross_agent/test_lambda_event_source.py index de796a6b0f..e695a41665 100644 --- a/tests/cross_agent/test_lambda_event_source.py +++ b/tests/cross_agent/test_lambda_event_source.py @@ -44,7 +44,7 @@ def _load_tests(): return tests.keys() -class Context(object): +class Context(): aws_request_id = "cookies" invoked_function_arn = "arn" function_name = "cats" diff --git a/tests/cross_agent/test_pcf_utilization_data.py b/tests/cross_agent/test_pcf_utilization_data.py index 28b56f7592..108e77d890 100644 --- a/tests/cross_agent/test_pcf_utilization_data.py +++ b/tests/cross_agent/test_pcf_utilization_data.py @@ -45,7 +45,7 @@ def _parametrize_test(test): _pcf_tests = [_parametrize_test(t) for t in _load_tests()] -class Environ(object): +class Environ(): def __init__(self, env_dict): env_dict = env_dict or {} cleaned_env_dict = {} @@ -66,7 +66,7 @@ def __exit__(self, *args, **kwargs): os.environ = INITIAL_ENV -class MockResponse(object): +class MockResponse(): def __init__(self, code, body): self.code = code diff --git a/tests/cross_agent/test_sql_obfuscation.py b/tests/cross_agent/test_sql_obfuscation.py index 480b0a4176..362fc797df 100644 --- a/tests/cross_agent/test_sql_obfuscation.py +++ b/tests/cross_agent/test_sql_obfuscation.py @@ -53,7 +53,7 @@ def get_quoting_styles(dialects): return set([_quoting_styles.get(dialect) for dialect in dialects]) -class DummyDB(object): +class DummyDB(): def __init__(self, quoting_style): self.quoting_style = quoting_style diff --git a/tests/cross_agent/test_utilization_configs.py b/tests/cross_agent/test_utilization_configs.py index 30ba192b9a..b8f8dca622 100644 --- a/tests/cross_agent/test_utilization_configs.py +++ b/tests/cross_agent/test_utilization_configs.py @@ -72,7 +72,7 @@ def getips(*args, **kwargs): return getips -class UpdatedSettings(object): +class UpdatedSettings(): def __init__(self): self.initial_settings = newrelic.core.config._settings diff --git a/tests/datastore_aioredis/test_custom_conn_pool.py b/tests/datastore_aioredis/test_custom_conn_pool.py index b09cf0bdd3..415cded094 100644 --- a/tests/datastore_aioredis/test_custom_conn_pool.py +++ b/tests/datastore_aioredis/test_custom_conn_pool.py @@ -30,7 +30,7 @@ DB_SETTINGS = redis_settings()[0] -class FakeConnectionPool(object): +class FakeConnectionPool(): """Connection Pool without connection_kwargs attribute.""" def __init__(self, connection): diff --git a/tests/datastore_aredis/test_custom_conn_pool.py b/tests/datastore_aredis/test_custom_conn_pool.py index 
70c75de9ea..c2594c2dbf 100644 --- a/tests/datastore_aredis/test_custom_conn_pool.py +++ b/tests/datastore_aredis/test_custom_conn_pool.py @@ -32,7 +32,7 @@ REDIS_PY_VERSION = aredis.VERSION -class FakeConnectionPool(object): +class FakeConnectionPool(): """Connection Pool without connection_kwargs attribute.""" def __init__(self, connection): diff --git a/tests/datastore_psycopg/test_forward_compat.py b/tests/datastore_psycopg/test_forward_compat.py index 0f5ead853f..520dd35df3 100644 --- a/tests/datastore_psycopg/test_forward_compat.py +++ b/tests/datastore_psycopg/test_forward_compat.py @@ -18,7 +18,7 @@ from newrelic.hooks.database_psycopg import wrapper_psycopg_as_string -class TestCompatability(object): +class TestCompatability(): def as_string(self, giraffe, lion, tiger=None): assert type(giraffe) in (psycopg.Cursor, psycopg.AsyncCursor) return "PASS" diff --git a/tests/datastore_psycopg2/test_forward_compat.py b/tests/datastore_psycopg2/test_forward_compat.py index d150943288..35611f0e00 100644 --- a/tests/datastore_psycopg2/test_forward_compat.py +++ b/tests/datastore_psycopg2/test_forward_compat.py @@ -20,7 +20,7 @@ from newrelic.hooks.database_psycopg2 import wrapper_psycopg2_as_string -class TestCompatability(object): +class TestCompatability(): def as_string(self, giraffe, lion, tiger=None): assert isinstance(giraffe, ext.cursor) return "PASS" diff --git a/tests/datastore_redis/test_custom_conn_pool.py b/tests/datastore_redis/test_custom_conn_pool.py index b16a77f48d..42e5e08921 100644 --- a/tests/datastore_redis/test_custom_conn_pool.py +++ b/tests/datastore_redis/test_custom_conn_pool.py @@ -33,7 +33,7 @@ REDIS_PY_VERSION = get_package_version_tuple("redis") -class FakeConnectionPool(object): +class FakeConnectionPool(): """Connection Pool without connection_kwargs attribute.""" def __init__(self, connection): diff --git a/tests/framework_cherrypy/test_application.py b/tests/framework_cherrypy/test_application.py index 0448ce7127..53e20ad77d 100644 --- a/tests/framework_cherrypy/test_application.py +++ b/tests/framework_cherrypy/test_application.py @@ -30,7 +30,7 @@ CHERRYPY_VERSION = tuple(int(v) for v in cherrypy.__version__.split(".")) -class Application(object): +class Application(): @cherrypy.expose def index(self): return "INDEX RESPONSE" diff --git a/tests/framework_cherrypy/test_dispatch.py b/tests/framework_cherrypy/test_dispatch.py index 7e4358146e..bc756fa2ec 100644 --- a/tests/framework_cherrypy/test_dispatch.py +++ b/tests/framework_cherrypy/test_dispatch.py @@ -26,7 +26,7 @@ requires_cherrypy32 = pytest.mark.skipif(not is_ge_cherrypy32, reason="The dispatch mechanism was only added in CherryPy 3.2.") -class Resource(object): +class Resource(): def _cp_dispatch(self, vpath): raise RuntimeError('dispatch error') diff --git a/tests/framework_cherrypy/test_resource.py b/tests/framework_cherrypy/test_resource.py index 385d28d91e..4d6f949fb9 100644 --- a/tests/framework_cherrypy/test_resource.py +++ b/tests/framework_cherrypy/test_resource.py @@ -19,7 +19,7 @@ import cherrypy -class Resource(object): +class Resource(): exposed = True diff --git a/tests/framework_cherrypy/test_routes.py b/tests/framework_cherrypy/test_routes.py index 9111a29ced..464fcc7c1e 100644 --- a/tests/framework_cherrypy/test_routes.py +++ b/tests/framework_cherrypy/test_routes.py @@ -21,7 +21,7 @@ import cherrypy -class EndPoint(object): +class EndPoint(): def index(self): return 'INDEX RESPONSE' diff --git a/tests/framework_django/middleware.py b/tests/framework_django/middleware.py index 
2d9e794467..0e6669ae3f 100644 --- a/tests/framework_django/middleware.py +++ b/tests/framework_django/middleware.py @@ -19,7 +19,7 @@ class Custom410(Exception): pass -class ExceptionTo410Middleware(object): +class ExceptionTo410Middleware(): def __init__(self, get_response=None): self.get_response = get_response diff --git a/tests/framework_falcon/_target_application.py b/tests/framework_falcon/_target_application.py index e78e5bb633..258bc82af7 100644 --- a/tests/framework_falcon/_target_application.py +++ b/tests/framework_falcon/_target_application.py @@ -39,14 +39,14 @@ class BadPutRequest(ValueError): pass -class Index(object): +class Index(): def on_get(self, req, resp): """Handles GET requests""" resp.content_type = 'application/json' resp.data = b'{"status": "ok"}' -class BadResponse(object): +class BadResponse(): def on_get(self, req, resp): raise BadGetRequest() diff --git a/tests/framework_grpc/sample_application/__init__.py b/tests/framework_grpc/sample_application/__init__.py index cd5d3de105..f56d62af86 100644 --- a/tests/framework_grpc/sample_application/__init__.py +++ b/tests/framework_grpc/sample_application/__init__.py @@ -33,7 +33,7 @@ SampleApplicationStub = sample_application_pb2_grpc.SampleApplicationStub -class Status(object): +class Status(): code = grpc.StatusCode.ABORTED details = "abort_with_status" trailing_metadata = {} diff --git a/tests/framework_pyramid/_test_application.py b/tests/framework_pyramid/_test_application.py index 1ae86140f5..7442968e86 100644 --- a/tests/framework_pyramid/_test_application.py +++ b/tests/framework_pyramid/_test_application.py @@ -95,7 +95,7 @@ def cornice_service_get_info(request): @cornice.resource.resource(collection_path="/resource", path="/resource/{id}") - class Resource(object): + class Resource(): def __init__(self, request, context=None): self.request = request diff --git a/tests/framework_sanic/conftest.py b/tests/framework_sanic/conftest.py index 6e55569a14..913b7a0c93 100644 --- a/tests/framework_sanic/conftest.py +++ b/tests/framework_sanic/conftest.py @@ -142,7 +142,7 @@ def request(app, method, url, headers=None): return RESPONSES.pop() -class TestApplication(object): +class TestApplication(): def __init__(self, app): self.app = app diff --git a/tests/framework_starlette/_test_application.py b/tests/framework_starlette/_test_application.py index 74b724c0a9..149eeb6a77 100644 --- a/tests/framework_starlette/_test_application.py +++ b/tests/framework_starlette/_test_application.py @@ -78,7 +78,7 @@ def missing_route_handler(request, exc): return PlainTextResponse("Missing route handler", status_code=404) -class CustomRoute(object): +class CustomRoute(): def __init__(self, route): self.route = route diff --git a/tests/messagebroker_kafkapython/conftest.py b/tests/messagebroker_kafkapython/conftest.py index 4a692f18ae..09a095cd41 100644 --- a/tests/messagebroker_kafkapython/conftest.py +++ b/tests/messagebroker_kafkapython/conftest.py @@ -173,7 +173,7 @@ def deserialize(self, topic, bytes_): @pytest.fixture(scope="session") def json_callable_serializer(): - class JSONCallableSerializer(object): + class JSONCallableSerializer(): def __call__(self, obj): return json.dumps(obj).encode("utf-8") if obj is not None else None @@ -182,7 +182,7 @@ def __call__(self, obj): @pytest.fixture(scope="session") def json_callable_deserializer(): - class JSONCallableDeserializer(object): + class JSONCallableDeserializer(): def __call__(self, obj): return json.loads(obj.decode("utf-8")) if obj is not None else None diff --git 
a/tests/testing_support/asgi_testing.py b/tests/testing_support/asgi_testing.py index 3b45686794..cd20818b88 100644 --- a/tests/testing_support/asgi_testing.py +++ b/tests/testing_support/asgi_testing.py @@ -22,14 +22,14 @@ class ResponseState(Enum): DONE = 3 -class Response(object): +class Response(): def __init__(self, status, headers, body): self.status = status self.headers = headers self.body = b"".join(body) -class AsgiTest(object): +class AsgiTest(): def __init__(self, asgi_application): self.asgi_application = asgi_application diff --git a/tests/testing_support/external_fixtures.py b/tests/testing_support/external_fixtures.py index d28411b2c6..d8ed48658a 100644 --- a/tests/testing_support/external_fixtures.py +++ b/tests/testing_support/external_fixtures.py @@ -77,7 +77,7 @@ def _bind_params(transaction, *args, **kwargs): # reviewed and a better way of achieving what is # required found. - class _Transaction(object): + class _Transaction(): def __init__(self, wrapped): self.__wrapped__ = wrapped diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py index 1670bb0667..85a75c64ef 100644 --- a/tests/testing_support/fixtures.py +++ b/tests/testing_support/fixtures.py @@ -1354,7 +1354,7 @@ def check_error_attributes( check_attributes(parameters, required_params, forgone_params, exact_attrs) -class Environ(object): +class Environ(): """Context manager for setting environment variables temporarily.""" def __init__(self, **kwargs): diff --git a/tests/testing_support/mock_external_grpc_server.py b/tests/testing_support/mock_external_grpc_server.py index 39ee834cc8..1511d55e47 100644 --- a/tests/testing_support/mock_external_grpc_server.py +++ b/tests/testing_support/mock_external_grpc_server.py @@ -27,7 +27,7 @@ # ... test stuff ... 
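# Aside for readers of this patch (illustrative, not part of the patched
# file): in Python 3 every class is new-style, so the explicit object base
# that the hunks throughout this patch remove was already redundant. A
# quick check of the equivalence:
#
#     class A: pass
#     class B(object): pass
#
#     assert type(A) is type(B) is type
#     assert A.__mro__[-1] is B.__mro__[-1] is object
#
# Both forms produce identical classes in Python 3; only Python 2 drew a
# distinction between old- and new-style classes, which is why the change
# is purely cosmetic.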
-class MockExternalgRPCServer(object): +class MockExternalgRPCServer(): def __init__(self, port=None, *args, **kwargs): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=4)) if port: From 3173305b479c36c05417d9d2c5490f0145cc8730 Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon, 26 Aug 2024 14:07:36 -0700 Subject: [PATCH 3/7] Misc Python 3 Improvements (#1202) * Fix redundant code in parse_exc_info * Fix random unicode strings * Package updates in tox * Add missing py312 langchain test * Remove hypercorn pin * Refactor Pika version info in tests * Remove unneeded testing skips for various python versions from package version utils * Delete six from requirements.txt for dependabot --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- newrelic/common/object_names.py | 17 ++-------- newrelic/core/attribute.py | 3 +- newrelic/packages/requirements.txt | 1 - tests/agent_unittests/test_agent_protocol.py | 7 +---- .../test_package_version_utils.py | 6 ---- tests/external_feedparser/test_feedparser.py | 6 ++-- tests/messagebroker_pika/compat.py | 4 +-- tests/messagebroker_pika/conftest.py | 6 ++++ tests/messagebroker_pika/minversion.py | 31 ------------------- .../test_pika_async_connection_consume.py | 4 +-- tox.ini | 10 +++--- 11 files changed, 21 insertions(+), 74 deletions(-) delete mode 100644 tests/messagebroker_pika/minversion.py diff --git a/newrelic/common/object_names.py b/newrelic/common/object_names.py index 4859cf7fd8..d9852082a3 100644 --- a/newrelic/common/object_names.py +++ b/newrelic/common/object_names.py @@ -296,22 +296,9 @@ def parse_exc_info(exc_info): fullnames = (name,) try: - - # Favor unicode in exception messages. - + # Ensure exception messages are strings message = str(value) - except Exception: - try: - - # If exception cannot be represented in unicode, this means - # that it is a byte string encoded with an encoding - # that is not compatible with the default system encoding. - # So, just pass this byte string along. - - message = str(value) - - except Exception: - message = "<unprintable %s object>" % type(value).__name__ + message = "<unprintable %s object>" % type(value).__name__ return (module, name, fullnames, message) diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index 791392b261..fd37993f30 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -226,8 +226,7 @@ def truncate(text, maxsize=MAX_ATTRIBUTE_LENGTH, encoding="utf-8", ending=None): # Truncate text so that its byte representation # is no longer than maxsize bytes. - # If text is unicode (Python 2 or 3), return unicode. - # If text is a Python 2 string, return str. + # Handle unicode or bytes strings and return the same type as the input.
if isinstance(text, str): truncated = _truncate_unicode(text, maxsize, encoding) diff --git a/newrelic/packages/requirements.txt b/newrelic/packages/requirements.txt index ee82c4e887..f4835d1b37 100644 --- a/newrelic/packages/requirements.txt +++ b/newrelic/packages/requirements.txt @@ -6,4 +6,3 @@ opentelemetry_proto==1.0.0 urllib3==1.26.18 wrapt==1.16.0 asgiref==3.6.0 # We only vendor asgiref.compatibility.py -six==1.16.0 diff --git a/tests/agent_unittests/test_agent_protocol.py b/tests/agent_unittests/test_agent_protocol.py index e388c45af0..f200f932ff 100644 --- a/tests/agent_unittests/test_agent_protocol.py +++ b/tests/agent_unittests/test_agent_protocol.py @@ -297,12 +297,7 @@ def connect_payload_asserts( ): payload_data = payload[0] - # Turn off black formatting for this section of the code. While Python 2 has been - # EOL'd since 2020, New Relic still supports it and therefore this unicode assert - # needs the u"" still. - # fmt: off - assert isinstance(payload_data["agent_version"], type(u"")) # pylint: disable=W1406 - # fmt: on + assert isinstance(payload_data["agent_version"], str) assert payload_data["app_name"] == PAYLOAD_APP_NAME assert payload_data["display_host"] == DISPLAY_NAME assert payload_data["environment"] == ENVIRONMENT diff --git a/tests/agent_unittests/test_package_version_utils.py b/tests/agent_unittests/test_package_version_utils.py index 6d0e1e4a73..8b2abeda86 100644 --- a/tests/agent_unittests/test_package_version_utils.py +++ b/tests/agent_unittests/test_package_version_utils.py @@ -56,8 +56,6 @@ def cleared_package_version_cache(): _get_package_version.cache_clear() -# This test only works on Python 3.7 -@SKIP_IF_IMPORTLIB_METADATA @pytest.mark.parametrize( "attr,value,expected_value", ( @@ -75,8 +73,6 @@ def test_get_package_version(monkeypatch, attr, value, expected_value): assert version == expected_value -# This test only works on Python 3.7 -@SKIP_IF_IMPORTLIB_METADATA def test_skips_version_callables(monkeypatch): # There is no file/module here, so we monkeypatch # pytest instead for our purposes @@ -88,8 +84,6 @@ def test_skips_version_callables(monkeypatch): assert version == "3.1.0b2" -# This test only works on Python 3.7 -@SKIP_IF_IMPORTLIB_METADATA @pytest.mark.parametrize( "attr,value,expected_value", ( diff --git a/tests/external_feedparser/test_feedparser.py b/tests/external_feedparser/test_feedparser.py index 5e515cfc30..2dda3a30d2 100644 --- a/tests/external_feedparser/test_feedparser.py +++ b/tests/external_feedparser/test_feedparser.py @@ -39,7 +39,7 @@ def test_feedparser_external(feedparser, server, url): @background_task(name="test_feedparser_external") def _test(): feed = feedparser.parse(url) - assert feed["feed"]["link"] == u"https://pypi.org/" + assert feed["feed"]["link"] == "https://pypi.org/" _test() @@ -59,7 +59,7 @@ def _test(): feed = feedparser.parse(f) else: feed = feedparser.parse("packages.xml") - assert feed["feed"]["link"] == u"https://pypi.org/" + assert feed["feed"]["link"] == "https://pypi.org/" _test() @@ -72,4 +72,4 @@ def test_feedparser_no_transaction(feedparser, server, url): if url.startswith('http://'): url = url + ':' + str(server.port) feed = feedparser.parse(url) - assert feed["feed"]["link"] == u"https://pypi.org/" + assert feed["feed"]["link"] == "https://pypi.org/" diff --git a/tests/messagebroker_pika/compat.py b/tests/messagebroker_pika/compat.py index 91151a21ec..bc72a6eacd 100644 --- a/tests/messagebroker_pika/compat.py +++ b/tests/messagebroker_pika/compat.py @@ -12,12 +12,12 @@ # See the License for 
the specific language governing permissions and # limitations under the License. -from minversion import pika_version_info +from conftest import PIKA_VERSION_INFO def basic_consume(channel, queue, callback, auto_ack=None): kwargs = {"queue": queue} - if pika_version_info[0] < 1: + if PIKA_VERSION_INFO[0] < 1: kwargs["consumer_callback"] = callback if auto_ack is not None: kwargs["no_ack"] = not auto_ack diff --git a/tests/messagebroker_pika/conftest.py b/tests/messagebroker_pika/conftest.py index fc682c303c..f6cd12ae03 100644 --- a/tests/messagebroker_pika/conftest.py +++ b/tests/messagebroker_pika/conftest.py @@ -16,12 +16,18 @@ import pika import pytest + +from newrelic.common.package_version_utils import get_package_version_tuple + from testing_support.db_settings import rabbitmq_settings from testing_support.fixtures import ( # noqa: F401; pylint: disable=W0611 collector_agent_registration_fixture, collector_available_fixture, ) + +PIKA_VERSION_INFO = get_package_version_tuple("pika") + QUEUE = "test_pika-%s" % uuid.uuid4() QUEUE_2 = "test_pika-%s" % uuid.uuid4() diff --git a/tests/messagebroker_pika/minversion.py b/tests/messagebroker_pika/minversion.py deleted file mode 100644 index df034bff80..0000000000 --- a/tests/messagebroker_pika/minversion.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -import pytest -import pika - -pika_version_info = tuple(int(num) for num in pika.__version__.split(".")[:2]) - -new_pika_xfail = pytest.mark.xfail( - condition=pika_version_info[0] > 0, strict=True, reason="test fails if pika version is 1.x or greater" -) -new_pika_xfail_py37 = pytest.mark.xfail( - condition=pika_version_info[0] > 0 and sys.version_info >= (3, 7), - strict=True, - reason="test fails if pika version is 1.x or greater", -) -new_pika_skip = pytest.mark.skipif( - condition=pika_version_info[0] > 0, reason="test hangs if pika version is 1.x or greater" -) diff --git a/tests/messagebroker_pika/test_pika_async_connection_consume.py b/tests/messagebroker_pika/test_pika_async_connection_consume.py index 0ff26c4d0f..4c41aef920 100644 --- a/tests/messagebroker_pika/test_pika_async_connection_consume.py +++ b/tests/messagebroker_pika/test_pika_async_connection_consume.py @@ -28,7 +28,7 @@ QUEUE_2, REPLY_TO, ) -from minversion import pika_version_info +from conftest import PIKA_VERSION_INFO from pika.adapters.tornado_connection import TornadoConnection from testing_support.db_settings import rabbitmq_settings from testing_support.fixtures import ( @@ -181,7 +181,7 @@ def on_open_connection(connection): @pytest.mark.skipif( - condition=pika_version_info[0] > 0, reason="pika 1.0 removed the ability to use basic_get with callback=None" + condition=PIKA_VERSION_INFO[0] > 0, reason="pika 1.0 removed the ability to use basic_get with callback=None" ) @parametrized_connection @validate_transaction_metrics( diff --git a/tox.ini b/tox.ini index 83140cce92..98f528d051 100644 --- a/tox.ini +++ b/tox.ini @@ -143,8 +143,7 @@ envlist = python-logger_loguru-{py37,py38,py39,py310,py311,py312,pypy310}-logurulatest, python-logger_loguru-py39-loguru{06,05}, python-logger_structlog-{py37,py38,py39,py310,py311,py312,pypy310}-structloglatest, - ; langchain dependency faiss-cpu isn't compatible with 3.12 yet. - python-mlmodel_langchain-{py38,py39,py310,py311}, + python-mlmodel_langchain-{py38,py39,py310,py311,py312}, python-mlmodel_openai-openai0-{py37,py38,py39,py310,py311,py312}, python-mlmodel_openai-openai107-py312, python-mlmodel_openai-openailatest-{py37,py38,py39,py310,py311,py312}, @@ -162,11 +161,11 @@ envlist = [testenv] deps = # Base Dependencies - {py38,py39,py310,py311,py312,pypy310}: pytest==8.2.1 + {py38,py39,py310,py311,py312,pypy310}: pytest==8.3.2 py37: pytest==7.4.4 iniconfig coverage - WebTest==2.0.35 + WebTest==3.0.0 # Test Suite Dependencies adapter_asgiref-asgireflatest: asgiref @@ -184,8 +183,7 @@ deps = adapter_gunicorn-aiohttp03-py312: aiohttp==3.9.0rc0 adapter_gunicorn-gunicorn19: gunicorn<20 adapter_gunicorn-gunicornlatest: gunicorn - ; Temporarily pinned. 
Needs to be addressed - adapter_hypercorn-hypercornlatest: hypercorn<0.16 + adapter_hypercorn-hypercornlatest: hypercorn adapter_hypercorn-hypercorn0013: hypercorn<0.14 adapter_hypercorn-hypercorn0012: hypercorn<0.13 adapter_hypercorn-hypercorn0011: hypercorn<0.12 From 968fc4d113b2c578bac157f4839c4f46fdccb210 Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:31:26 -0700 Subject: [PATCH 4/7] Convert String Formatting to f-strings with Flynt (#1200) * Flynt - transform percent style strings * Flynt - transform .format style strings * Flynt - transform string concatenation * Flynt - transform join statements * Flynt - aggressive transforms (-a) * Flynt - convert more strings >88 chars long (-ll) * Manual - fix broken f-string expression * Manual - remove unnecessary int casts * Manual - additional manual conversions --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/workflows/get-envs.py | 6 +- newrelic/admin/__init__.py | 12 +- newrelic/admin/license_key.py | 2 +- newrelic/admin/local_config.py | 2 +- newrelic/admin/network_config.py | 14 +- newrelic/admin/record_deploy.py | 13 +- newrelic/admin/run_program.py | 5 +- newrelic/admin/run_python.py | 5 +- newrelic/admin/server_config.py | 2 +- newrelic/admin/validate_config.py | 2 +- newrelic/api/database_trace.py | 6 +- newrelic/api/datastore_trace.py | 13 +- newrelic/api/external_trace.py | 6 +- newrelic/api/function_trace.py | 15 +- newrelic/api/generator_trace.py | 2 +- newrelic/api/graphql_trace.py | 17 +- newrelic/api/log.py | 19 +-- newrelic/api/memcache_trace.py | 2 +- newrelic/api/message_trace.py | 6 +- newrelic/api/message_transaction.py | 6 +- newrelic/api/ml_model.py | 3 +- newrelic/api/profile_trace.py | 6 +- newrelic/api/solr_trace.py | 6 +- newrelic/api/supportability.py | 2 +- newrelic/api/time_trace.py | 10 +- newrelic/api/transaction.py | 64 +++---- newrelic/api/web_transaction.py | 4 +- newrelic/bootstrap/sitecustomize.py | 6 +- newrelic/common/agent_http.py | 50 +++--- newrelic/common/encoding_utils.py | 36 +--- newrelic/common/log_file.py | 4 +- newrelic/common/object_names.py | 8 +- newrelic/common/system_info.py | 4 +- newrelic/common/utilization.py | 2 +- newrelic/config.py | 31 ++-- newrelic/console.py | 38 ++--- newrelic/core/agent.py | 26 +-- newrelic/core/agent_protocol.py | 2 +- newrelic/core/agent_streaming.py | 2 +- newrelic/core/application.py | 64 ++++--- newrelic/core/attribute.py | 2 +- newrelic/core/attribute_filter.py | 4 +- newrelic/core/code_level_metrics.py | 4 +- newrelic/core/config.py | 22 +-- newrelic/core/context.py | 2 +- newrelic/core/data_collector.py | 2 +- newrelic/core/database_node.py | 20 +-- newrelic/core/database_utils.py | 18 +- newrelic/core/datastore_node.py | 15 +- newrelic/core/environment.py | 2 +- newrelic/core/external_node.py | 20 +-- newrelic/core/function_node.py | 6 +- newrelic/core/graphql_node.py | 16 +- newrelic/core/loop_node.py | 10 +- newrelic/core/memcache_node.py | 4 +- newrelic/core/message_node.py | 3 +- newrelic/core/node_mixin.py | 6 +- newrelic/core/otlp_utils.py | 2 +- newrelic/core/profile_sessions.py | 4 +- newrelic/core/rules_engine.py | 4 +- newrelic/core/solr_node.py | 4 +- newrelic/core/stack_trace.py | 3 +- newrelic/core/stats_engine.py | 5 +- newrelic/core/string_table.py | 2 +- newrelic/core/trace_cache.py | 4 +- newrelic/core/transaction_node.py | 25 ++- newrelic/hooks/application_celery.py | 2 +- newrelic/hooks/application_gearman.py | 6 
+- .../hooks/component_djangorestframework.py | 5 +- newrelic/hooks/component_tastypie.py | 4 +- newrelic/hooks/database_asyncpg.py | 2 +- newrelic/hooks/database_dbapi2.py | 4 +- newrelic/hooks/database_dbapi2_async.py | 4 +- newrelic/hooks/database_psycopg.py | 4 +- newrelic/hooks/database_psycopg2.py | 4 +- newrelic/hooks/datastore_aiomcache.py | 2 +- newrelic/hooks/datastore_aioredis.py | 6 +- newrelic/hooks/datastore_aredis.py | 4 +- newrelic/hooks/datastore_bmemcached.py | 2 +- newrelic/hooks/datastore_elasticsearch.py | 4 +- newrelic/hooks/datastore_firestore.py | 52 +++--- newrelic/hooks/datastore_memcache.py | 4 +- newrelic/hooks/datastore_motor.py | 6 +- newrelic/hooks/datastore_pyelasticsearch.py | 2 +- newrelic/hooks/datastore_pylibmc.py | 2 +- newrelic/hooks/datastore_pymemcache.py | 2 +- newrelic/hooks/datastore_pymongo.py | 6 +- newrelic/hooks/datastore_pysolr.py | 4 +- newrelic/hooks/datastore_redis.py | 8 +- newrelic/hooks/datastore_solrpy.py | 2 +- newrelic/hooks/external_botocore.py | 6 +- newrelic/hooks/external_facepy.py | 2 +- newrelic/hooks/external_feedparser.py | 2 +- newrelic/hooks/external_httplib.py | 2 +- newrelic/hooks/external_thrift.py | 4 +- newrelic/hooks/external_urllib3.py | 2 +- newrelic/hooks/external_xmlrpclib.py | 2 +- newrelic/hooks/framework_aiohttp.py | 2 +- newrelic/hooks/framework_django.py | 2 +- newrelic/hooks/framework_flask.py | 2 +- newrelic/hooks/framework_graphql.py | 2 +- newrelic/hooks/framework_grpc.py | 4 +- newrelic/hooks/framework_pylons.py | 2 +- newrelic/hooks/framework_sanic.py | 2 +- newrelic/hooks/framework_web2py.py | 19 +-- newrelic/hooks/logger_logging.py | 8 +- newrelic/hooks/logger_loguru.py | 8 +- newrelic/hooks/logger_structlog.py | 4 +- .../hooks/messagebroker_confluentkafka.py | 12 +- newrelic/hooks/messagebroker_kafkapython.py | 16 +- newrelic/hooks/messagebroker_pika.py | 2 +- newrelic/hooks/middleware_flask_compress.py | 2 +- newrelic/hooks/middleware_weberror.py | 2 +- newrelic/hooks/mlmodel_langchain.py | 19 +-- newrelic/hooks/mlmodel_openai.py | 4 +- newrelic/hooks/mlmodel_sklearn.py | 32 ++-- newrelic/network/addresses.py | 8 +- newrelic/samplers/data_sampler.py | 2 +- newrelic/samplers/gc_data.py | 16 +- newrelic/samplers/memory_usage.py | 4 +- tests/adapter_daphne/test_daphne.py | 8 +- tests/adapter_gevent/conftest.py | 2 +- .../test_aiohttp_app_factory.py | 4 +- tests/adapter_gunicorn/test_asgi_app.py | 4 +- tests/adapter_gunicorn/test_gaiohttp.py | 4 +- tests/adapter_hypercorn/test_hypercorn.py | 12 +- tests/adapter_uvicorn/test_uvicorn.py | 4 +- tests/adapter_waitress/conftest.py | 2 +- tests/adapter_waitress/test_wsgi.py | 8 +- tests/agent_features/test_asgi_browser.py | 4 +- .../test_asgi_distributed_tracing.py | 4 +- .../test_asgi_w3c_trace_context.py | 18 +- tests/agent_features/test_attribute.py | 14 +- .../test_attributes_in_action.py | 28 +-- tests/agent_features/test_browser.py | 4 +- tests/agent_features/test_cat.py | 2 +- .../agent_features/test_code_level_metrics.py | 8 +- .../test_coroutine_transaction.py | 2 +- tests/agent_features/test_custom_events.py | 12 +- .../test_distributed_tracing.py | 4 +- tests/agent_features/test_error_events.py | 2 +- .../test_error_group_callback.py | 5 +- .../agent_features/test_high_security_mode.py | 2 +- tests/agent_features/test_log_events.py | 14 +- tests/agent_features/test_logs_in_context.py | 2 +- tests/agent_features/test_notice_error.py | 2 +- tests/agent_features/test_serverless_mode.py | 2 +- tests/agent_features/test_span_events.py | 14 +- 
tests/agent_features/test_stack_trace.py | 2 +- ...n_event_data_and_some_browser_stuff_too.py | 2 +- .../agent_features/test_w3c_trace_context.py | 18 +- tests/agent_streaming/test_streaming_rpc.py | 10 +- tests/agent_unittests/test_agent_connect.py | 6 +- .../agent_unittests/test_check_environment.py | 2 +- tests/agent_unittests/test_environment.py | 2 +- .../agent_unittests/test_full_uri_payloads.py | 2 +- tests/agent_unittests/test_http_client.py | 18 +- .../test_region_aware_settings.py | 18 +- tests/agent_unittests/test_sampler_metrics.py | 44 ++--- .../application_celery/_target_application.py | 2 +- tests/application_gearman/test_gearman.py | 2 +- .../test_application.py | 26 +-- .../component_flask_rest/_test_application.py | 3 +- .../component_flask_rest/test_application.py | 2 +- tests/component_graphqlserver/test_graphql.py | 28 +-- tests/component_tastypie/test_application.py | 10 +- tests/cross_agent/test_cat_map.py | 2 +- tests/cross_agent/test_collector_hostname.py | 4 +- tests/cross_agent/test_distributed_tracing.py | 2 +- tests/cross_agent/test_lambda_event_source.py | 2 +- tests/cross_agent/test_w3c_trace_context.py | 4 +- tests/datastore_aiomcache/test_aiomcache.py | 4 +- tests/datastore_aioredis/conftest.py | 4 +- .../test_custom_conn_pool.py | 2 +- .../test_execute_command.py | 2 +- tests/datastore_aioredis/test_get_and_set.py | 2 +- tests/datastore_aioredis/test_multiple_dbs.py | 12 +- tests/datastore_aioredis/test_span_event.py | 2 +- .../test_uninstrumented_methods.py | 2 +- .../datastore_aredis/test_custom_conn_pool.py | 2 +- .../datastore_aredis/test_execute_command.py | 2 +- tests/datastore_aredis/test_get_and_set.py | 2 +- tests/datastore_aredis/test_multiple_dbs.py | 8 +- tests/datastore_aredis/test_span_event.py | 2 +- .../test_uninstrumented_methods.py | 2 +- tests/datastore_asyncpg/test_multiple_dbs.py | 4 +- tests/datastore_asyncpg/test_query.py | 40 ++--- tests/datastore_bmemcached/test_memcache.py | 6 +- tests/datastore_elasticsearch/conftest.py | 2 +- .../test_elasticsearch.py | 2 +- .../test_instrumented_methods.py | 4 +- tests/datastore_elasticsearch/test_mget.py | 6 +- .../test_multiple_dbs.py | 8 +- tests/datastore_firestore/conftest.py | 6 +- .../test_async_batching.py | 2 +- .../datastore_firestore/test_async_client.py | 2 +- .../test_async_collections.py | 10 +- .../test_async_documents.py | 2 +- tests/datastore_firestore/test_async_query.py | 20 +-- .../test_async_transaction.py | 14 +- tests/datastore_firestore/test_batching.py | 4 +- tests/datastore_firestore/test_client.py | 2 +- tests/datastore_firestore/test_collections.py | 10 +- tests/datastore_firestore/test_documents.py | 2 +- tests/datastore_firestore/test_query.py | 20 +-- tests/datastore_firestore/test_transaction.py | 14 +- tests/datastore_memcache/conftest.py | 2 +- tests/datastore_memcache/test_memcache.py | 6 +- tests/datastore_memcache/test_multiple_dbs.py | 14 +- tests/datastore_memcache/test_span_event.py | 6 +- tests/datastore_mysql/conftest.py | 2 +- tests/datastore_mysql/test_database.py | 84 +++++---- tests/datastore_postgresql/test_database.py | 34 ++-- tests/datastore_psycopg/test_connection.py | 39 +++-- tests/datastore_psycopg/test_cursor.py | 39 +++-- tests/datastore_psycopg/test_multiple_dbs.py | 8 +- tests/datastore_psycopg/test_obfuscation.py | 40 ++--- tests/datastore_psycopg/test_register.py | 10 +- tests/datastore_psycopg/test_rollback.py | 2 +- tests/datastore_psycopg/test_span_event.py | 2 +- tests/datastore_psycopg2/test_async.py | 20 +-- 
tests/datastore_psycopg2/test_cursor.py | 30 ++-- tests/datastore_psycopg2/test_multiple_dbs.py | 11 +- tests/datastore_psycopg2/test_obfuscation.py | 42 +++-- tests/datastore_psycopg2/test_register.py | 12 +- tests/datastore_psycopg2/test_rollback.py | 2 +- tests/datastore_psycopg2/test_span_event.py | 2 +- tests/datastore_psycopg2cffi/test_database.py | 48 +++--- tests/datastore_pylibmc/test_memcache.py | 6 +- tests/datastore_pymemcache/test_memcache.py | 4 +- tests/datastore_pymongo/test_pymongo.py | 160 +++++++++--------- tests/datastore_pymssql/test_database.py | 41 +++-- tests/datastore_pymysql/test_database.py | 65 ++++--- tests/datastore_pyodbc/test_pyodbc.py | 12 +- tests/datastore_pysolr/test_solr.py | 8 +- tests/datastore_redis/test_asyncio.py | 4 +- .../datastore_redis/test_custom_conn_pool.py | 2 +- tests/datastore_redis/test_execute_command.py | 2 +- tests/datastore_redis/test_generators.py | 2 +- tests/datastore_redis/test_get_and_set.py | 2 +- tests/datastore_redis/test_multiple_dbs.py | 4 +- tests/datastore_redis/test_span_event.py | 2 +- .../test_uninstrumented_methods.py | 2 +- ...est_uninstrumented_rediscluster_methods.py | 2 +- tests/datastore_solrpy/test_solr.py | 10 +- .../test_aiobotocore_dynamodb.py | 18 +- .../test_aiobotocore_s3.py | 24 +-- .../test_aiobotocore_sns.py | 4 +- .../test_aiobotocore_sqs.py | 16 +- .../_mock_bedrock_encoding_utils.py | 4 +- .../_mock_external_bedrock_server.py | 8 +- tests/external_botocore/conftest.py | 2 +- .../test_bedrock_chat_completion.py | 34 ++-- ...t_bedrock_chat_completion_via_langchain.py | 2 +- .../test_bedrock_embeddings.py | 16 +- tests/external_botocore/test_boto3_iam.py | 2 +- tests/external_botocore/test_boto3_s3.py | 30 ++-- tests/external_botocore/test_boto3_sns.py | 2 +- .../test_botocore_dynamodb.py | 18 +- tests/external_botocore/test_botocore_ec2.py | 2 +- tests/external_botocore/test_botocore_s3.py | 26 +-- tests/external_botocore/test_botocore_sqs.py | 14 +- tests/external_botocore/test_s3transfer.py | 22 +-- tests/external_feedparser/test_feedparser.py | 8 +- tests/external_http/test_http.py | 12 +- tests/external_httplib/test_httplib.py | 38 ++--- tests/external_httplib/test_urllib.py | 46 ++--- tests/external_httplib/test_urllib2.py | 32 ++-- tests/external_httplib2/test_httplib2.py | 16 +- tests/external_httpx/test_client.py | 42 ++--- tests/external_requests/test_requests.py | 24 +-- tests/external_requests/test_span_event.py | 6 +- tests/external_urllib3/test_urllib3.py | 46 ++--- .../framework_aiohttp/_target_application.py | 2 +- tests/framework_aiohttp/conftest.py | 6 +- tests/framework_aiohttp/test_client.py | 4 +- .../test_client_async_await.py | 4 +- tests/framework_aiohttp/test_client_cat.py | 18 +- tests/framework_aiohttp/test_middleware.py | 2 +- tests/framework_aiohttp/test_server.py | 12 +- tests/framework_aiohttp/test_server_cat.py | 4 +- tests/framework_bottle/test_application.py | 16 +- .../framework_django/test_asgi_application.py | 4 +- tests/framework_django/views.py | 3 +- tests/framework_falcon/test_application.py | 6 +- tests/framework_fastapi/test_application.py | 2 +- tests/framework_flask/_test_compress.py | 2 +- tests/framework_graphql/test_application.py | 76 ++++----- tests/framework_grpc/conftest.py | 2 +- .../sample_application/__init__.py | 24 +-- tests/framework_grpc/test_clients.py | 39 ++--- .../test_distributed_tracing.py | 9 +- tests/framework_grpc/test_server.py | 6 +- tests/framework_sanic/test_application.py | 16 +- .../framework_sanic/test_cross_application.py | 10 
+- tests/framework_starlette/test_application.py | 16 +- tests/framework_starlette/test_bg_tasks.py | 30 ++-- .../framework_tornado/_target_application.py | 2 +- .../framework_tornado/test_custom_handler.py | 2 +- tests/framework_tornado/test_externals.py | 24 +-- tests/framework_tornado/test_inbound_cat.py | 6 +- tests/framework_tornado/test_server.py | 12 +- tests/logger_logging/test_local_decorating.py | 16 +- tests/logger_loguru/test_attributes.py | 2 +- tests/logger_loguru/test_local_decorating.py | 6 +- tests/logger_loguru/test_settings.py | 6 +- tests/logger_structlog/conftest.py | 2 +- .../logger_structlog/test_local_decorating.py | 6 +- .../messagebroker_confluentkafka/conftest.py | 6 +- .../test_consumer.py | 18 +- .../test_producer.py | 4 +- .../test_serialization.py | 12 +- tests/messagebroker_kafkapython/conftest.py | 6 +- .../test_consumer.py | 18 +- .../test_producer.py | 4 +- .../test_serialization.py | 4 +- tests/messagebroker_pika/conftest.py | 9 +- tests/messagebroker_pika/test_cat.py | 4 +- .../test_distributed_tracing.py | 6 +- .../test_pika_async_connection_consume.py | 28 +-- .../test_pika_blocking_connection_consume.py | 30 ++-- ...a_blocking_connection_consume_generator.py | 22 +-- .../test_pika_instance_info.py | 2 +- .../_mock_external_openai_server.py | 4 +- tests/mlmodel_langchain/conftest.py | 4 +- tests/mlmodel_langchain/test_agent.py | 4 +- tests/mlmodel_langchain/test_chain.py | 66 ++++---- tests/mlmodel_langchain/test_tool.py | 22 +-- tests/mlmodel_langchain/test_vectorstore.py | 22 ++- .../_mock_external_openai_server.py | 4 +- tests/mlmodel_openai/conftest.py | 6 +- tests/mlmodel_openai/test_chat_completion.py | 12 +- .../test_chat_completion_stream.py | 16 +- .../test_chat_completion_stream_v1.py | 28 +-- .../mlmodel_openai/test_chat_completion_v1.py | 16 +- tests/mlmodel_openai/test_embeddings.py | 12 +- tests/mlmodel_openai/test_embeddings_error.py | 24 +-- .../test_embeddings_error_v1.py | 40 ++--- tests/mlmodel_openai/test_embeddings_v1.py | 16 +- tests/mlmodel_sklearn/test_metric_scorers.py | 12 +- .../mlmodel_sklearn/test_prediction_stats.py | 4 +- tests/testing_support/db_settings.py | 6 +- tests/testing_support/external_fixtures.py | 4 +- tests/testing_support/fixtures.py | 18 +- .../mock_external_grpc_server.py | 4 +- tests/testing_support/sample_applications.py | 2 +- .../validators/validate_apdex_metrics.py | 2 +- .../validators/validate_application_errors.py | 12 +- .../validators/validate_custom_events.py | 16 +- ...date_custom_metrics_outside_transaction.py | 6 +- .../validators/validate_custom_parameters.py | 6 +- .../validate_datastore_trace_inputs.py | 16 +- .../validate_dimensional_metric_payload.py | 48 ++---- ...dimensional_metrics_outside_transaction.py | 8 +- ...or_event_attributes_outside_transaction.py | 5 +- .../validate_error_trace_attributes.py | 2 +- .../validators/validate_internal_metrics.py | 4 +- .../validators/validate_log_event_count.py | 2 +- ...ate_log_event_count_outside_transaction.py | 2 +- .../validators/validate_log_events.py | 14 +- ...validate_log_events_outside_transaction.py | 14 +- .../validators/validate_metric_payload.py | 2 +- .../validators/validate_ml_event_payload.py | 8 +- .../validators/validate_ml_events.py | 16 +- .../validate_non_transaction_error_event.py | 4 +- .../validators/validate_span_events.py | 10 +- .../validators/validate_synthetics_event.py | 8 +- .../validate_synthetics_transaction_trace.py | 12 +- ...lidate_time_metrics_outside_transaction.py | 6 +- 
.../validators/validate_transaction_errors.py | 12 +- .../validate_transaction_metrics.py | 20 +-- .../validate_transaction_slow_sql_count.py | 2 +- .../validators/validate_tt_collector_json.py | 2 +- .../validators/validate_tt_parameters.py | 10 +- 374 files changed, 1972 insertions(+), 2207 deletions(-) diff --git a/.github/workflows/get-envs.py b/.github/workflows/get-envs.py index e4bbc79ade..665387248b 100755 --- a/.github/workflows/get-envs.py +++ b/.github/workflows/get-envs.py @@ -25,11 +25,7 @@ def main(f): filtered_envs = environments[GROUP_NUMBER::TOTAL_GROUPS] joined_envs = ",".join(filtered_envs) - assert joined_envs, "No environments found.\nenvironments = %s\nGROUP_NUMBER = %d\nTOTAL_GROUPS = %d" % ( - str(environments), - GROUP_NUMBER + 1, - TOTAL_GROUPS, - ) + assert joined_envs, f"No environments found.\nenvironments = {str(environments)}\nGROUP_NUMBER = {GROUP_NUMBER + 1}\nTOTAL_GROUPS = {TOTAL_GROUPS}" print(joined_envs) diff --git a/newrelic/admin/__init__.py b/newrelic/admin/__init__.py index 4ad2e8cb21..e07241ba19 100644 --- a/newrelic/admin/__init__.py +++ b/newrelic/admin/__init__.py @@ -49,7 +49,7 @@ def usage(name): details = _commands[name] if details.deprecated: print("[WARNING] This command is deprecated and will be removed") - print("Usage: newrelic-admin %s %s" % (name, details.options)) + print(f"Usage: newrelic-admin {name} {details.options}") @command("help", "[command]", hidden=True) @@ -72,18 +72,18 @@ def help(args): name = args[0] if name not in _commands: - print("Unknown command '%s'." % name, end=" ") + print(f"Unknown command '{name}'.", end=" ") print("Type 'newrelic-admin help' for usage.") else: details = _commands[name] - print("Usage: newrelic-admin %s %s" % (name, details.options)) + print(f"Usage: newrelic-admin {name} {details.options}") if details.description: print() description = details.description if details.deprecated: - description = "[DEPRECATED] " + description + description = f"[DEPRECATED] {description}" print(description) @@ -115,7 +115,7 @@ def emit(self, record): def load_internal_plugins(): for name in _builtin_plugins: - module_name = "%s.%s" % (__name__, name) + module_name = f"{__name__}.{name}" __import__(module_name) @@ -149,7 +149,7 @@ def main(): callback = _commands[command] except Exception: - print("Unknown command '%s'." 
% command, end="") + print(f"Unknown command '{command}'.", end="") print("Type 'newrelic-admin help' for usage.") sys.exit(1) diff --git a/newrelic/admin/license_key.py b/newrelic/admin/license_key.py index ea1e65bb62..4effdbd820 100644 --- a/newrelic/admin/license_key.py +++ b/newrelic/admin/license_key.py @@ -56,4 +56,4 @@ def license_key(args): _settings = global_settings() - print("license_key = %r" % obfuscate_license_key(_settings.license_key)) + print(f"license_key = {obfuscate_license_key(_settings.license_key)!r}") diff --git a/newrelic/admin/local_config.py b/newrelic/admin/local_config.py index 9d1459c902..5d6d1daaab 100644 --- a/newrelic/admin/local_config.py +++ b/newrelic/admin/local_config.py @@ -52,4 +52,4 @@ def local_config(args): log_file=log_file, log_level=log_level) for key, value in sorted(global_settings()): - print('%s = %r' % (key, value)) + print(f'{key} = {value!r}') diff --git a/newrelic/admin/network_config.py b/newrelic/admin/network_config.py index 9d6c3b3f27..590e5a400b 100644 --- a/newrelic/admin/network_config.py +++ b/newrelic/admin/network_config.py @@ -53,10 +53,10 @@ def network_config(args): _settings = global_settings() - print('host = %r' % _settings.host) - print('port = %r' % _settings.port) - print('proxy_scheme = %r' % _settings.proxy_scheme) - print('proxy_host = %r' % _settings.proxy_host) - print('proxy_port = %r' % _settings.proxy_port) - print('proxy_user = %r' % _settings.proxy_user) - print('proxy_pass = %r' % _settings.proxy_pass) + print(f'host = {_settings.host!r}') + print(f'port = {_settings.port!r}') + print(f'proxy_scheme = {_settings.proxy_scheme!r}') + print(f'proxy_host = {_settings.proxy_host!r}') + print(f'proxy_port = {_settings.proxy_port!r}') + print(f'proxy_user = {_settings.proxy_user!r}') + print(f'proxy_pass = {_settings.proxy_pass!r}') diff --git a/newrelic/admin/record_deploy.py b/newrelic/admin/record_deploy.py index 8de478c72c..d08ef1dd71 100644 --- a/newrelic/admin/record_deploy.py +++ b/newrelic/admin/record_deploy.py @@ -76,12 +76,10 @@ def record_deploy( app_id = fetch_app_id(app_name, client, headers) if app_id is None: raise RuntimeError( - "The application named %r was not found in your account. Please " - "try running the newrelic-admin server-config command to force " - "the application to register with New Relic." % app_name + f"The application named {app_name!r} was not found in your account. Please try running the newrelic-admin server-config command to force the application to register with New Relic." ) - path = "/v2/applications/{}/deployments.json".format(app_id) + path = f"/v2/applications/{app_id}/deployments.json" if user is None: user = pwd.getpwuid(os.getuid()).pw_gecos @@ -105,12 +103,7 @@ def record_deploy( if status_code != 201: raise RuntimeError( - "An unexpected HTTP response of %r was received " - "for request made to https://%s:%d%s. The payload for the " - "request was %r. The response payload for the request was %r. " - "If this issue persists then please report this problem to New " - "Relic support for further investigation." - % (status_code, host, port, path, data, response) + f"An unexpected HTTP response of {status_code!r} was received for request made to https://{host}:{int(port)}{path}. The payload for the request was {data!r}. The response payload for the request was {response!r}. If this issue persists then please report this problem to New Relic support for further investigation." 
) diff --git a/newrelic/admin/run_program.py b/newrelic/admin/run_program.py index cb99c87a79..747cf95d24 100644 --- a/newrelic/admin/run_program.py +++ b/newrelic/admin/run_program.py @@ -40,7 +40,7 @@ def log_message(text, *args): if startup_debug: text = text % args timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) - print('NEWRELIC: %s (%d) - %s' % (timestamp, os.getpid(), text)) + print(f'NEWRELIC: {timestamp} ({os.getpid()}) - {text}') log_message('New Relic Admin Script (%s)', __file__) @@ -78,8 +78,7 @@ def log_message(text, *args): if 'PYTHONPATH' in os.environ: path = os.environ['PYTHONPATH'].split(os.path.pathsep) if boot_directory not in path: - python_path = "%s%s%s" % (boot_directory, os.path.pathsep, - os.environ['PYTHONPATH']) + python_path = f"{boot_directory}{os.path.pathsep}{os.environ['PYTHONPATH']}" os.environ['PYTHONPATH'] = python_path diff --git a/newrelic/admin/run_python.py b/newrelic/admin/run_python.py index 5a4454fb7c..6aad03b5c5 100644 --- a/newrelic/admin/run_python.py +++ b/newrelic/admin/run_python.py @@ -35,7 +35,7 @@ def log_message(text, *args): if startup_debug: text = text % args timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) - print('NEWRELIC: %s (%d) - %s' % (timestamp, os.getpid(), text)) + print(f'NEWRELIC: {timestamp} ({os.getpid()}) - {text}') log_message('New Relic Admin Script (%s)', __file__) @@ -71,8 +71,7 @@ def log_message(text, *args): if 'PYTHONPATH' in os.environ: path = os.environ['PYTHONPATH'].split(os.path.pathsep) if not boot_directory in path: - python_path = "%s%s%s" % (boot_directory, os.path.pathsep, - os.environ['PYTHONPATH']) + python_path = f"{boot_directory}{os.path.pathsep}{os.environ['PYTHONPATH']}" os.environ['PYTHONPATH'] = python_path diff --git a/newrelic/admin/server_config.py b/newrelic/admin/server_config.py index 6141e1f4b2..b2a4b5074e 100644 --- a/newrelic/admin/server_config.py +++ b/newrelic/admin/server_config.py @@ -73,4 +73,4 @@ def server_config(args): _logger.debug('Registration took %s seconds.', _duration) for key, value in sorted(_application.settings): - print('%s = %r' % (key, value)) + print(f'{key} = {value!r}') diff --git a/newrelic/admin/validate_config.py b/newrelic/admin/validate_config.py index 86195470ec..c31bd1c5ef 100644 --- a/newrelic/admin/validate_config.py +++ b/newrelic/admin/validate_config.py @@ -223,7 +223,7 @@ def validate_config(args): url = parts[1].strip() print("Registration successful. 
Reporting to:") print() - print(" %s" % url) + print(f" {url}") print() break diff --git a/newrelic/api/database_trace.py b/newrelic/api/database_trace.py index 8990a1ef48..c09d1345bf 100644 --- a/newrelic/api/database_trace.py +++ b/newrelic/api/database_trace.py @@ -89,11 +89,7 @@ def __enter__(self): return result def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict(sql=self.sql, dbapi2_module=self.dbapi2_module), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(sql=self.sql, dbapi2_module=self.dbapi2_module)}>" @property def is_async_mode(self): diff --git a/newrelic/api/datastore_trace.py b/newrelic/api/datastore_trace.py index 0401c79ea5..df198f094f 100644 --- a/newrelic/api/datastore_trace.py +++ b/newrelic/api/datastore_trace.py @@ -92,18 +92,7 @@ def __enter__(self): return result def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict( - product=self.product, - target=self.target, - operation=self.operation, - host=self.host, - port_path_or_id=self.port_path_or_id, - database_name=self.database_name, - ), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(product=self.product, target=self.target, operation=self.operation, host=self.host, port_path_or_id=self.port_path_or_id, database_name=self.database_name)}>" def finalize_data(self, transaction, exc=None, value=None, tb=None): if not self.instance_reporting_enabled: diff --git a/newrelic/api/external_trace.py b/newrelic/api/external_trace.py index 2e147df450..26762e687c 100644 --- a/newrelic/api/external_trace.py +++ b/newrelic/api/external_trace.py @@ -36,11 +36,7 @@ def __init__(self, library, url, method=None, **kwargs): self.params = {} def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict(library=self.library, url=self.url, method=self.method), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(library=self.library, url=self.url, method=self.method)}>" def process_response(self, status_code, headers): self._add_agent_attribute("http.statusCode", status_code) diff --git a/newrelic/api/function_trace.py b/newrelic/api/function_trace.py index 85d7617b68..56bcad8607 100644 --- a/newrelic/api/function_trace.py +++ b/newrelic/api/function_trace.py @@ -37,7 +37,7 @@ def __init__(self, name, group=None, label=None, params=None, terminal=False, ro group = group or "Function" if group.startswith("/"): - group = "Function" + group + group = f"Function{group}" self.name = name self.group = group @@ -55,18 +55,7 @@ def __enter__(self): return result def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict( - name=self.name, - group=self.group, - label=self.label, - params=self.params, - terminal=self.terminal, - rollup=self.rollup, - ), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(name=self.name, group=self.group, label=self.label, params=self.params, terminal=self.terminal, rollup=self.rollup)}>" def terminal_node(self): return self.terminal diff --git a/newrelic/api/generator_trace.py b/newrelic/api/generator_trace.py index 4196597093..f6ac3f9b45 100644 --- a/newrelic/api/generator_trace.py +++ b/newrelic/api/generator_trace.py @@ -71,7 +71,7 @@ def wrapper(wrapped, instance, args, kwargs): _params = params def _generator(generator): - _gname = '%s (generator)' % _name + _gname = f'{_name} (generator)' try: value = None diff --git 
a/newrelic/api/graphql_trace.py b/newrelic/api/graphql_trace.py index e8803fa68a..3887744f48 100644 --- a/newrelic/api/graphql_trace.py +++ b/newrelic/api/graphql_trace.py @@ -39,16 +39,7 @@ def __init__(self, **kwargs): self.product = "GraphQL" def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict( - operation_name=self.operation_name, - operation_type=self.operation_type, - deepest_path=self.deepest_path, - graphql=self.graphql, - ), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(operation_name=self.operation_name, operation_type=self.operation_type, deepest_path=self.deepest_path, graphql=self.graphql)}>" @property def formatted(self): @@ -102,9 +93,9 @@ def set_transaction_name(self, priority=None): transaction = current_transaction() if transaction: name = ( - "%s/%s/%s" % (self.operation_type, self.operation_name, self.deepest_path) + f"{self.operation_type}/{self.operation_name}/{self.deepest_path}" if self.deepest_path - else "%s/%s" % (self.operation_type, self.operation_name) + else f"{self.operation_type}/{self.operation_name}" ) transaction.set_transaction_name(name, "GraphQL", priority=priority) @@ -154,7 +145,7 @@ def __init__(self, field_name=None, field_parent_type=None, field_return_type=No self._product = None def __repr__(self): - return "<%s object at 0x%x %s>" % (self.__class__.__name__, id(self), dict(field_name=self.field_name)) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(field_name=self.field_name)}>" def __enter__(self): super(GraphQLResolverTrace, self).__enter__() diff --git a/newrelic/api/log.py b/newrelic/api/log.py index 1bff50865c..4cb1c84a37 100644 --- a/newrelic/api/log.py +++ b/newrelic/api/log.py @@ -47,7 +47,7 @@ def safe_json_encode(obj, ignore_string_types=False, **kwargs): return repr(obj) except Exception: # If repr fails then default to an unprinatable object name - return "<unprintable %s object>" % type(obj).__name__ + return f"<unprintable {type(obj).__name__} object>" class NewRelicContextFormatter(logging.Formatter): @@ -115,7 +115,7 @@ def log_record_to_dict(cls, record, stack_trace_limit=0): # add them to the output record. keys_to_add = set(record.__dict__.keys()) - DEFAULT_LOG_RECORD_KEYS for key in keys_to_add: - output["extra." + key] = getattr(record, key) + output[f"extra.{key}"] = getattr(record, key) if record.exc_info: output.update(cls.format_exc_info(record.exc_info, stack_trace_limit)) @@ -156,7 +156,7 @@ def emit(self, record): level_name = str(getattr(record, "levelname", "UNKNOWN")) if settings.application_logging.metrics.enabled: nr.record_custom_metric("Logging/lines", {"count": 1}) - nr.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + nr.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) if settings.application_logging.forwarding.enabled: if self.formatter: @@ -251,16 +251,7 @@ def emit(self, record): status_code, response = self.client.send_request(path=self.PATH, headers=headers, payload=payload) if status_code < 200 or status_code >= 300: raise RuntimeError( - "An unexpected HTTP response of %r was received for request made to https://%s:%d%s." - "The response payload for the request was %r. If this issue persists then please " - "report this problem to New Relic support for further investigation."
- % ( - status_code, - self.client._host, - self.client._port, - self.PATH, - truncate(response.decode("utf-8"), 1024), - ) + f"An unexpected HTTP response of {status_code!r} was received for request made to https://{self.client._host}:{int(self.client._port)}{self.PATH}.The response payload for the request was {truncate(response.decode('utf-8'), 1024)!r}. If this issue persists then please report this problem to New Relic support for further investigation." ) except Exception: @@ -275,5 +266,5 @@ def default_host(self, license_key): return "log-api.newrelic.com" region = region_aware_match.group(1) - host = "log-api." + region + ".newrelic.com" + host = f"log-api.{region}.newrelic.com" return host diff --git a/newrelic/api/memcache_trace.py b/newrelic/api/memcache_trace.py index 87f12f9fc7..1dd5886b09 100644 --- a/newrelic/api/memcache_trace.py +++ b/newrelic/api/memcache_trace.py @@ -32,7 +32,7 @@ def __init__(self, command, **kwargs): self.command = command def __repr__(self): - return "<%s object at 0x%x %s>" % (self.__class__.__name__, id(self), dict(command=self.command)) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(command=self.command)}>" def terminal_node(self): return True diff --git a/newrelic/api/message_trace.py b/newrelic/api/message_trace.py index bfa34b4009..7279b2a192 100644 --- a/newrelic/api/message_trace.py +++ b/newrelic/api/message_trace.py @@ -64,11 +64,7 @@ def __enter__(self): return result def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict(library=self.library, operation=self.operation), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(library=self.library, operation=self.operation)}>" def terminal_node(self): return self.terminal diff --git a/newrelic/api/message_transaction.py b/newrelic/api/message_transaction.py index 54a71f6eff..c1e46bdd79 100644 --- a/newrelic/api/message_transaction.py +++ b/newrelic/api/message_transaction.py @@ -60,8 +60,8 @@ def __init__( @staticmethod def get_transaction_name(library, destination_type, destination_name): - group = "Message/%s/%s" % (library, destination_type) - name = "Named/%s" % destination_name + group = f"Message/{library}/{destination_type}" + name = f"Named/{destination_name}" return name, group def _update_agent_attributes(self): @@ -77,7 +77,7 @@ def _update_agent_attributes(self): ms_attrs["message.correlationId"] = self.correlation_id if self.headers: for k, v in self.headers.items(): - new_key = "message.headers.%s" % k + new_key = f"message.headers.{k}" new_val = str(v) ms_attrs[new_key] = new_val if self.routing_key is not None: diff --git a/newrelic/api/ml_model.py b/newrelic/api/ml_model.py index f9895ba7b6..c3ab678d3e 100644 --- a/newrelic/api/ml_model.py +++ b/newrelic/api/ml_model.py @@ -116,8 +116,7 @@ def _wrap_callback(model, content): if not isinstance(token_count_val, int) or token_count_val < 0: _logger.warning( - "llm_token_count_callback returned an invalid value of %s. This value must be a positive integer and will not be recorded for the token_count." - % token_count_val + f"llm_token_count_callback returned an invalid value of {token_count_val}. This value must be a positive integer and will not be recorded for the token_count." 
) return None diff --git a/newrelic/api/profile_trace.py b/newrelic/api/profile_trace.py index 5714d7e822..fe65d02ac2 100644 --- a/newrelic/api/profile_trace.py +++ b/newrelic/api/profile_trace.py @@ -22,7 +22,7 @@ from newrelic.common.object_names import callable_name from newrelic.common.object_wrapper import FunctionWrapper, wrap_object -AGENT_PACKAGE_DIRECTORY = os.path.dirname(AGENT_PACKAGE_FILE) + "/" +AGENT_PACKAGE_DIRECTORY = f"{os.path.dirname(AGENT_PACKAGE_FILE)}/" class ProfileTrace(): @@ -99,12 +99,12 @@ def _callable(): if func: name = callable_name(func) else: - name = "%s:%s#%s" % (func_filename, func_name, func_line_no) + name = f"{func_filename}:{func_name}#{func_line_no}" else: func = arg name = callable_name(arg) if not name: - name = "%s:@%s#%s" % (func_filename, func_name, func_line_no) + name = f"{func_filename}:@{func_name}#{func_line_no}" function_trace = FunctionTrace(name=name, parent=parent) function_trace.__enter__() diff --git a/newrelic/api/solr_trace.py b/newrelic/api/solr_trace.py index c249b5a0d0..5b03c33289 100644 --- a/newrelic/api/solr_trace.py +++ b/newrelic/api/solr_trace.py @@ -31,11 +31,7 @@ def __init__(self, library, command, **kwargs): self.command = command def __repr__(self): - return "<%s object at 0x%x %s>" % ( - self.__class__.__name__, - id(self), - dict(library=self.library, command=self.command), - ) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(library=self.library, command=self.command)}>" def terminal_node(self): return True diff --git a/newrelic/api/supportability.py b/newrelic/api/supportability.py index 1fd727d466..0451ba5e6d 100644 --- a/newrelic/api/supportability.py +++ b/newrelic/api/supportability.py @@ -18,7 +18,7 @@ def wrap_api_call(method, method_name): - metric_name = 'Supportability/api/%s' % method_name + metric_name = f'Supportability/api/{method_name}' @function_wrapper def _nr_wrap_api_call_(wrapped, instance, args, kwargs): diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 4531907c08..38546ea778 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -52,7 +52,7 @@ def __init__(self, parent=None, source=None): self.exc_data = (None, None, None) self.should_record_segment_params = False # 16-digit random hex. Padded with zeros in the front. - self.guid = "%016x" % random.getrandbits(64) + self.guid = f"{random.getrandbits(64):016x}" self.agent_attributes = {} self.user_attributes = {} @@ -71,7 +71,7 @@ def _is_leaf(self): return self.child_count == len(self.children) def __repr__(self): - return "<%s object at 0x%x %s>" % (self.__class__.__name__, id(self), dict(name=getattr(self, "name", None))) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(name=getattr(self, 'name', None))}>" def __enter__(self): self.parent = parent = self.parent or current_trace() @@ -214,8 +214,7 @@ def add_code_level_metrics(self, source): node.add_attrs(self._add_agent_attribute) except Exception as exc: _logger.debug( - "Failed to extract source code context from callable %s. Report this issue to newrelic support. Exception: %s" - % (source, exc) + f"Failed to extract source code context from callable {source}. Report this issue to newrelic support. 
Exception: {exc}" ) def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_code=None): @@ -443,8 +442,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, str): raise ValueError( - "Invalid attribute value for error.group.name. Expected string, got: %s" - % repr(error_group_name_raw) + f"Invalid attribute value for error.group.name. Expected string, got: {repr(error_group_name_raw)}" ) except Exception: _logger.error( diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index a80812b25b..5c990926d3 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -269,7 +269,7 @@ def __init__(self, application, enabled=None, source=None): self.rum_token = None - trace_id = "%032x" % random.getrandbits(128) + trace_id = f"{random.getrandbits(128):032x}" # 16-digit random hex. Padded with zeros in the front. # This is the official transactionId in the UI. @@ -547,7 +547,7 @@ def __exit__(self, exc, value, tb): self.total_time += exclusive if self.client_cross_process_id is not None: - metric_name = "ClientApplication/%s/all" % (self.client_cross_process_id) + metric_name = f"ClientApplication/{self.client_cross_process_id}/all" self.record_custom_metric(metric_name, duration) # Record supportability metrics for api calls @@ -557,19 +557,19 @@ def __exit__(self, exc, value, tb): if self._frameworks: for framework, version in self._frameworks: - self.record_custom_metric("Python/Framework/%s/%s" % (framework, version), 1) + self.record_custom_metric(f"Python/Framework/{framework}/{version}", 1) if self._message_brokers: for message_broker, version in self._message_brokers: - self.record_custom_metric("Python/MessageBroker/%s/%s" % (message_broker, version), 1) + self.record_custom_metric(f"Python/MessageBroker/{message_broker}/{version}", 1) if self._dispatchers: for dispatcher, version in self._dispatchers: - self.record_custom_metric("Python/Dispatcher/%s/%s" % (dispatcher, version), 1) + self.record_custom_metric(f"Python/Dispatcher/{dispatcher}/{version}", 1) if self._ml_models: for ml_model, version in self._ml_models: - self.record_custom_metric("Supportability/Python/ML/%s/%s" % (ml_model, version), 1) + self.record_custom_metric(f"Supportability/Python/ML/{ml_model}/{version}", 1) if self._settings.distributed_tracing.enabled: # Sampled and priority need to be computed at the end of the @@ -716,9 +716,9 @@ def name_for_metric(self): # leading slash may be significant in that situation. if group in ("Uri", "NormalizedUri") and transaction_name.startswith("/"): - name = "%s%s" % (group, transaction_name) + name = f"{group}{transaction_name}" else: - name = "%s/%s" % (group, transaction_name) + name = f"{group}/{transaction_name}" return name @@ -739,7 +739,7 @@ def path(self): if self._frozen_path: return self._frozen_path - return "%s/%s" % (self.type, self.name_for_metric) + return f"{self.type}/{self.name_for_metric}" @property def trip_id(self): @@ -771,7 +771,7 @@ def path_hash(self): if not self.is_part_of_cat: return None - identifier = "%s;%s" % (self.application.name, self.path) + identifier = f"{self.application.name};{self.path}" # Check if identifier is already part of the _alternate_path_hashes and # return the value if available. 
@@ -864,7 +864,7 @@ def trace_intrinsics(self): # Add all synthetics attributes for k, v in self.synthetics_attributes.items(): if k: - i_attrs["synthetics_%s" % snake_case(k)] = v + i_attrs[f"synthetics_{snake_case(k)}"] = v if self.total_time: i_attrs["totalTime"] = self.total_time @@ -976,7 +976,7 @@ def request_parameters(self): r_attrs = {} for k, v in self._request_params.items(): - new_key = "request.parameters.%s" % k + new_key = f"request.parameters.{k}" new_val = ",".join(v) final_key, final_val = process_user_attribute(new_key, new_val) @@ -1012,7 +1012,7 @@ def user_attributes(self): def _compute_sampled_and_priority(self): if self._priority is None: # truncate priority field to 6 digits past the decimal - self._priority = float("%.6f" % random.random()) # nosec + self._priority = float(f"{random.random():.6f}") # nosec if self._sampled is None: self._sampled = self._application.compute_sampled() @@ -1143,7 +1143,7 @@ def _generate_distributed_trace_headers(self, data=None): tracestate = NrTraceState(data).text() if self.tracestate: - tracestate += "," + self.tracestate + tracestate += f",{self.tracestate}" yield ("tracestate", tracestate) self._record_supportability("Supportability/TraceContext/Create/Success") @@ -1338,7 +1338,7 @@ def accept_distributed_trace_headers(self, headers, transport_type="HTTP"): trusted_account_key = self._settings.trusted_account_key or ( self._settings.serverless_mode.enabled and self._settings.account_id ) - payload = vendors.pop(trusted_account_key + "@nr", "") + payload = vendors.pop(f"{trusted_account_key}@nr", "") self.tracing_vendors = ",".join(vendors.keys()) self.tracestate = vendors.text(limit=31) except: @@ -1536,7 +1536,7 @@ def set_transaction_name(self, name, group=None, priority=None): group = group or "Function" if group.startswith("/"): - group = "Function" + group + group = f"Function{group}" self._group = group self._name = name @@ -1845,22 +1845,22 @@ def add_ml_model_info(self, name, version=None): def dump(self, file): """Dumps details about the transaction to the file object.""" - print("Application: %s" % (self.application.name), file=file) - print("Time Started: %s" % (time.asctime(time.localtime(self.start_time))), file=file) - print("Thread Id: %r" % (self.thread_id), file=file) - print("Current Status: %d" % (self._state), file=file) - print("Recording Enabled: %s" % (self.enabled), file=file) - print("Ignore Transaction: %s" % (self.ignore_transaction), file=file) - print("Transaction Dead: %s" % (self._dead), file=file) - print("Transaction Stopped: %s" % (self.stopped), file=file) - print("Background Task: %s" % (self.background_task), file=file) - print("Request URI: %s" % (self._request_uri), file=file) - print("Transaction Group: %s" % (self._group), file=file) - print("Transaction Name: %s" % (self._name), file=file) - print("Name Priority: %r" % (self._name_priority), file=file) - print("Frozen Path: %s" % (self._frozen_path), file=file) - print("AutoRUM Disabled: %s" % (self.autorum_disabled), file=file) - print("Supress Apdex: %s" % (self.suppress_apdex), file=file) + print(f"Application: {self.application.name}", file=file) + print(f"Time Started: {time.asctime(time.localtime(self.start_time))}", file=file) + print(f"Thread Id: {self.thread_id!r}", file=file) + print(f"Current Status: {self._state}", file=file) + print(f"Recording Enabled: {self.enabled}", file=file) + print(f"Ignore Transaction: {self.ignore_transaction}", file=file) + print(f"Transaction Dead: {self._dead}", file=file) + print(f"Transaction 
Stopped: {self.stopped}", file=file) + print(f"Background Task: {self.background_task}", file=file) + print(f"Request URI: {self._request_uri}", file=file) + print(f"Transaction Group: {self._group}", file=file) + print(f"Transaction Name: {self._name}", file=file) + print(f"Name Priority: {self._name_priority!r}", file=file) + print(f"Frozen Path: {self._frozen_path}", file=file) + print(f"AutoRUM Disabled: {self.autorum_disabled}", file=file) + print(f"Supress Apdex: {self.suppress_apdex}", file=file) def current_transaction(active_only=True): diff --git a/newrelic/api/web_transaction.py b/newrelic/api/web_transaction.py index 60e16d0897..b2dd27cece 100644 --- a/newrelic/api/web_transaction.py +++ b/newrelic/api/web_transaction.py @@ -153,7 +153,7 @@ def _encode_nonce(nonce): if not nonce: return "" else: - return ' nonce="%s"' % ensure_str(nonce) # Extra space intentional + return f' nonce="{ensure_str(nonce)}"' # Extra space intentional class WebTransaction(Transaction): @@ -538,7 +538,7 @@ def _to_wsgi(key): return "CONTENT_LENGTH" elif key == "CONTENT-TYPE": return "CONTENT_TYPE" - return "HTTP_" + key.replace("-", "_") + return f"HTTP_{key.replace('-', '_')}" @staticmethod def _from_wsgi(key): diff --git a/newrelic/bootstrap/sitecustomize.py b/newrelic/bootstrap/sitecustomize.py index b479295aa6..2fed5fdc2b 100644 --- a/newrelic/bootstrap/sitecustomize.py +++ b/newrelic/bootstrap/sitecustomize.py @@ -29,7 +29,7 @@ def log_message(text, *args, **kwargs): if startup_debug or critical: text = text % args timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - sys.stdout.write("NEWRELIC: %s (%d) - %s\n" % (timestamp, os.getpid(), text)) + sys.stdout.write(f"NEWRELIC: {timestamp} ({os.getpid()}) - {text}\n") sys.stdout.flush() @@ -170,8 +170,8 @@ def del_sys_path_entry(path): # and the sys.path entry is removed afterwards to reduce chance that will # cause any issues. - log_message("new_relic_path = %r" % new_relic_path) - log_message("do_insert_path = %r" % do_insert_path) + log_message(f"new_relic_path = {new_relic_path!r}") + log_message(f"do_insert_path = {do_insert_path!r}") try: if do_insert_path: diff --git a/newrelic/common/agent_http.py b/newrelic/common/agent_http.py index 8a71daa81a..eb5b210d33 100644 --- a/newrelic/common/agent_http.py +++ b/newrelic/common/agent_http.py @@ -50,11 +50,7 @@ def get_default_verify_paths(): # does not rely on this, but is used to target specific agents if there # is a problem with data collector handling requests. -USER_AGENT = "NewRelic-PythonAgent/%s (Python %s %s)" % ( - version, - sys.version.split()[0], - sys.platform, -) +USER_AGENT = f"NewRelic-PythonAgent/{version} (Python {sys.version.split()[0]} {sys.platform})" # This is a monkey patch for urllib3 + python3.6 + gevent/eventlet. 
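# Illustrative sketch, not part of the diff: the audit-log hunk below swaps
# "%r" interpolation for the f-string !r conversion; both call repr() on the
# value, so the logged output is unchanged. The sample value is hypothetical.
params = {"method": "preconnect"}
assert "PARAMS: %r" % (params,) == f"PARAMS: {params!r}"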
@@ -138,19 +134,19 @@ def log_request(cls, fp, method, url, params, payload, headers, body=None, compr cls.AUDIT_LOG_ID += 1 print( - "TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), + f"TIME: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())!r}", file=fp, ) print(file=fp) - print("ID: %r" % cls.AUDIT_LOG_ID, file=fp) + print(f"ID: {cls.AUDIT_LOG_ID!r}", file=fp) print(file=fp) - print("PID: %r" % os.getpid(), file=fp) + print(f"PID: {os.getpid()!r}", file=fp) print(file=fp) - print("URL: %r" % url, file=fp) + print(f"URL: {url!r}", file=fp) print(file=fp) - print("PARAMS: %r" % params, file=fp) + print(f"PARAMS: {params!r}", file=fp) print(file=fp) - print("HEADERS: %r" % headers, file=fp) + print(f"HEADERS: {headers!r}", file=fp) print(file=fp) print("DATA:", end=" ", file=fp) @@ -191,18 +187,18 @@ def log_response(cls, fp, log_id, status, headers, data, connection="direct"): except Exception: result = data - print("TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=fp) + print(f"TIME: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())!r}", file=fp) print(file=fp) - print("ID: %r" % log_id, file=fp) + print(f"ID: {log_id!r}", file=fp) print(file=fp) - print("PID: %r" % os.getpid(), file=fp) + print(f"PID: {os.getpid()!r}", file=fp) print(file=fp) if exc_info: - print("Exception: %r" % exc_info[1], file=fp) + print(f"Exception: {exc_info[1]!r}", file=fp) print(file=fp) else: - print("STATUS: %r" % status, file=fp) + print(f"STATUS: {status!r}", file=fp) print(file=fp) print("HEADERS:", end=" ", file=fp) pprint(dict(headers), stream=fp) @@ -303,7 +299,7 @@ def __init__( else: self._host = proxy.host self._port = proxy.port or 443 - self._prefix = self.PREFIX_SCHEME + host + ":" + str(port) + self._prefix = f"{self.PREFIX_SCHEME}{host}:{port}" urlopen_kwargs["assert_same_host"] = False if proxy_headers: self._headers.update(proxy_headers) @@ -328,7 +324,7 @@ def _parse_proxy(scheme, host, port, username, password): else: auth = username if auth and password is not None: - auth = auth + ":" + password + auth = f"{auth}:{password}" # Host must be defined if not host: @@ -377,7 +373,7 @@ def log_request( compression_time=None, ): if not self._prefix: - url = self.CONNECTION_CLS.scheme + "://" + self._host + url + url = f"{self.CONNECTION_CLS.scheme}://{self._host}{url}" return super(HttpClient, self).log_request(fp, method, url, params, payload, headers, body, compression_time) @@ -405,7 +401,7 @@ def send_request( ): if self._proxy: proxy_scheme = self._proxy.scheme or "http" - connection = proxy_scheme + "-proxy" + connection = f"{proxy_scheme}-proxy" else: connection = "direct" @@ -536,16 +532,16 @@ def _supportability_request(params, payload, body, compression_time): # Compression was applied if compression_time is not None: internal_metric( - "Supportability/Python/Collector/%s/ZLIB/Bytes" % agent_method, + f"Supportability/Python/Collector/{agent_method}/ZLIB/Bytes", len(body), ) internal_metric("Supportability/Python/Collector/ZLIB/Bytes", len(body)) internal_metric( - "Supportability/Python/Collector/%s/ZLIB/Compress" % agent_method, + f"Supportability/Python/Collector/{agent_method}/ZLIB/Compress", compression_time, ) internal_metric( - "Supportability/Python/Collector/%s/Output/Bytes" % agent_method, + f"Supportability/Python/Collector/{agent_method}/Output/Bytes", len(payload), ) # Top level metric to aggregate overall bytes being sent @@ -555,15 +551,15 @@ def 
_supportability_response(status, exc, connection="direct"): if exc or not 200 <= status < 300: internal_count_metric("Supportability/Python/Collector/Failures", 1) - internal_count_metric("Supportability/Python/Collector/Failures/%s" % connection, 1) + internal_count_metric(f"Supportability/Python/Collector/Failures/{connection}", 1) if exc: internal_count_metric( - "Supportability/Python/Collector/Exception/" "%s" % callable_name(exc), + f"Supportability/Python/Collector/Exception/{callable_name(exc)}", 1, ) else: - internal_count_metric("Supportability/Python/Collector/HTTPError/%d" % status, 1) + internal_count_metric(f"Supportability/Python/Collector/HTTPError/{status}", 1) class ApplicationModeClient(SupportabilityMixin, HttpClient): @@ -624,7 +620,7 @@ def send_request( request_id = self.log_request( self._audit_log_fp, "POST", - "https://fake-collector.newrelic.com" + path, + f"https://fake-collector.newrelic.com{path}", params, payload, headers, diff --git a/newrelic/common/encoding_utils.py b/newrelic/common/encoding_utils.py index a7ae1ad614..59dabb346c 100644 --- a/newrelic/common/encoding_utils.py +++ b/newrelic/common/encoding_utils.py @@ -247,7 +247,7 @@ def generate_path_hash(name, seed): name = name.encode("UTF-8") path_hash = rotated ^ int(hashlib.md5(name).hexdigest()[-8:], base=16) # nosec - return "%08x" % path_hash + return f"{path_hash:08x}" def base64_encode(text): @@ -417,13 +417,9 @@ def text(self): if "id" in self: guid = self["id"] else: - guid = "{:016x}".format(random.getrandbits(64)) + guid = f"{random.getrandbits(64):016x}" - return "00-{}-{}-{:02x}".format( - self["tr"].lower().zfill(32), - guid, - int(self.get("sa", 0)), - ) + return f"00-{self['tr'].lower().zfill(32)}-{guid}-{int(self.get('sa', 0)):02x}" @classmethod def decode(cls, payload): @@ -466,7 +462,7 @@ def decode(cls, payload): class W3CTraceState(OrderedDict): def text(self, limit=32): - return ",".join("{}={}".format(k, v) for k, v in itertools.islice(self.items(), limit)) + return ",".join(f"{k}={v}" for k, v in itertools.islice(self.items(), limit)) @classmethod def decode(cls, tracestate): @@ -490,24 +486,10 @@ class NrTraceState(dict): def text(self): pr = self.get("pr", "") if pr: - pr = ("%.6f" % pr).rstrip("0").rstrip(".") - - payload = "-".join( - ( - "0-0", - self["ac"], - self["ap"], - self.get("id", ""), - self.get("tx", ""), - "1" if self.get("sa") else "0", - pr, - str(self["ti"]), - ) - ) - return "{}@nr={}".format( - self.get("tk", self["ac"]), - payload, - ) + pr = f"{pr:.6f}".rstrip("0").rstrip(".") + + payload = f"0-0-{self['ac']}-{self['ap']}-{self.get('id', '')}-{self.get('tx', '')}-{'1' if self.get('sa') else '0'}-{pr}-{str(self['ti'])}" + return f"{self.get('tk', self['ac'])}@nr={payload}" @classmethod def decode(cls, payload, tk): @@ -553,7 +535,7 @@ def capitalize(string): elif len(string) == 1: return string.capitalize() else: - return "".join((string[0].upper(), string[1:])) + return f"{string[0].upper()}{string[1:]}" def camel_case(string, upper=False): diff --git a/newrelic/common/log_file.py b/newrelic/common/log_file.py index 583e24e5ce..91ffd8ef0a 100644 --- a/newrelic/common/log_file.py +++ b/newrelic/common/log_file.py @@ -76,7 +76,7 @@ def _initialize_file_logging(log_file, log_level): _agent_logger.setLevel(log_level) _agent_logger.debug("Initializing Python agent logging.") - _agent_logger.debug('Log file "%s".' 
% log_file) + _agent_logger.debug(f'Log file "{log_file}".') def initialize_logging(log_file, log_level): @@ -101,7 +101,7 @@ def initialize_logging(log_file, log_level): except Exception: _initialize_stderr_logging(log_level) - _agent_logger.exception("Falling back to stderr logging as unable to create log file %r." % log_file) + _agent_logger.exception(f"Falling back to stderr logging as unable to create log file {log_file!r}.") _initialized = True diff --git a/newrelic/common/object_names.py b/newrelic/common/object_names.py index d9852082a3..d62371acc5 100644 --- a/newrelic/common/object_names.py +++ b/newrelic/common/object_names.py @@ -103,7 +103,7 @@ def _module_name(object): # happens for example with namedtuple classes in Python 3. if mname and mname not in sys.modules: - mname = '<%s>' % mname + mname = f'<{mname}>' # If unable to derive the module name, fallback to unknown. @@ -128,7 +128,7 @@ def _object_context(object): if cname is None: cname = getattr(object.__self__.__class__, '__qualname__') - path = '%s.%s' % (cname, object.__name__) + path = f'{cname}.{object.__name__}' else: # For functions, the __qualname__ attribute gives us the name. @@ -291,7 +291,7 @@ def parse_exc_info(exc_info): name = value.__class__.__name__ if module: - fullnames = ("%s:%s" % (module, name), "%s.%s" % (module, name)) + fullnames = (f"{module}:{name}", f"{module}.{name}") else: fullnames = (name,) @@ -299,6 +299,6 @@ # Ensure exception messages are strings message = str(value) except Exception: - message = "<unprintable %s object>" % type(value).__name__ + message = f"<unprintable {type(value).__name__} object>" return (module, name, fullnames, message) diff --git a/newrelic/common/system_info.py b/newrelic/common/system_info.py index 30f6b2a4e6..381b94a5e5 100644 --- a/newrelic/common/system_info.py +++ b/newrelic/common/system_info.py @@ -273,7 +273,7 @@ def _linux_physical_memory_used(filename=None): # data data + stack # dt dirty pages (unused in Linux 2.6) - filename = filename or "/proc/%d/statm" % os.getpid() + filename = filename or f"/proc/{os.getpid()}/statm" try: with open(filename, "r") as fp: @@ -329,7 +329,7 @@ def _resolve_hostname(use_dyno_names, dyno_shorten_prefixes): for prefix in dyno_shorten_prefixes: if prefix and dyno_name.startswith(prefix): - return "%s.*" % prefix + return f"{prefix}.*" return dyno_name diff --git a/newrelic/common/utilization.py b/newrelic/common/utilization.py index 826ad778ba..ea3fe7980c 100644 --- a/newrelic/common/utilization.py +++ b/newrelic/common/utilization.py @@ -58,7 +58,7 @@ class CommonUtilization(): @classmethod def record_error(cls, resource, data): # As per spec - internal_count_metric("Supportability/utilization/%s/error" % cls.VENDOR_NAME, 1) + internal_count_metric(f"Supportability/utilization/{cls.VENDOR_NAME}/error", 1) _logger.warning("Invalid %r data (%r): %r", cls.VENDOR_NAME, resource, data) @classmethod diff --git a/newrelic/config.py b/newrelic/config.py index df45894d9a..5b657bbb9a 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -236,9 +236,7 @@ def _raise_configuration_error(section, option=None): if not _ignore_errors: if section: raise newrelic.api.exceptions.ConfigurationError( - 'Invalid configuration for section "%s". ' - "Check New Relic agent log file for further " - "details." % section + f'Invalid configuration for section "{section}". Check New Relic agent log file for further details.' ) raise newrelic.api.exceptions.ConfigurationError( "Invalid configuration. Check New Relic agent log file for further details."
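The config.py hunks here also fold implicit string concatenation (adjacent literals that were split across lines for the old layout) into single f-strings; the result is character-for-character identical. A minimal check, using a made-up section name:

    section = "newrelic:staging"

    # Adjacent literals concatenate at compile time, then % interpolates
    old = (
        'Invalid configuration for section "%s". '
        "Check New Relic agent log file for further "
        "details." % section
    )
    new = f'Invalid configuration for section "{section}". Check New Relic agent log file for further details.'
    assert old == new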
@@ -251,14 +249,10 @@ def _raise_configuration_error(section, option=None): if not _ignore_errors: if section: raise newrelic.api.exceptions.ConfigurationError( - 'Invalid configuration for option "%s" in ' - 'section "%s". Check New Relic agent log ' - "file for further details." % (option, section) + f'Invalid configuration for option "{option}" in section "{section}". Check New Relic agent log file for further details.' ) raise newrelic.api.exceptions.ConfigurationError( - 'Invalid configuration for option "%s". ' - "Check New Relic agent log file for further " - "details." % option + f'Invalid configuration for option "{option}". Check New Relic agent log file for further details.' ) @@ -800,7 +794,7 @@ def translate_deprecated_settings(settings, cached_settings): ignored_params = fetch_config_setting(settings, "ignored_params") for p in ignored_params: - attr_value = "request.parameters." + p + attr_value = f"request.parameters.{p}" excluded_attrs = fetch_config_setting(settings, "attributes.exclude") if attr_value not in excluded_attrs: @@ -940,10 +934,7 @@ def _load_configuration( if _configuration_done: if _config_file != config_file or _environment != environment: raise newrelic.api.exceptions.ConfigurationError( - "Configuration has already been done against " - "differing configuration file or environment. " - 'Prior configuration file used was "%s" and ' - 'environment "%s".' % (_config_file, _environment) + f'Configuration has already been done against differing configuration file or environment. Prior configuration file used was "{_config_file}" and environment "{_environment}".' ) return @@ -1004,7 +995,7 @@ def _load_configuration( # name in internal settings object as indication of succeeding. if not _config_object.read([config_file]): - raise newrelic.api.exceptions.ConfigurationError("Unable to open configuration file %s." 
% config_file) + raise newrelic.api.exceptions.ConfigurationError(f"Unable to open configuration file {config_file}.") _settings.config_file = config_file @@ -1014,7 +1005,7 @@ def _load_configuration( _process_setting("newrelic", "log_file", "get", None) if environment: - _process_setting("newrelic:%s" % environment, "log_file", "get", None) + _process_setting(f"newrelic:{environment}", "log_file", "get", None) if log_file is None: log_file = _settings.log_file @@ -1022,7 +1013,7 @@ def _load_configuration( _process_setting("newrelic", "log_level", "get", _map_log_level) if environment: - _process_setting("newrelic:%s" % environment, "log_level", "get", _map_log_level) + _process_setting(f"newrelic:{environment}", "log_level", "get", _map_log_level) if log_level is None: log_level = _settings.log_level @@ -1042,7 +1033,7 @@ def _load_configuration( if environment: _settings.environment = environment - _process_configuration("newrelic:%s" % environment) + _process_configuration(f"newrelic:{environment}") # Log details of the configuration options which were # read and the values they have as would be applied @@ -1239,7 +1230,7 @@ def _module_function_glob(module, object_path): # Skip adding individual class's methods on failure available_functions.update( { - "%s.%s" % (cls, k): v + f"{cls}.{k}": v for k, v in available_classes.get(cls).__dict__.items() if callable(v) and not isinstance(v, type) } @@ -1995,7 +1986,7 @@ def _process_module_definition(target, module, function="instrument"): return try: - section = "import-hook:%s" % target + section = f"import-hook:{target}" if _config_object.has_section(section): enabled = _config_object.getboolean(section, "enabled") except configparser.NoOptionError: diff --git a/newrelic/console.py b/newrelic/console.py index c813a3073e..2e527dd9e4 100644 --- a/newrelic/console.py +++ b/newrelic/console.py @@ -53,7 +53,7 @@ def shell_command(wrapped): parser = optparse.OptionParser() for name in args[1:]: - parser.add_option("--%s" % name, dest=name) + parser.add_option(f"--{name}", dest=name) @functools.wraps(wrapped) def wrapper(self, line): @@ -69,10 +69,10 @@ def wrapper(self, line): return wrapped(self, *args, **kwargs) if wrapper.__name__.startswith("do_"): - prototype = wrapper.__name__[3:] + " " + doc_signature(wrapped) + prototype = f"{wrapper.__name__[3:]} {doc_signature(wrapped)}" if hasattr(wrapper, "__doc__") and wrapper.__doc__ is not None: - wrapper.__doc__ = "\n".join((prototype, wrapper.__doc__.lstrip("\n"))) + wrapper.__doc__ = "\n".join((prototype, wrapper.__doc__.lstrip('\n'))) # noqa: flynt return wrapper @@ -105,7 +105,7 @@ def __init__(self, name): self.name = name def __repr__(self): - return "Use %s() or %s to exit" % (self.name, eof) + return f"Use {self.name}() or {eof} to exit" def __call__(self, code=None): # If executed with our interactive console, only raise the @@ -195,7 +195,7 @@ def do_prompt(self, flag=None): Enable or disable the console prompt.""" if flag == "on": - self.prompt = "(newrelic:%d) " % os.getpid() + self.prompt = f"(newrelic:{os.getpid()}) " elif flag == "off": self.prompt = "" @@ -235,7 +235,7 @@ def do_sys_modules(self): for name, module in sorted(sys.modules.items()): if module is not None: file = getattr(module, "__file__", None) - print("%s - %s" % (name, file), file=self.stdout) + print(f"{name} - {file}", file=self.stdout) @shell_command def do_sys_meta_path(self): @@ -250,7 +250,7 @@ def do_os_environ(self): Displays the set of user environment variables.""" for key, name in os.environ.items(): - 
print("%s = %r" % (key, name), file=self.stdout) + print(f"{key} = {name!r}", file=self.stdout) @shell_command def do_current_time(self): @@ -294,7 +294,7 @@ def do_dump_config(self, name=None): config = flatten_settings(config) keys = sorted(config.keys()) for key in keys: - print("%s = %r" % (key, config[key]), file=self.stdout) + print(f"{key} = {config[key]!r}", file=self.stdout) @shell_command def do_agent_status(self): @@ -344,13 +344,13 @@ def do_import_hooks(self): result = results[key] if result is None: if key[0] not in sys.modules: - print("%s: PENDING" % (key,), file=self.stdout) + print(f"{key}: PENDING", file=self.stdout) else: - print("%s: IMPORTED" % (key,), file=self.stdout) + print(f"{key}: IMPORTED", file=self.stdout) elif not result: - print("%s: INSTRUMENTED" % (key,), file=self.stdout) + print(f"{key}: INSTRUMENTED", file=self.stdout) else: - print("%s: FAILED" % (key,), file=self.stdout) + print(f"{key}: FAILED", file=self.stdout) for line in result: print(line, end="", file=self.stdout) @@ -412,15 +412,15 @@ def do_threads(self): all = [] for threadId, stack in sys._current_frames().items(): block = [] - block.append("# ThreadID: %s" % threadId) + block.append(f"# ThreadID: {threadId}") thr = threading._active.get(threadId) if thr: - block.append("# Type: %s" % type(thr).__name__) - block.append("# Name: %s" % thr.name) + block.append(f"# Type: {type(thr).__name__}") + block.append(f"# Name: {thr.name}") for filename, lineno, name, line in traceback.extract_stack(stack): - block.append("File: '%s', line %d, in %s" % (filename, lineno, name)) + block.append(f"File: '{filename}', line {int(lineno)}, in {name}") if line: - block.append(" %s" % (line.strip())) + block.append(f" {line.strip()}") all.append("\n".join(block)) print("\n\n".join(all), file=self.stdout) @@ -512,7 +512,7 @@ def __init__(self, config_file, stdin=None, stdout=None, log=None): self.__log_object = log if not self.__config_object.read([config_file]): - raise RuntimeError("Unable to open configuration file %s." 
% config_file) + raise RuntimeError(f"Unable to open configuration file {config_file}.") listener_socket = self.__config_object.get("newrelic", "console.listener_socket") % {"pid": "*"} @@ -545,7 +545,7 @@ def do_servers(self, line): Display a list of the servers which can be connected to.""" for i in range(len(self.__servers)): - print("%s: %s" % (i + 1, self.__servers[i]), file=self.stdout) + print(f"{i + 1}: {self.__servers[i]}", file=self.stdout) def do_connect(self, line): """connect [index] diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index fde6178e1b..1b81bb9b4b 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -152,7 +152,7 @@ def agent_singleton(): initialize_logging(settings.log_file, settings.log_level) - _logger.info("New Relic Python Agent (%s)" % newrelic.version) + _logger.info(f"New Relic Python Agent ({newrelic.version})") check_environment() @@ -250,16 +250,16 @@ def uwsgi_atexit_callback(): def dump(self, file): """Dumps details about the agent to the file object.""" - print("Time Created: %s" % (time.asctime(time.localtime(self._creation_time))), file=file) - print("Initialization PID: %s" % (self._process_id), file=file) - print("Default Harvest Count: %d" % (self._default_harvest_count), file=file) - print("Flexible Harvest Count: %d" % (self._flexible_harvest_count), file=file) - print("Last Default Harvest: %s" % (time.asctime(time.localtime(self._last_default_harvest))), file=file) - print("Last Flexible Harvest: %s" % (time.asctime(time.localtime(self._last_flexible_harvest))), file=file) - print("Default Harvest Duration: %.2f" % (self._default_harvest_duration), file=file) - print("Flexible Harvest Duration: %.2f" % (self._flexible_harvest_duration), file=file) - print("Agent Shutdown: %s" % (self._harvest_shutdown.isSet()), file=file) - print("Applications: %r" % (sorted(self._applications.keys())), file=file) + print(f"Time Created: {time.asctime(time.localtime(self._creation_time))}", file=file) + print(f"Initialization PID: {self._process_id}", file=file) + print(f"Default Harvest Count: {self._default_harvest_count}", file=file) + print(f"Flexible Harvest Count: {self._flexible_harvest_count}", file=file) + print(f"Last Default Harvest: {time.asctime(time.localtime(self._last_default_harvest))}", file=file) + print(f"Last Flexible Harvest: {time.asctime(time.localtime(self._last_flexible_harvest))}", file=file) + print(f"Default Harvest Duration: {self._default_harvest_duration:.2f}", file=file) + print(f"Flexible Harvest Duration: {self._flexible_harvest_duration:.2f}", file=file) + print(f"Agent Shutdown: {self._harvest_shutdown.isSet()}", file=file) + print(f"Applications: {sorted(self._applications.keys())!r}", file=file) def global_settings(self): """Returns the global default settings object. If access is @@ -620,7 +620,7 @@ def _harvest_flexible(self, shutdown=False): try: application.harvest(shutdown=False, flexible=True) except Exception: - _logger.exception("Failed to harvest data for %s." % application.name) + _logger.exception(f"Failed to harvest data for {application.name}.") self._flexible_harvest_duration = time.time() - self._last_flexible_harvest @@ -644,7 +644,7 @@ def _harvest_default(self, shutdown=False): try: application.harvest(shutdown, flexible=False) except Exception: - _logger.exception("Failed to harvest data for %s." 
% application.name) + _logger.exception(f"Failed to harvest data for {application.name}.") self._default_harvest_duration = time.time() - self._last_default_harvest diff --git a/newrelic/core/agent_protocol.py b/newrelic/core/agent_protocol.py index 5270982dab..8d1db09fea 100644 --- a/newrelic/core/agent_protocol.py +++ b/newrelic/core/agent_protocol.py @@ -238,7 +238,7 @@ def send( if not 200 <= status < 300: if status == 413: internal_count_metric( - "Supportability/Python/Collector/MaxPayloadSizeLimit/%s" % method, + f"Supportability/Python/Collector/MaxPayloadSizeLimit/{method}", 1, ) level, message = self.LOG_MESSAGES.get(status, self.LOG_MESSAGES["default"]) diff --git a/newrelic/core/agent_streaming.py b/newrelic/core/agent_streaming.py index 213e07a5d7..dea53f3644 100644 --- a/newrelic/core/agent_streaming.py +++ b/newrelic/core/agent_streaming.py @@ -128,7 +128,7 @@ def process_responses(self): details = response_iterator.details() self.record_metric( - "Supportability/InfiniteTracing/Span/gRPC/%s" % code.name, + f"Supportability/InfiniteTracing/Span/gRPC/{code.name}", {"count": 1}, ) diff --git a/newrelic/core/application.py b/newrelic/core/application.py index 0c3c690da2..4a5632f807 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -156,31 +156,31 @@ def compute_sampled(self): def dump(self, file): """Dumps details about the application to the file object.""" - print("Time Created: %s" % (time.asctime(time.localtime(self._creation_time))), file=file) - print("Linked Applications: %r" % (self._linked_applications), file=file) - print("Registration PID: %s" % (self._process_id), file=file) - print("Harvest Count: %d" % (self._harvest_count), file=file) - print("Agent Restart: %d" % (self._agent_restart), file=file) - print("Forced Shutdown: %s" % (self._agent_shutdown), file=file) + print(f"Time Created: {time.asctime(time.localtime(self._creation_time))}", file=file) + print(f"Linked Applications: {self._linked_applications!r}", file=file) + print(f"Registration PID: {self._process_id}", file=file) + print(f"Harvest Count: {self._harvest_count}", file=file) + print(f"Agent Restart: {self._agent_restart}", file=file) + print(f"Forced Shutdown: {self._agent_shutdown}", file=file) active_session = self._active_session if active_session: try: - print("Collector URL: %s" % (active_session._protocol.client._host), file=file) + print(f"Collector URL: {active_session._protocol.client._host}", file=file) except AttributeError: pass - print("Agent Run ID: %s" % (active_session.agent_run_id), file=file) - print("URL Normalization Rules: %r" % (self._rules_engine["url"].rules), file=file) - print("Metric Normalization Rules: %r" % (self._rules_engine["metric"].rules), file=file) - print("Transaction Normalization Rules: %r" % (self._rules_engine["transaction"].rules), file=file) - print("Transaction Segment Allowlist Rules: %r" % (self._rules_engine["segment"].rules), file=file) - print("Harvest Period Start: %s" % (time.asctime(time.localtime(self._period_start))), file=file) - print("Transaction Count: %d" % (self._transaction_count), file=file) - print("Last Transaction: %s" % (time.asctime(time.localtime(self._last_transaction))), file=file) - print("Global Events Count: %d" % (self._global_events_account), file=file) - print("Harvest Metrics Count: %d" % (self._stats_engine.metrics_count()), file=file) - print("Harvest Discard Count: %d" % (self._discard_count), file=file) + print(f"Agent Run ID: {active_session.agent_run_id}", file=file) + print(f"URL 
Normalization Rules: {self._rules_engine['url'].rules!r}", file=file) + print(f"Metric Normalization Rules: {self._rules_engine['metric'].rules!r}", file=file) + print(f"Transaction Normalization Rules: {self._rules_engine['transaction'].rules!r}", file=file) + print(f"Transaction Segment Allowlist Rules: {self._rules_engine['segment'].rules!r}", file=file) + print(f"Harvest Period Start: {time.asctime(time.localtime(self._period_start))}", file=file) + print(f"Transaction Count: {self._transaction_count}", file=file) + print(f"Last Transaction: {time.asctime(time.localtime(self._last_transaction))}", file=file) + print(f"Global Events Count: {self._global_events_account}", file=file) + print(f"Harvest Metrics Count: {self._stats_engine.metrics_count()}", file=file) + print(f"Harvest Discard Count: {self._discard_count}", file=file) def activate_session(self, activate_agent=None, timeout=0.0): """Creates a background thread to initiate registration of the @@ -224,7 +224,7 @@ def activate_session(self, activate_agent=None, timeout=0.0): self._detect_deadlock = True thread = threading.Thread( - target=self.connect_to_data_collector, name="NR-Activate-Session/%s" % self.name, args=(activate_agent,) + target=self.connect_to_data_collector, name=f"NR-Activate-Session/{self.name}", args=(activate_agent,) ) thread.daemon = True thread.start() @@ -544,17 +544,15 @@ def connect_to_data_collector(self, activate_agent): ) ai_monitoring_streaming = configuration.ai_monitoring.streaming.enabled internal_metric( - "Supportability/Logging/Forwarding/Python/%s" - % ("enabled" if application_logging_forwarding else "disabled"), + f"Supportability/Logging/Forwarding/Python/{'enabled' if application_logging_forwarding else 'disabled'}", 1, ) internal_metric( - "Supportability/Logging/LocalDecorating/Python/%s" - % ("enabled" if application_logging_local_decorating else "disabled"), + f"Supportability/Logging/LocalDecorating/Python/{'enabled' if application_logging_local_decorating else 'disabled'}", 1, ) internal_metric( - "Supportability/Logging/Metrics/Python/%s" % ("enabled" if application_logging_metrics else "disabled"), + f"Supportability/Logging/Metrics/Python/{'enabled' if application_logging_metrics else 'disabled'}", 1, ) if not ai_monitoring_streaming: @@ -569,13 +567,11 @@ def connect_to_data_collector(self, activate_agent): infinite_tracing_batching = configuration.infinite_tracing.batching infinite_tracing_compression = configuration.infinite_tracing.compression internal_metric( - "Supportability/InfiniteTracing/gRPC/Batching/%s" - % ("enabled" if infinite_tracing_batching else "disabled"), + f"Supportability/InfiniteTracing/gRPC/Batching/{'enabled' if infinite_tracing_batching else 'disabled'}", 1, ) internal_metric( - "Supportability/InfiniteTracing/gRPC/Compression/%s" - % ("enabled" if infinite_tracing_compression else "disabled"), + f"Supportability/InfiniteTracing/gRPC/Compression/{'enabled' if infinite_tracing_compression else 'disabled'}", 1, ) @@ -1157,7 +1153,7 @@ def harvest(self, shutdown=False, flexible=False): call_metric = "flexible" if flexible else "default" with InternalTraceContext(internal_metrics): - with InternalTrace("Supportability/Python/Harvest/Calls/" + call_metric): + with InternalTrace(f"Supportability/Python/Harvest/Calls/{call_metric}"): self._harvest_count += 1 start = time.time() @@ -1253,7 +1249,7 @@ def harvest(self, shutdown=False, flexible=False): if self._uninstrumented: for uninstrumented in self._uninstrumented: 
internal_count_metric("Supportability/Python/Uninstrumented", 1) - internal_count_metric("Supportability/Uninstrumented/%s" % uninstrumented, 1) + internal_count_metric(f"Supportability/Uninstrumented/{uninstrumented}", 1) # Create our time stamp as to when this reporting period # ends and start reporting the data. @@ -1591,7 +1587,7 @@ def harvest(self, shutdown=False, flexible=False): exc_type = sys.exc_info()[0] - internal_metric("Supportability/Python/Harvest/Exception/%s" % callable_name(exc_type), 1) + internal_metric(f"Supportability/Python/Harvest/Exception/{callable_name(exc_type)}", 1) if self._period_start != period_end: self._stats_engine.rollback(stats) @@ -1604,7 +1600,7 @@ def harvest(self, shutdown=False, flexible=False): exc_type = sys.exc_info()[0] - internal_metric("Supportability/Python/Harvest/Exception/%s" % callable_name(exc_type), 1) + internal_metric(f"Supportability/Python/Harvest/Exception/{callable_name(exc_type)}", 1) self._discard_count += 1 @@ -1614,7 +1610,7 @@ def harvest(self, shutdown=False, flexible=False): exc_type = sys.exc_info()[0] - internal_metric("Supportability/Python/Harvest/Exception/%s" % callable_name(exc_type), 1) + internal_metric(f"Supportability/Python/Harvest/Exception/{callable_name(exc_type)}", 1) _logger.exception( "Unexpected exception when attempting " @@ -1734,7 +1730,7 @@ def process_agent_commands(self): # we don't know about a specific agent command we just # ignore it. - func_name = "cmd_%s" % cmd_name + func_name = f"cmd_{cmd_name}" cmd_handler = getattr(self, func_name, None) diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index fd37993f30..16dacb18a0 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -123,7 +123,7 @@ class NullValueException(ValueError): class Attribute(_Attribute): def __repr__(self): - return "Attribute(name=%r, value=%r, destinations=%r)" % (self.name, self.value, bin(self.destinations)) + return f"Attribute(name={self.name!r}, value={self.value!r}, destinations={bin(self.destinations)!r})" def create_attributes(attr_dict, destinations, attribute_filter): diff --git a/newrelic/core/attribute_filter.py b/newrelic/core/attribute_filter.py index 162046ca3a..a07d016416 100644 --- a/newrelic/core/attribute_filter.py +++ b/newrelic/core/attribute_filter.py @@ -65,7 +65,7 @@ def __init__(self, flattened_settings): self.cache = {} def __repr__(self): - return "" % (bin(self.enabled_destinations), self.rules) + return f"" def _set_enabled_destinations(self, settings): # Determines and returns bitfield representing attribute destinations enabled. 
@@ -207,7 +207,7 @@ def __ge__(self, other): return self._as_sortable() >= other._as_sortable() def __repr__(self): - return "(%s, %s, %s, %s)" % (self.name, bin(self.destinations), self.is_wildcard, self.is_include) + return f"({self.name}, {bin(self.destinations)}, {self.is_wildcard}, {self.is_include})" def name_match(self, name): if self.is_wildcard: diff --git a/newrelic/core/code_level_metrics.py b/newrelic/core/code_level_metrics.py index ba00d93af7..846a95b4fd 100644 --- a/newrelic/core/code_level_metrics.py +++ b/newrelic/core/code_level_metrics.py @@ -34,7 +34,7 @@ def add_attrs(self, add_attr_function): # Add attributes for k, v in self._asdict().items(): if v is not None: - add_attr_function("code.%s" % k, v) + add_attr_function(f"code.{k}", v) def extract_code_from_callable(func): @@ -97,7 +97,7 @@ def extract_code_from_callable(func): func_name = func_path[-1] # function name is last in path if len(func_path) > 1: class_name = ".".join((func_path[:-1])) - namespace = ".".join((module_name, class_name)) + namespace = f"{module_name}.{class_name}" else: namespace = module_name diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 0820965151..26dbbf0cbd 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -602,7 +602,7 @@ def default_host(license_key): return "collector.newrelic.com" region = region_aware_match.group(1) - host = "collector." + region + ".nr-data.net" + host = f"collector.{region}.nr-data.net" return host @@ -619,7 +619,7 @@ def default_otlp_host(host): otlp_host = HOST_MAP.get(host, None) if not otlp_host: default = HOST_MAP["collector.newrelic.com"] - _logger.warn("Unable to find corresponding OTLP host using default %s" % default) + _logger.warn(f"Unable to find corresponding OTLP host using default {default}") otlp_host = default return otlp_host @@ -994,7 +994,7 @@ def _flatten(settings, o, name=None): key = key[1:] if name: - key = "%s.%s" % (name, key) + key = f"{name}.{key}" if isinstance(value, Settings): if value.nested: @@ -1024,9 +1024,9 @@ def create_obfuscated_netloc(username, password, hostname, mask): password = mask if username and password: - netloc = "%s:%s@%s" % (username, password, hostname) + netloc = f"{username}:{password}@{hostname}" elif username: - netloc = "%s@%s" % (username, hostname) + netloc = f"{username}@{hostname}" else: netloc = hostname @@ -1080,9 +1080,9 @@ def global_settings_dump(settings_object=None, serializable=False): netloc = create_obfuscated_netloc(components.username, components.password, components.hostname, obfuscated) if components.port: - uri = "%s://%s:%s%s" % (components.scheme, netloc, components.port, components.path) + uri = f"{components.scheme}://{netloc}:{components.port}{components.path}" else: - uri = "%s://%s%s" % (components.scheme, netloc, components.path) + uri = f"{components.scheme}://{netloc}{components.path}" settings["proxy_host"] = uri @@ -1131,7 +1131,7 @@ def apply_config_setting(settings_object, name, value, nested=False): default_value = getattr(target, fields[0], None) if isinstance(value, dict) and value and not isinstance(default_value, dict): for k, v in value.items(): - k_name = "{}.{}".format(fields[0], k) + k_name = f"{fields[0]}.{k}" apply_config_setting(target, k_name, v, nested=True) else: setattr(target, fields[0], value) @@ -1367,8 +1367,8 @@ def error_matches_rules( return None # Retrieve settings based on prefix - classes_rules = getattr(settings.error_collector, "%s_classes" % rules_prefix, set()) - status_codes_rules = 
getattr(settings.error_collector, "%s_status_codes" % rules_prefix, set()) + classes_rules = getattr(settings.error_collector, f"{rules_prefix}_classes", set()) + status_codes_rules = getattr(settings.error_collector, f"{rules_prefix}_status_codes", set()) _, _, fullnames, _ = parse_exc_info(exc_info) fullname = fullnames[0] @@ -1390,7 +1390,7 @@ def error_matches_rules( # Coerce into integer status_code = int(status_code) except: - _logger.error("Failed to coerce status code into integer. status_code: %s" % str(status_code)) + _logger.error(f"Failed to coerce status code into integer. status_code: {str(status_code)}") else: if status_code in status_codes_rules: return True diff --git a/newrelic/core/context.py b/newrelic/core/context.py index d13d7ed080..c782350ff6 100644 --- a/newrelic/core/context.py +++ b/newrelic/core/context.py @@ -48,7 +48,7 @@ def log_propagation_failure(s): elif trace_cache_id is not None: self.trace = self.trace_cache.get(trace_cache_id, None) if self.trace is None: - log_propagation_failure("No trace with id %d." % trace_cache_id) + log_propagation_failure(f"No trace with id {trace_cache_id}.") elif hasattr(request, "_nr_trace") and request._nr_trace is not None: # Unpack traces from objects patched with them self.trace = request._nr_trace diff --git a/newrelic/core/data_collector.py b/newrelic/core/data_collector.py index d9ab80065b..2d312bc0d5 100644 --- a/newrelic/core/data_collector.py +++ b/newrelic/core/data_collector.py @@ -69,7 +69,7 @@ def connect_span_stream(self, span_iterator, record_metric): port = self.configuration.infinite_tracing.trace_observer_port ssl = self.configuration.infinite_tracing.ssl compression_setting = self.configuration.infinite_tracing.compression - endpoint = "{}:{}".format(host, port) + endpoint = f"{host}:{port}" if ( self.configuration.distributed_tracing.enabled diff --git a/newrelic/core/database_node.py b/newrelic/core/database_node.py index 7294410034..7eb18c6b96 100644 --- a/newrelic/core/database_node.py +++ b/newrelic/core/database_node.py @@ -138,9 +138,9 @@ def time_metrics(self, stats, root, parent): # Determine the scoped metric - statement_metric_name = "Datastore/statement/%s/%s/%s" % (product, target, operation) + statement_metric_name = f"Datastore/statement/{product}/{target}/{operation}" - operation_metric_name = "Datastore/operation/%s/%s" % (product, operation) + operation_metric_name = f"Datastore/operation/{product}/{operation}" if target: scoped_metric_name = statement_metric_name @@ -153,19 +153,19 @@ def time_metrics(self, stats, root, parent): yield TimeMetric(name="Datastore/all", scope="", duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name="Datastore/%s/all" % product, scope="", duration=self.duration, exclusive=self.exclusive) + yield TimeMetric(name=f"Datastore/{product}/all", scope="", duration=self.duration, exclusive=self.exclusive) if root.type == "WebTransaction": yield TimeMetric(name="Datastore/allWeb", scope="", duration=self.duration, exclusive=self.exclusive) yield TimeMetric( - name="Datastore/%s/allWeb" % product, scope="", duration=self.duration, exclusive=self.exclusive + name=f"Datastore/{product}/allWeb", scope="", duration=self.duration, exclusive=self.exclusive ) else: yield TimeMetric(name="Datastore/allOther", scope="", duration=self.duration, exclusive=self.exclusive) yield TimeMetric( - name="Datastore/%s/allOther" % product, scope="", duration=self.duration, exclusive=self.exclusive + name=f"Datastore/{product}/allOther", scope="", 
duration=self.duration, exclusive=self.exclusive ) # Unscoped operation metric @@ -181,11 +181,7 @@ def time_metrics(self, stats, root, parent): if self.instance_hostname and self.port_path_or_id: - instance_metric_name = "Datastore/instance/%s/%s/%s" % ( - product, - self.instance_hostname, - self.port_path_or_id, - ) + instance_metric_name = f"Datastore/instance/{product}/{self.instance_hostname}/{self.port_path_or_id}" yield TimeMetric(name=instance_metric_name, scope="", duration=self.duration, exclusive=self.exclusive) @@ -195,9 +191,9 @@ def slow_sql_node(self, stats, root): target = self.target if target: - name = "Datastore/statement/%s/%s/%s" % (product, target, operation) + name = f"Datastore/statement/{product}/{target}/{operation}" else: - name = "Datastore/operation/%s/%s" % (product, operation) + name = f"Datastore/operation/{product}/{operation}" request_uri = "" if root.type == "WebTransaction": diff --git a/newrelic/core/database_utils.py b/newrelic/core/database_utils.py index 41b87cd533..db734833b2 100644 --- a/newrelic/core/database_utils.py +++ b/newrelic/core/database_utils.py @@ -44,9 +44,9 @@ _dollar_quotes_p = r'(\$(?!\d)[^$]*?\$).*?(?:\1|$)' _oracle_quotes_p = (r"q'\[.*?(?:\]'|$)|q'\{.*?(?:\}'|$)|" r"q'\<.*?(?:\>'|$)|q'\(.*?(?:\)'|$)") -_any_quotes_p = _single_quotes_p + '|' + _double_quotes_p -_single_dollar_p = _single_quotes_p + '|' + _dollar_quotes_p -_single_oracle_p = _single_quotes_p + '|' + _oracle_quotes_p +_any_quotes_p = f"{_single_quotes_p}|{_double_quotes_p}" +_single_dollar_p = f"{_single_quotes_p}|{_dollar_quotes_p}" +_single_oracle_p = f"{_single_quotes_p}|{_oracle_quotes_p}" _single_quotes_re = re.compile(_single_quotes_p) _any_quotes_re = re.compile(_any_quotes_p) @@ -85,7 +85,7 @@ # first to avoid the situation of partial matches on shorter expressions. UUIDs # might be an example. 
-_all_literals_p = '(' + ')|('.join([_uuid_p, _hex_p, _int_p, _bool_p]) + ')' +_all_literals_p = f"({_uuid_p})|({_hex_p})|({_int_p})|({_bool_p})" _all_literals_re = re.compile(_all_literals_p, re.IGNORECASE) _quotes_table = { @@ -278,10 +278,7 @@ def _parse_default(sql, regex): _parse_identifier_6_p = r'\{\s*(\S+)\s*\}' _parse_identifier_7_p = r'([^\s\(\)\[\],]+)' -_parse_identifier_p = ''.join(('(', _parse_identifier_1_p, '|', - _parse_identifier_2_p, '|', _parse_identifier_3_p, '|', - _parse_identifier_4_p, '|', _parse_identifier_5_p, '|', - _parse_identifier_6_p, '|', _parse_identifier_7_p, ')')) +_parse_identifier_p = f"({_parse_identifier_1_p}|{_parse_identifier_2_p}|{_parse_identifier_3_p}|{_parse_identifier_4_p}|{_parse_identifier_5_p}|{_parse_identifier_6_p}|{_parse_identifier_7_p})" _parse_from_p = r'\s+FROM\s+' + _parse_identifier_p _parse_from_re = re.compile(_parse_from_p, re.IGNORECASE) @@ -670,7 +667,7 @@ def _explain_plan(connections, sql, database, connect_params, cursor_params, 'semicolons in the query string.', database.client) return None - query = '%s %s' % (database.explain_query, sql) + query = f'{database.explain_query} {sql}' if settings.debug.log_explain_plan_queries: _logger.debug('Executing explain plan for %r on %r.', query, @@ -827,8 +824,7 @@ def __init__(self, sql, database=None): except UnicodeError as e: settings = global_settings() if settings.debug.log_explain_plan_queries: - _logger.debug('An error occurred while decoding sql ' - 'statement: %s' % e.reason) + _logger.debug(f'An error occurred while decoding sql statement: {e.reason}') self._operation = '' self._target = '' diff --git a/newrelic/core/datastore_node.py b/newrelic/core/datastore_node.py index 290ec08464..9300b5d94f 100644 --- a/newrelic/core/datastore_node.py +++ b/newrelic/core/datastore_node.py @@ -48,11 +48,9 @@ def time_metrics(self, stats, root, parent): # Determine the scoped metric - statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product, - target, operation) + statement_metric_name = f'Datastore/statement/{product}/{target}/{operation}' - operation_metric_name = 'Datastore/operation/%s/%s' % (product, - operation) + operation_metric_name = f'Datastore/operation/{product}/{operation}' if target: scoped_metric_name = statement_metric_name @@ -67,20 +65,20 @@ def time_metrics(self, stats, root, parent): yield TimeMetric(name='Datastore/all', scope='', duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name='Datastore/%s/all' % product, scope='', + yield TimeMetric(name=f'Datastore/{product}/all', scope='', duration=self.duration, exclusive=self.exclusive) if root.type == 'WebTransaction': yield TimeMetric(name='Datastore/allWeb', scope='', duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='', + yield TimeMetric(name=f'Datastore/{product}/allWeb', scope='', duration=self.duration, exclusive=self.exclusive) else: yield TimeMetric(name='Datastore/allOther', scope='', duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name='Datastore/%s/allOther' % product, scope='', + yield TimeMetric(name=f'Datastore/{product}/allOther', scope='', duration=self.duration, exclusive=self.exclusive) # Unscoped operation metric @@ -102,8 +100,7 @@ def time_metrics(self, stats, root, parent): self.port_path_or_id and ds_tracer_settings.instance_reporting.enabled): - instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product, - self.instance_hostname, self.port_path_or_id) + instance_metric_name = 
f'Datastore/instance/{product}/{self.instance_hostname}/{self.port_path_or_id}' yield TimeMetric(name=instance_metric_name, scope='', duration=self.duration, exclusive=self.exclusive) diff --git a/newrelic/core/environment.py b/newrelic/core/environment.py index e1838dfba7..db59e51b50 100644 --- a/newrelic/core/environment.py +++ b/newrelic/core/environment.py @@ -245,7 +245,7 @@ def environment_settings(): # If it has no version it's likely not a real package so don't report it unless # it's a new relic hook. if nr_hook or version: - plugins.append("%s (%s)" % (name, version)) + plugins.append(f"{name} ({version})") env.append(("Plugin List", plugins)) diff --git a/newrelic/core/external_node.py b/newrelic/core/external_node.py index 7554fbb3b9..ddda33976a 100644 --- a/newrelic/core/external_node.py +++ b/newrelic/core/external_node.py @@ -46,8 +46,7 @@ def details(self): @property def name(self): - return 'External/%s/%s/%s' % ( - self.netloc, self.library, self.method or '') + return f"External/{self.netloc}/{self.library}/{self.method or ''}" @property def url_with_path(self): @@ -81,7 +80,7 @@ def netloc(self): if (scheme, port) in (('http', 80), ('https', 443)): port = None - netloc = port and ('%s:%s' % (hostname, port)) or hostname + netloc = port and (f'{hostname}:{port}') or hostname return netloc def time_metrics(self, stats, root, parent): @@ -113,7 +112,7 @@ def time_metrics(self, stats, root, parent): self.cross_process_id = None self.external_txn_name = None - name = 'External/%s/all' % netloc + name = f'External/{netloc}/all' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) @@ -121,7 +120,7 @@ def time_metrics(self, stats, root, parent): if self.cross_process_id is None: method = self.method or '' - name = 'External/%s/%s/%s' % (netloc, self.library, method) + name = f'External/{netloc}/{self.library}/{method}' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) @@ -130,8 +129,7 @@ def time_metrics(self, stats, root, parent): duration=self.duration, exclusive=self.exclusive) else: - name = 'ExternalTransaction/%s/%s/%s' % (netloc, - self.cross_process_id, self.external_txn_name) + name = f'ExternalTransaction/{netloc}/{self.cross_process_id}/{self.external_txn_name}' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) @@ -139,7 +137,7 @@ def time_metrics(self, stats, root, parent): yield TimeMetric(name=name, scope=root.path, duration=self.duration, exclusive=self.exclusive) - name = 'ExternalApp/%s/%s/all' % (netloc, self.cross_process_id) + name = f'ExternalApp/{netloc}/{self.cross_process_id}/all' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) @@ -151,11 +149,9 @@ def trace_node(self, stats, root, connections): method = self.method or '' if self.cross_process_id is None: - name = 'External/%s/%s/%s' % (netloc, self.library, method) + name = f'External/{netloc}/{self.library}/{method}' else: - name = 'ExternalTransaction/%s/%s/%s' % (netloc, - self.cross_process_id, - self.external_txn_name) + name = f'ExternalTransaction/{netloc}/{self.cross_process_id}/{self.external_txn_name}' name = root.string_table.cache(name) diff --git a/newrelic/core/function_node.py b/newrelic/core/function_node.py index 1dfbc3c0ef..f8df1976ce 100644 --- a/newrelic/core/function_node.py +++ b/newrelic/core/function_node.py @@ -34,7 +34,7 @@ def time_metrics(self, stats, root, parent): """ - name = '%s/%s' % (self.group, self.name) + name = 
f'{self.group}/{self.name}' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) @@ -88,7 +88,7 @@ def time_metrics(self, stats, root, parent): def trace_node(self, stats, root, connections): - name = '%s/%s' % (self.group, self.name) + name = f'{self.group}/{self.name}' name = root.string_table.cache(name) @@ -115,6 +115,6 @@ def span_event(self, *args, **kwargs): attrs = super(FunctionNode, self).span_event(*args, **kwargs) i_attrs = attrs[0] - i_attrs['name'] = '%s/%s' % (self.group, self.name) + i_attrs['name'] = f'{self.group}/{self.name}' return attrs diff --git a/newrelic/core/graphql_node.py b/newrelic/core/graphql_node.py index a32e185ee9..b767225841 100644 --- a/newrelic/core/graphql_node.py +++ b/newrelic/core/graphql_node.py @@ -60,7 +60,7 @@ def name(self): field_name = self.field_name or "" product = self.product - name = 'GraphQL/resolve/%s/%s' % (product, field_name) + name = f'GraphQL/resolve/{product}/{field_name}' return name @@ -74,7 +74,7 @@ def time_metrics(self, stats, root, parent): # Determine the scoped metric - field_resolver_metric_name = 'GraphQL/resolve/%s/%s' % (product, field_name) + field_resolver_metric_name = f'GraphQL/resolve/{product}/{field_name}' yield TimeMetric(name=field_resolver_metric_name, scope=root.path, duration=self.duration, exclusive=self.exclusive) @@ -97,8 +97,7 @@ def name(self): deepest_path = self.deepest_path product = self.product - name = 'GraphQL/operation/%s/%s/%s/%s' % (product, operation_type, - operation_name, deepest_path) + name = f'GraphQL/operation/{product}/{operation_type}/{operation_name}/{deepest_path}' return name @@ -115,8 +114,7 @@ def time_metrics(self, stats, root, parent): # Determine the scoped metric - operation_metric_name = 'GraphQL/operation/%s/%s/%s/%s' % (product, - operation_type, operation_name, deepest_path) + operation_metric_name = f'GraphQL/operation/{product}/{operation_type}/{operation_name}/{deepest_path}' scoped_metric_name = operation_metric_name @@ -128,20 +126,20 @@ def time_metrics(self, stats, root, parent): yield TimeMetric(name='GraphQL/all', scope='', duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name='GraphQL/%s/all' % product, scope='', + yield TimeMetric(name=f'GraphQL/{product}/all', scope='', duration=self.duration, exclusive=self.exclusive) if root.type == 'WebTransaction': yield TimeMetric(name='GraphQL/allWeb', scope='', duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name='GraphQL/%s/allWeb' % product, scope='', + yield TimeMetric(name=f'GraphQL/{product}/allWeb', scope='', duration=self.duration, exclusive=self.exclusive) else: yield TimeMetric(name='GraphQL/allOther', scope='', duration=self.duration, exclusive=self.exclusive) - yield TimeMetric(name='GraphQL/%s/allOther' % product, scope='', + yield TimeMetric(name=f'GraphQL/{product}/allOther', scope='', duration=self.duration, exclusive=self.exclusive) # Unscoped operation metric diff --git a/newrelic/core/loop_node.py b/newrelic/core/loop_node.py index 1bdbc7cf6c..151787ff19 100644 --- a/newrelic/core/loop_node.py +++ b/newrelic/core/loop_node.py @@ -47,7 +47,7 @@ def time_metrics(self, stats, root, parent): """ - name = 'EventLoop/Wait/%s' % self.name + name = f'EventLoop/Wait/{self.name}' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.duration) @@ -62,15 +62,15 @@ def time_metrics(self, stats, root, parent): exclusive=None) if root.type == 'WebTransaction': - yield TimeMetric(name=name + 'Web', scope='', + yield 
TimeMetric(name=f"{name}Web", scope='', duration=self.duration, exclusive=None) else: - yield TimeMetric(name=name + 'Other', scope='', + yield TimeMetric(name=f"{name}Other", scope='', duration=self.duration, exclusive=None) def trace_node(self, stats, root, connections): - name = 'EventLoop/Wait/%s' % self.name + name = f'EventLoop/Wait/{self.name}' name = root.string_table.cache(name) @@ -94,6 +94,6 @@ def span_event(self, *args, **kwargs): attrs = super(LoopNode, self).span_event(*args, **kwargs) i_attrs = attrs[0] - i_attrs['name'] = 'EventLoop/Wait/%s' % self.name + i_attrs['name'] = f'EventLoop/Wait/{self.name}' return attrs diff --git a/newrelic/core/memcache_node.py b/newrelic/core/memcache_node.py index 2f73609d0d..678a89c5ec 100644 --- a/newrelic/core/memcache_node.py +++ b/newrelic/core/memcache_node.py @@ -28,7 +28,7 @@ class MemcacheNode(_MemcacheNode, GenericNodeMixin): @property def name(self): - return 'Memcache/%s' % self.command + return f'Memcache/{self.command}' def time_metrics(self, stats, root, parent): """Return a generator yielding the timed metrics for this @@ -46,7 +46,7 @@ def time_metrics(self, stats, root, parent): yield TimeMetric(name='Memcache/allOther', scope='', duration=self.duration, exclusive=self.exclusive) - name = 'Memcache/%s' % self.command + name = f'Memcache/{self.command}' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) diff --git a/newrelic/core/message_node.py b/newrelic/core/message_node.py index 02e431eb31..aa1fec992e 100644 --- a/newrelic/core/message_node.py +++ b/newrelic/core/message_node.py @@ -30,8 +30,7 @@ class MessageNode(_MessageNode, GenericNodeMixin): @property def name(self): - name = 'MessageBroker/%s/%s/%s/Named/%s' % (self.library, - self.destination_type, self.operation, self.destination_name) + name = f'MessageBroker/{self.library}/{self.destination_type}/{self.operation}/Named/{self.destination_name}' return name def time_metrics(self, stats, root, parent): diff --git a/newrelic/core/node_mixin.py b/newrelic/core/node_mixin.py index 734d8e998e..a45e6645d2 100644 --- a/newrelic/core/node_mixin.py +++ b/newrelic/core/node_mixin.py @@ -90,9 +90,9 @@ def name(self): operation = self.operation or "other" if target: - name = "Datastore/statement/%s/%s/%s" % (product, target, operation) + name = f"Datastore/statement/{product}/{target}/{operation}" else: - name = "Datastore/operation/%s/%s" % (product, operation) + name = f"Datastore/operation/{product}/{operation}" return name @@ -123,7 +123,7 @@ def span_event(self, *args, **kwargs): else: a_attrs["peer.hostname"] = "Unknown" - peer_address = "%s:%s" % (self.instance_hostname or "Unknown", self.port_path_or_id or "Unknown") + peer_address = f"{self.instance_hostname or 'Unknown'}:{self.port_path_or_id or 'Unknown'}" _, a_attrs["peer.address"] = attribute.process_user_attribute("peer.address", peer_address) diff --git a/newrelic/core/otlp_utils.py b/newrelic/core/otlp_utils.py index 0719fed33c..4c9c57f8fb 100644 --- a/newrelic/core/otlp_utils.py +++ b/newrelic/core/otlp_utils.py @@ -106,7 +106,7 @@ def create_key_value(key, value): # those are not valid custom attribute types according to our api spec, # we will not bother to support them here either. else: - _logger.warning("Unsupported attribute value type %s: %s." 
% (key, value)) + _logger.warning(f"Unsupported attribute value type {key}: {value}.") def create_key_values_from_iterable(iterable): diff --git a/newrelic/core/profile_sessions.py b/newrelic/core/profile_sessions.py index 6f67317562..5d71fde34c 100644 --- a/newrelic/core/profile_sessions.py +++ b/newrelic/core/profile_sessions.py @@ -449,9 +449,9 @@ def flatten(self): # are labeled with an @ sign in the second element of the tuple. if func_line == exec_line: - method_data = (filename, "@%s#%s" % (func_name, func_line), exec_line) + method_data = (filename, f"@{func_name}#{func_line}", exec_line) else: - method_data = (filename, "%s#%s" % (func_name, func_line), exec_line) + method_data = (filename, f"{func_name}#{func_line}", exec_line) return [method_data, self.call_count, 0, [x.flatten() for x in self.children.values() if not x.ignore]] diff --git a/newrelic/core/rules_engine.py b/newrelic/core/rules_engine.py index ddbf780331..4b40c984b2 100644 --- a/newrelic/core/rules_engine.py +++ b/newrelic/core/rules_engine.py @@ -174,7 +174,7 @@ def __init__(self, rules): # should always return prefixes and term strings as Unicode. choices = "|".join([re.escape(x) for x in prefixes]) - pattern = "^(%s)/(.+)$" % choices + pattern = f"^({choices})/(.+)$" self.prefixes = re.compile(pattern) @@ -228,4 +228,4 @@ def normalize(self, txn_name): result = [x if x in allowlist_terms else "*" for x in segments] result = self.COLLAPSE_STAR_RE.sub("\\1", "/".join(result)) - return "/".join((prefix, result)), False + return f"{prefix}/{result}", False diff --git a/newrelic/core/solr_node.py b/newrelic/core/solr_node.py index 849ca535af..9ffcd29a69 100644 --- a/newrelic/core/solr_node.py +++ b/newrelic/core/solr_node.py @@ -29,7 +29,7 @@ class SolrNode(_SolrNode, GenericNodeMixin): @property def name(self): - return 'SolrClient/%s/%s' % (self.library, self.command) + return f'SolrClient/{self.library}/{self.command}' def time_metrics(self, stats, root, parent): """Return a generator yielding the timed metrics for this @@ -46,7 +46,7 @@ def time_metrics(self, stats, root, parent): yield TimeMetric(name='Solr/allOther', scope='', duration=self.duration, exclusive=self.exclusive) - name = 'Solr/%s' % self.command + name = f'Solr/{self.command}' yield TimeMetric(name=name, scope='', duration=self.duration, exclusive=self.exclusive) diff --git a/newrelic/core/stack_trace.py b/newrelic/core/stack_trace.py index c7e378e4a2..1ab1f96c4e 100644 --- a/newrelic/core/stack_trace.py +++ b/newrelic/core/stack_trace.py @@ -27,8 +27,7 @@ def _format_stack_trace(frames): result = ['Traceback (most recent call last):'] - result.extend(['File "{source}", line {line}, in {name}'.format(**d) - for d in frames]) + result.extend([f'File "{f["source"]}", line {f["line"]}, in {f["name"]}' for f in frames]) return result def _extract_stack(f, skip, limit): diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index 72a72a0243..e77647d9d8 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -328,7 +328,7 @@ def __str__(self): return str(self.__stats_table) def __repr__(self): - return "%s(%s)" % (__class__.__name__, repr(self.__stats_table)) + return f"{__class__.__name__}({repr(self.__stats_table)})" def items(self): return self.metrics() @@ -860,8 +860,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, 
str): raise ValueError( - "Invalid attribute value for error.group.name. Expected string, got: %s" - % repr(error_group_name_raw) + f"Invalid attribute value for error.group.name. Expected string, got: {repr(error_group_name_raw)}" ) else: agent_attributes["error.group.name"] = error_group_name diff --git a/newrelic/core/string_table.py b/newrelic/core/string_table.py index bbad866c1d..f8d38a1148 100644 --- a/newrelic/core/string_table.py +++ b/newrelic/core/string_table.py @@ -20,7 +20,7 @@ def __init__(self): def cache(self, value): if not value in self.__mapping: - token = '`%d' % len(self.__values) + token = f'`{len(self.__values)}' self.__mapping[value] = token self.__values.append(value) return self.__mapping[value] diff --git a/newrelic/core/trace_cache.py b/newrelic/core/trace_cache.py index e0703ac82c..c6c648ad91 100644 --- a/newrelic/core/trace_cache.py +++ b/newrelic/core/trace_cache.py @@ -102,7 +102,7 @@ def __init__(self): self._cache = weakref.WeakValueDictionary() def __repr__(self): - return "<%s object at 0x%x %s>" % (self.__class__.__name__, id(self), str(dict(self.items()))) + return f"<{self.__class__.__name__} object at 0x{id(self):x} {str(dict(self.items()))}>" def current_thread_id(self): """Returns the thread ID for the caller. @@ -379,7 +379,7 @@ def record_event_loop_wait(self, start_time, end_time): seen = None for root in roots: - guid = "%016x" % random.getrandbits(64) + guid = f"{random.getrandbits(64):016x}" node = LoopNode( fetch_name=fetch_name, start_time=start_time, diff --git a/newrelic/core/transaction_node.py b/newrelic/core/transaction_node.py index bafbb7a969..675bdcaf03 100644 --- a/newrelic/core/transaction_node.py +++ b/newrelic/core/transaction_node.py @@ -186,7 +186,7 @@ def time_metrics(self, stats): # Generate the rollup metric. 
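Worth noting in the trace_cache hunk above: printf-style format specs such as %016x carry over verbatim to the right of the colon in the f-string replacement field. A minimal sketch, independent of the agent code:

    import random

    bits = random.getrandbits(64)
    old_guid = "%016x" % bits   # printf-style: zero-padded, 16 hex digits
    new_guid = f"{bits:016x}"   # f-string: the same spec after the colon
    assert old_guid == new_guid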
if self.type != "WebTransaction": - rollup = "%s/all" % self.type + rollup = f"{self.type}/all" else: rollup = self.type @@ -202,7 +202,7 @@ def time_metrics(self, stats): metric_suffix = "Other" yield TimeMetric( - name="%s/%s" % (metric_prefix, self.name_for_metric), + name=f"{metric_prefix}/{self.name_for_metric}", scope="", duration=self.total_time, exclusive=self.total_time, @@ -213,16 +213,11 @@ def time_metrics(self, stats): # Generate Distributed Tracing metrics if self.settings.distributed_tracing.enabled: - dt_tag = "%s/%s/%s/%s/all" % ( - self.parent_type or "Unknown", - self.parent_account or "Unknown", - self.parent_app or "Unknown", - self.parent_transport_type or "Unknown", - ) + dt_tag = f"{self.parent_type or 'Unknown'}/{self.parent_account or 'Unknown'}/{self.parent_app or 'Unknown'}/{self.parent_transport_type or 'Unknown'}/all" for bonus_tag in ("", metric_suffix): yield TimeMetric( - name="DurationByCaller/%s%s" % (dt_tag, bonus_tag), + name=f"DurationByCaller/{dt_tag}{bonus_tag}", scope="", duration=self.duration, exclusive=self.duration, @@ -230,7 +225,7 @@ def time_metrics(self, stats): if self.parent_transport_duration is not None: yield TimeMetric( - name="TransportDuration/%s%s" % (dt_tag, bonus_tag), + name=f"TransportDuration/{dt_tag}{bonus_tag}", scope="", duration=self.parent_transport_duration, exclusive=self.parent_transport_duration, @@ -238,7 +233,7 @@ def time_metrics(self, stats): if self.errors: yield TimeMetric( - name="ErrorsByCaller/%s%s" % (dt_tag, bonus_tag), scope="", duration=0.0, exclusive=None + name=f"ErrorsByCaller/{dt_tag}{bonus_tag}", scope="", duration=0.0, exclusive=None ) # Generate Error metrics @@ -249,10 +244,10 @@ def time_metrics(self, stats): yield TimeMetric(name="Errors/all", scope="", duration=0.0, exclusive=None) # Generate individual error metric for transaction. - yield TimeMetric(name="Errors/%s" % self.path, scope="", duration=0.0, exclusive=None) + yield TimeMetric(name=f"Errors/{self.path}", scope="", duration=0.0, exclusive=None) # Generate rollup metric for WebTransaction errors. - yield TimeMetric(name="Errors/all%s" % metric_suffix, scope="", duration=0.0, exclusive=None) + yield TimeMetric(name=f"Errors/all{metric_suffix}", scope="", duration=0.0, exclusive=None) else: yield TimeMetric(name="ErrorsExpected/all", scope="", duration=0.0, exclusive=None) @@ -297,7 +292,7 @@ def apdex_metrics(self, stats): # Generate the full apdex metric. 
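The dt_tag rewrite above also shows that arbitrary expressions, including `or`-defaults, evaluate inside the braces exactly as the old positional arguments did. A small sketch with hypothetical stand-ins for the TransactionNode attributes:

    parent_type = None                 # hypothetical: no parent type recorded
    parent_account = "12345"           # hypothetical account id
    dt_tag = f"{parent_type or 'Unknown'}/{parent_account or 'Unknown'}/all"
    assert dt_tag == "Unknown/12345/all"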
yield ApdexMetric( - name="Apdex/%s" % self.name_for_metric, + name=f"Apdex/{self.name_for_metric}", satisfying=satisfying, tolerating=tolerating, frustrating=frustrating, @@ -598,7 +593,7 @@ def _event_intrinsics(self, stats_table): # Add all synthetics attributes for k, v in self.synthetics_attributes.items(): if k: - intrinsics["nr.synthetics%s" % camel_case(k, upper=True)] = v + intrinsics[f"nr.synthetics{camel_case(k, upper=True)}"] = v def _add_call_time(source, target): # include time for keys previously added to stats table via diff --git a/newrelic/hooks/application_celery.py b/newrelic/hooks/application_celery.py index f3261289d4..9fff9c818b 100644 --- a/newrelic/hooks/application_celery.py +++ b/newrelic/hooks/application_celery.py @@ -55,7 +55,7 @@ def task_info(instance, *args, **kwargs): if task_name in MAPPING_TASK_NAMES: try: subtask = kwargs["task"]["task"] - task_name = "/".join((task_name, subtask)) + task_name = f"{task_name}/{subtask}" task_source = task.app._tasks[subtask] except Exception: pass diff --git a/newrelic/hooks/application_gearman.py b/newrelic/hooks/application_gearman.py index 12646c3d2c..dbe86e3584 100644 --- a/newrelic/hooks/application_gearman.py +++ b/newrelic/hooks/application_gearman.py @@ -69,8 +69,7 @@ def _bind_params(submitted_connections, *args, **kwargs): first_connection = list(submitted_connections)[0] - url = 'gearman://%s:%s' % (first_connection.gearman_host, - first_connection.gearman_port) + url = f'gearman://{first_connection.gearman_host}:{first_connection.gearman_port}' with ExternalTrace('gearman', url): return wrapped(*args, **kwargs) @@ -105,8 +104,7 @@ def _bind_params(current_connection, *args, **kwargs): current_connection = _bind_params(*args, **kwargs) - tracer.url = 'gearman://%s:%s' % (current_connection.gearman_host, - current_connection.gearman_port) + tracer.url = f'gearman://{current_connection.gearman_host}:{current_connection.gearman_port}' return wrapped(*args, **kwargs) diff --git a/newrelic/hooks/component_djangorestframework.py b/newrelic/hooks/component_djangorestframework.py index f2d9c31c81..b7e132f797 100644 --- a/newrelic/hooks/component_djangorestframework.py +++ b/newrelic/hooks/component_djangorestframework.py @@ -44,10 +44,9 @@ def _args(request, *args, **kwargs): None) if view_func_callable_name: if handler == view.http_method_not_allowed: - name = '%s.%s' % (view_func_callable_name, - 'http_method_not_allowed') + name = f'{view_func_callable_name}.http_method_not_allowed' else: - name = '%s.%s' % (view_func_callable_name, request_method) + name = f'{view_func_callable_name}.{request_method}' else: name = callable_name(handler) diff --git a/newrelic/hooks/component_tastypie.py b/newrelic/hooks/component_tastypie.py index da93efbfb3..ed67a2be56 100644 --- a/newrelic/hooks/component_tastypie.py +++ b/newrelic/hooks/component_tastypie.py @@ -39,11 +39,11 @@ def outer_fn_wrapper(outer_fn, instance, args, kwargs): callback = getattr(instance, 'top_level', None) elif meta.api_name is not None: group = 'Python/TastyPie/Api' - name = '%s/%s/%s' % (meta.api_name, meta.resource_name, view_name) + name = f'{meta.api_name}/{meta.resource_name}/{view_name}' callback = getattr(instance, view_name, None) else: group = 'Python/TastyPie/Resource' - name = '%s/%s' % (meta.resource_name, view_name) + name = f'{meta.resource_name}/{view_name}' callback = getattr(instance, view_name, None) # Give preference to naming web transaction and trace node after diff --git a/newrelic/hooks/database_asyncpg.py 
b/newrelic/hooks/database_asyncpg.py index 1abd3a0d01..0a00514a59 100644 --- a/newrelic/hooks/database_asyncpg.py +++ b/newrelic/hooks/database_asyncpg.py @@ -89,7 +89,7 @@ async def query(self, query, *args, **kwargs): async def prepare(self, stmt_name, query, *args, **kwargs): with DatabaseTrace( - "PREPARE {stmt_name} FROM '{query}'".format(stmt_name=stmt_name, query=query), + f"PREPARE {stmt_name} FROM '{query}'", dbapi2_module=PostgresApi, connect_params=getattr(self, "_nr_connect_params", None), source=self.__wrapped__.prepare, diff --git a/newrelic/hooks/database_dbapi2.py b/newrelic/hooks/database_dbapi2.py index 3621cac3ba..17d3243d07 100644 --- a/newrelic/hooks/database_dbapi2.py +++ b/newrelic/hooks/database_dbapi2.py @@ -79,7 +79,7 @@ def executemany(self, sql, seq_of_parameters): def callproc(self, procname, parameters=DEFAULT): with DatabaseTrace( - sql="CALL %s" % procname, + sql=f"CALL {procname}", dbapi2_module=self._nr_dbapi2_module, connect_params=self._nr_connect_params, source=self.__wrapped__.callproc, @@ -132,7 +132,7 @@ def __init__(self, connect, dbapi2_module): self._nr_dbapi2_module = dbapi2_module def __call__(self, *args, **kwargs): - rollup = ["Datastore/all", "Datastore/%s/all" % self._nr_dbapi2_module._nr_database_product] + rollup = ["Datastore/all", f"Datastore/{self._nr_dbapi2_module._nr_database_product}/all"] with FunctionTrace(name=callable_name(self.__wrapped__), terminal=True, rollup=rollup, source=self.__wrapped__): return self.__connection_wrapper__( diff --git a/newrelic/hooks/database_dbapi2_async.py b/newrelic/hooks/database_dbapi2_async.py index fa777feb8e..fba2126818 100644 --- a/newrelic/hooks/database_dbapi2_async.py +++ b/newrelic/hooks/database_dbapi2_async.py @@ -77,7 +77,7 @@ async def executemany(self, sql, seq_of_parameters): async def callproc(self, procname, parameters=DEFAULT): with DatabaseTrace( - sql="CALL %s" % procname, + sql=f"CALL {procname}", dbapi2_module=self._nr_dbapi2_module, connect_params=self._nr_connect_params, source=self.__wrapped__.callproc, @@ -147,7 +147,7 @@ def __init__(self, connect, dbapi2_module): self._nr_dbapi2_module = dbapi2_module async def __call__(self, *args, **kwargs): - rollup = ["Datastore/all", "Datastore/%s/all" % self._nr_dbapi2_module._nr_database_product] + rollup = ["Datastore/all", f"Datastore/{self._nr_dbapi2_module._nr_database_product}/all"] with FunctionTrace(name=callable_name(self.__wrapped__), terminal=True, rollup=rollup, source=self.__wrapped__): connection = await self.__wrapped__(*args, **kwargs) diff --git a/newrelic/hooks/database_psycopg.py b/newrelic/hooks/database_psycopg.py index 474ffc9a98..9b46b7afd0 100644 --- a/newrelic/hooks/database_psycopg.py +++ b/newrelic/hooks/database_psycopg.py @@ -430,7 +430,7 @@ def _add_defaults(parsed_host, parsed_hostaddr, parsed_port, parsed_database): port = "default" elif parsed_host.startswith("/"): host = "localhost" - port = "%s/.s.PGSQL.%s" % (parsed_host, parsed_port or "5432") + port = f"{parsed_host}/.s.PGSQL.{parsed_port or '5432'}" else: host = parsed_host port = parsed_port or "5432" @@ -504,4 +504,4 @@ def instrument_psycopg_sql(module): if not issubclass(cls, module.Composable): continue - wrap_function_wrapper(module, name + ".as_string", wrapper_psycopg_as_string) + wrap_function_wrapper(module, f"{name}.as_string", wrapper_psycopg_as_string) diff --git a/newrelic/hooks/database_psycopg2.py b/newrelic/hooks/database_psycopg2.py index 8efb84cce7..ec1d96ce06 100644 --- a/newrelic/hooks/database_psycopg2.py +++ 
b/newrelic/hooks/database_psycopg2.py @@ -222,7 +222,7 @@ def _add_defaults(parsed_host, parsed_hostaddr, parsed_port, parsed_database): port = "default" elif parsed_host.startswith("/"): host = "localhost" - port = "%s/.s.PGSQL.%s" % (parsed_host, parsed_port or "5432") + port = f"{parsed_host}/.s.PGSQL.{parsed_port or '5432'}" else: host = parsed_host port = parsed_port or "5432" @@ -310,4 +310,4 @@ def instrument_psycopg2_sql(module): if not issubclass(cls, module.Composable): continue - wrap_function_wrapper(module, name + ".as_string", wrapper_psycopg2_as_string) + wrap_function_wrapper(module, f"{name}.as_string", wrapper_psycopg2_as_string) diff --git a/newrelic/hooks/datastore_aiomcache.py b/newrelic/hooks/datastore_aiomcache.py index fb770b8192..e2d605392e 100644 --- a/newrelic/hooks/datastore_aiomcache.py +++ b/newrelic/hooks/datastore_aiomcache.py @@ -21,4 +21,4 @@ def instrument_aiomcache_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, "Client.%s" % name, product="Memcached", target=None, operation=name) + wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) diff --git a/newrelic/hooks/datastore_aioredis.py b/newrelic/hooks/datastore_aioredis.py index e27f8d7a99..19c9c41996 100644 --- a/newrelic/hooks/datastore_aioredis.py +++ b/newrelic/hooks/datastore_aioredis.py @@ -84,7 +84,7 @@ def _nr_wrapper_AioRedis_method_(wrapped, instance, args, kwargs): # Method should be run when awaited, therefore we wrap in an async wrapper. return _nr_wrapper_AioRedis_async_method_(wrapped)(*args, **kwargs) - name = "%s.%s" % (instance_class_name, operation) + name = f"{instance_class_name}.{operation}" wrap_function_wrapper(module, name, _nr_wrapper_AioRedis_method_) @@ -131,7 +131,7 @@ async def wrap_Connection_send_command(wrapped, instance, args, kwargs): # Convert multi args to single arg string if operation in _redis_multipart_commands and len(args) > 1: - operation = "%s %s" % (operation, args[1].strip().lower()) + operation = f"{operation} {args[1].strip().lower()}" operation = _redis_operation_re.sub("_", operation) @@ -190,7 +190,7 @@ def wrap_RedisConnection_execute(wrapped, instance, args, kwargs): # pragma: no # Convert multi args to single arg string if operation in _redis_multipart_commands and len(args) > 1: - operation = "%s %s" % (operation, args[1].strip().lower()) + operation = f"{operation} {args[1].strip().lower()}" operation = _redis_operation_re.sub("_", operation) diff --git a/newrelic/hooks/datastore_aredis.py b/newrelic/hooks/datastore_aredis.py index 236cbf3f8c..4eeb4a230e 100644 --- a/newrelic/hooks/datastore_aredis.py +++ b/newrelic/hooks/datastore_aredis.py @@ -34,7 +34,7 @@ async def _nr_wrapper_Aredis_method_(wrapped, instance, args, kwargs): with DatastoreTrace(product="Redis", target=None, operation=operation): return await wrapped(*args, **kwargs) - name = "%s.%s" % (instance_class_name, operation) + name = f"{instance_class_name}.{operation}" wrap_function_wrapper(module, name, _nr_wrapper_Aredis_method_) @@ -80,7 +80,7 @@ async def wrap_Connection_send_command(wrapped, instance, args, kwargs): # Convert multi args to single arg string if operation in _redis_multipart_commands and len(args) > 1: - operation = "%s %s" % (operation, args[1].strip().lower()) + operation = f"{operation} {args[1].strip().lower()}" operation = _redis_operation_re.sub("_", operation) diff --git a/newrelic/hooks/datastore_bmemcached.py 
b/newrelic/hooks/datastore_bmemcached.py index 3091f0992b..3e891f85e3 100644 --- a/newrelic/hooks/datastore_bmemcached.py +++ b/newrelic/hooks/datastore_bmemcached.py @@ -35,4 +35,4 @@ def instrument_bmemcached_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, "Client.%s" % name, product="Memcached", target=None, operation=name) + wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) diff --git a/newrelic/hooks/datastore_elasticsearch.py b/newrelic/hooks/datastore_elasticsearch.py index b72e5c4c5e..980385a1b3 100644 --- a/newrelic/hooks/datastore_elasticsearch.py +++ b/newrelic/hooks/datastore_elasticsearch.py @@ -123,7 +123,7 @@ def _nr_wrapper_Elasticsearch_method_(wrapped, instance, args, kwargs): index = arg_extractor(*args, **kwargs) if prefix: - operation = "%s.%s" % (prefix, method_name) + operation = f"{prefix}.{method_name}" else: operation = method_name @@ -142,7 +142,7 @@ def _nr_wrapper_Elasticsearch_method_(wrapped, instance, args, kwargs): return result - wrap_function_wrapper(module, "%s.%s" % (class_name, method_name), _nr_wrapper_Elasticsearch_method_) + wrap_function_wrapper(module, f"{class_name}.{method_name}", _nr_wrapper_Elasticsearch_method_) _elasticsearch_client_methods_below_v8 = ( diff --git a/newrelic/hooks/datastore_firestore.py b/newrelic/hooks/datastore_firestore.py index 6d3196a7c3..ba7fdfb507 100644 --- a/newrelic/hooks/datastore_firestore.py +++ b/newrelic/hooks/datastore_firestore.py @@ -66,7 +66,7 @@ def closure(obj, *args, **kwargs): def instrument_google_cloud_firestore_v1_base_client(module): rollup = ("Datastore/all", "Datastore/Firestore/all") wrap_function_trace( - module, "BaseClient.__init__", name="%s:BaseClient.__init__" % module.__name__, terminal=True, rollup=rollup + module, "BaseClient.__init__", name=f"{module.__name__}:BaseClient.__init__", terminal=True, rollup=rollup ) @@ -77,7 +77,7 @@ def instrument_google_cloud_firestore_v1_client(module): if hasattr(class_, method): wrap_datastore_trace( module, - "Client.%s" % method, + f"Client.{method}", operation=method, product="Firestore", target=None, @@ -95,7 +95,7 @@ def instrument_google_cloud_firestore_v1_async_client(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncClient.%s" % method, + f"AsyncClient.{method}", operation=method, product="Firestore", target=None, @@ -113,7 +113,7 @@ def instrument_google_cloud_firestore_v1_collection(module): if hasattr(class_, method): wrap_datastore_trace( module, - "CollectionReference.%s" % method, + f"CollectionReference.{method}", product="Firestore", target=_get_object_id, operation=method, @@ -126,7 +126,7 @@ def instrument_google_cloud_firestore_v1_collection(module): if hasattr(class_, method): wrap_datastore_trace( module, - "CollectionReference.%s" % method, + f"CollectionReference.{method}", operation=method, product="Firestore", target=_get_object_id, @@ -144,7 +144,7 @@ def instrument_google_cloud_firestore_v1_async_collection(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncCollectionReference.%s" % method, + f"AsyncCollectionReference.{method}", product="Firestore", target=_get_object_id, host=_get_client_target_host, @@ -157,7 +157,7 @@ def instrument_google_cloud_firestore_v1_async_collection(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncCollectionReference.%s" % method, + f"AsyncCollectionReference.{method}", operation=method, 
product="Firestore", target=_get_object_id, @@ -175,7 +175,7 @@ def instrument_google_cloud_firestore_v1_document(module): if hasattr(class_, method): wrap_datastore_trace( module, - "DocumentReference.%s" % method, + f"DocumentReference.{method}", product="Firestore", target=_get_object_id, operation=method, @@ -188,7 +188,7 @@ def instrument_google_cloud_firestore_v1_document(module): if hasattr(class_, method): wrap_datastore_trace( module, - "DocumentReference.%s" % method, + f"DocumentReference.{method}", operation=method, product="Firestore", target=_get_object_id, @@ -206,7 +206,7 @@ def instrument_google_cloud_firestore_v1_async_document(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncDocumentReference.%s" % method, + f"AsyncDocumentReference.{method}", product="Firestore", target=_get_object_id, operation=method, @@ -219,7 +219,7 @@ def instrument_google_cloud_firestore_v1_async_document(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncDocumentReference.%s" % method, + f"AsyncDocumentReference.{method}", operation=method, product="Firestore", target=_get_object_id, @@ -237,7 +237,7 @@ def instrument_google_cloud_firestore_v1_query(module): if hasattr(class_, method): wrap_datastore_trace( module, - "Query.%s" % method, + f"Query.{method}", product="Firestore", target=_get_parent_id, operation=method, @@ -250,7 +250,7 @@ def instrument_google_cloud_firestore_v1_query(module): if hasattr(class_, method): wrap_datastore_trace( module, - "Query.%s" % method, + f"Query.{method}", operation=method, product="Firestore", target=_get_parent_id, @@ -266,7 +266,7 @@ def instrument_google_cloud_firestore_v1_query(module): if hasattr(class_, method): wrap_datastore_trace( module, - "CollectionGroup.%s" % method, + f"CollectionGroup.{method}", operation=method, product="Firestore", target=_get_parent_id, @@ -284,7 +284,7 @@ def instrument_google_cloud_firestore_v1_async_query(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncQuery.%s" % method, + f"AsyncQuery.{method}", product="Firestore", target=_get_parent_id, operation=method, @@ -297,7 +297,7 @@ def instrument_google_cloud_firestore_v1_async_query(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncQuery.%s" % method, + f"AsyncQuery.{method}", operation=method, product="Firestore", target=_get_parent_id, @@ -313,7 +313,7 @@ def instrument_google_cloud_firestore_v1_async_query(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncCollectionGroup.%s" % method, + f"AsyncCollectionGroup.{method}", operation=method, product="Firestore", target=_get_parent_id, @@ -331,7 +331,7 @@ def instrument_google_cloud_firestore_v1_aggregation(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AggregationQuery.%s" % method, + f"AggregationQuery.{method}", product="Firestore", target=_get_collection_ref_id, operation=method, @@ -344,7 +344,7 @@ def instrument_google_cloud_firestore_v1_aggregation(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AggregationQuery.%s" % method, + f"AggregationQuery.{method}", operation=method, product="Firestore", target=_get_collection_ref_id, @@ -362,7 +362,7 @@ def instrument_google_cloud_firestore_v1_async_aggregation(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncAggregationQuery.%s" % method, + f"AsyncAggregationQuery.{method}", product="Firestore", target=_get_collection_ref_id, operation=method, @@ -375,7 +375,7 @@ def 
instrument_google_cloud_firestore_v1_async_aggregation(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncAggregationQuery.%s" % method, + f"AsyncAggregationQuery.{method}", operation=method, product="Firestore", target=_get_collection_ref_id, @@ -393,7 +393,7 @@ def instrument_google_cloud_firestore_v1_batch(module): if hasattr(class_, method): wrap_datastore_trace( module, - "WriteBatch.%s" % method, + f"WriteBatch.{method}", product="Firestore", target=None, operation=method, @@ -410,7 +410,7 @@ def instrument_google_cloud_firestore_v1_async_batch(module): if hasattr(class_, method): wrap_datastore_trace( module, - "AsyncWriteBatch.%s" % method, + f"AsyncWriteBatch.{method}", product="Firestore", target=None, operation=method, @@ -427,7 +427,7 @@ def instrument_google_cloud_firestore_v1_bulk_batch(module): if hasattr(class_, method): wrap_datastore_trace( module, - "BulkWriteBatch.%s" % method, + f"BulkWriteBatch.{method}", product="Firestore", target=None, operation=method, @@ -445,7 +445,7 @@ def instrument_google_cloud_firestore_v1_transaction(module): operation = method[1:] # Trim leading underscore wrap_datastore_trace( module, - "Transaction.%s" % method, + f"Transaction.{method}", product="Firestore", target=None, operation=operation, @@ -463,7 +463,7 @@ def instrument_google_cloud_firestore_v1_async_transaction(module): operation = method[1:] # Trim leading underscore wrap_datastore_trace( module, - "AsyncTransaction.%s" % method, + f"AsyncTransaction.{method}", product="Firestore", target=None, operation=operation, diff --git a/newrelic/hooks/datastore_memcache.py b/newrelic/hooks/datastore_memcache.py index 90b2d43dcf..23f6d2d73b 100644 --- a/newrelic/hooks/datastore_memcache.py +++ b/newrelic/hooks/datastore_memcache.py @@ -83,7 +83,7 @@ def _nr_datastore_trace_wrapper_(wrapped, instance, args, kwargs): def wrap_memcache_single(module, object_path, product, target, operation): - wrap_object(module, "Client.%s" % object_path, MemcacheSingleWrapper, (product, target, operation, module)) + wrap_object(module, f"Client.{object_path}", MemcacheSingleWrapper, (product, target, operation, module)) _memcache_client_methods = ( @@ -112,4 +112,4 @@ def instrument_memcache(module): for name in _memcache_multi_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, "Client.%s" % name, product="Memcached", target=None, operation=name) + wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) diff --git a/newrelic/hooks/datastore_motor.py b/newrelic/hooks/datastore_motor.py index 1c000285ea..3cea77ac42 100644 --- a/newrelic/hooks/datastore_motor.py +++ b/newrelic/hooks/datastore_motor.py @@ -28,9 +28,7 @@ def _bind_params(name, *args, **kwargs): name = _bind_params(*args, **kwargs) if name.startswith('__') or name.startswith('_nr_'): - raise AttributeError('%s class has no attribute %s. To access ' - 'use object[%r].' % (instance.__class__.__name__, - name, name)) + raise AttributeError(f'{instance.__class__.__name__} class has no attribute {name}. 
To access use object[{name!r}].') return wrapped(*args, **kwargs) @@ -44,5 +42,5 @@ def patch_motor(module): 'MotorCollection'] for patched_class in patched_classes: if hasattr(module, patched_class): - wrap_function_wrapper(module, patched_class + '.__getattr__', + wrap_function_wrapper(module, f"{patched_class}.__getattr__", _nr_wrapper_Motor_getattr_) diff --git a/newrelic/hooks/datastore_pyelasticsearch.py b/newrelic/hooks/datastore_pyelasticsearch.py index b8418e3470..2cfeeeacf6 100644 --- a/newrelic/hooks/datastore_pyelasticsearch.py +++ b/newrelic/hooks/datastore_pyelasticsearch.py @@ -101,7 +101,7 @@ def _nr_wrapper_ElasticSearch_method_(wrapped, instance, args, kwargs): return DatastoreTraceWrapper(wrapped, product="Elasticsearch", target=index, operation=name)(*args, **kwargs) if hasattr(module.ElasticSearch, name): - wrap_function_wrapper(module, "ElasticSearch.%s" % name, _nr_wrapper_ElasticSearch_method_) + wrap_function_wrapper(module, f"ElasticSearch.{name}", _nr_wrapper_ElasticSearch_method_) def instrument_pyelasticsearch_client(module): diff --git a/newrelic/hooks/datastore_pylibmc.py b/newrelic/hooks/datastore_pylibmc.py index 3d42a70fb6..81a3b582a9 100644 --- a/newrelic/hooks/datastore_pylibmc.py +++ b/newrelic/hooks/datastore_pylibmc.py @@ -40,4 +40,4 @@ def instrument_pylibmc_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, "Client.%s" % name, product="Memcached", target=None, operation=name) + wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) diff --git a/newrelic/hooks/datastore_pymemcache.py b/newrelic/hooks/datastore_pymemcache.py index 690e95d616..9edb1d7230 100644 --- a/newrelic/hooks/datastore_pymemcache.py +++ b/newrelic/hooks/datastore_pymemcache.py @@ -40,4 +40,4 @@ def instrument_pymemcache_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, "Client.%s" % name, product="Memcached", target=None, operation=name) + wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) diff --git a/newrelic/hooks/datastore_pymongo.py b/newrelic/hooks/datastore_pymongo.py index c9c34b1fc3..d185ce2c6c 100644 --- a/newrelic/hooks/datastore_pymongo.py +++ b/newrelic/hooks/datastore_pymongo.py @@ -66,7 +66,7 @@ def instrument_pymongo_connection(module): rollup = ("Datastore/all", "Datastore/MongoDB/all") wrap_function_trace( - module, "Connection.__init__", name="%s:Connection.__init__" % module.__name__, terminal=True, rollup=rollup + module, "Connection.__init__", name=f"{module.__name__}:Connection.__init__", terminal=True, rollup=rollup ) @@ -77,7 +77,7 @@ def instrument_pymongo_mongo_client(module): rollup = ("Datastore/all", "Datastore/MongoDB/all") wrap_function_trace( - module, "MongoClient.__init__", name="%s:MongoClient.__init__" % module.__name__, terminal=True, rollup=rollup + module, "MongoClient.__init__", name=f"{module.__name__}:MongoClient.__init__", terminal=True, rollup=rollup ) @@ -88,5 +88,5 @@ def _collection_name(collection, *args, **kwargs): for name in _pymongo_client_methods: if hasattr(module.Collection, name): wrap_datastore_trace( - module, "Collection.%s" % name, product="MongoDB", target=_collection_name, operation=name + module, f"Collection.{name}", product="MongoDB", target=_collection_name, operation=name ) diff --git a/newrelic/hooks/datastore_pysolr.py b/newrelic/hooks/datastore_pysolr.py index 7d4e8697d8..301d87ae48 100644 
--- a/newrelic/hooks/datastore_pysolr.py +++ b/newrelic/hooks/datastore_pysolr.py @@ -22,11 +22,11 @@ def instrument_pysolr(module): for name in _pysolr_client_methods: if hasattr(module.Solr, name): - wrap_datastore_trace(module, "Solr.%s" % name, product="Solr", target=None, operation=name) + wrap_datastore_trace(module, f"Solr.{name}", product="Solr", target=None, operation=name) if hasattr(module, "SolrCoreAdmin"): for name in _pysolr_admin_methods: if hasattr(module.SolrCoreAdmin, name): wrap_datastore_trace( - module, "SolrCoreAdmin.%s" % name, product="Solr", target=None, operation="admin.%s" % name + module, f"SolrCoreAdmin.{name}", product="Solr", target=None, operation=f"admin.{name}" ) diff --git a/newrelic/hooks/datastore_redis.py b/newrelic/hooks/datastore_redis.py index 95d988d39b..ad14824195 100644 --- a/newrelic/hooks/datastore_redis.py +++ b/newrelic/hooks/datastore_redis.py @@ -523,7 +523,7 @@ def _instance_info(kwargs): def _wrap_Redis_method_wrapper_(module, instance_class_name, operation): - name = "%s.%s" % (instance_class_name, operation) + name = f"{instance_class_name}.{operation}" if operation in _redis_client_gen_methods: async_wrapper = generator_wrapper else: @@ -544,7 +544,7 @@ def _nr_wrapper_asyncio_Redis_method_(wrapped, instance, args, kwargs): wrapped, product="Redis", target=None, operation=operation, async_wrapper=async_wrapper )(*args, **kwargs) - name = "%s.%s" % (instance_class_name, operation) + name = f"{instance_class_name}.{operation}" if operation in _redis_client_gen_methods: async_wrapper = async_generator_wrapper else: @@ -596,7 +596,7 @@ async def wrap_async_Connection_send_command(wrapped, instance, args, kwargs): # Convert multi args to single arg string if operation in _redis_multipart_commands and len(args) > 1: - operation = "%s %s" % (operation, args[1].strip().lower()) + operation = f"{operation} {args[1].strip().lower()}" operation = _redis_operation_re.sub("_", operation) @@ -647,7 +647,7 @@ def _nr_Connection_send_command_wrapper_(wrapped, instance, args, kwargs): # Convert multi args to single arg string if operation in _redis_multipart_commands and len(args) > 1: - operation = "%s %s" % (operation, args[1].strip().lower()) + operation = f"{operation} {args[1].strip().lower()}" operation = _redis_operation_re.sub("_", operation) diff --git a/newrelic/hooks/datastore_solrpy.py b/newrelic/hooks/datastore_solrpy.py index 74e808ae50..2ba0a72507 100644 --- a/newrelic/hooks/datastore_solrpy.py +++ b/newrelic/hooks/datastore_solrpy.py @@ -30,4 +30,4 @@ def instrument_solrpy(module): for name in _solrpy_client_methods: if hasattr(module.SolrConnection, name): - wrap_datastore_trace(module, "SolrConnection.%s" % name, product="Solr", target=None, operation=name) + wrap_datastore_trace(module, f"SolrConnection.{name}", product="Solr", target=None, operation=name) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index e4ade6be4a..f281c96097 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -131,7 +131,7 @@ def create_chat_completion_message_event( content = message.get("content", "") if response_id: - id_ = "%s-%d" % (response_id, index) # Response ID was set, append message index to it. + id_ = f"{response_id}-{int(index)}" # Response ID was set, append message index to it. 
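The int() call in the botocore hunk above is deliberate rather than decorative: %d coerces its argument to an integer, so a bare {index} would change the output whenever a float slips through. A sketch with hypothetical values:

    response_id = "chatcmpl-abc123"   # hypothetical stand-in for a real id
    index = 3.0                       # %d truncates floats; {index} would not
    assert "%s-%d" % (response_id, index) == "chatcmpl-abc123-3"
    assert f"{response_id}-{int(index)}" == "chatcmpl-abc123-3"
    assert f"{response_id}-{index}" == "chatcmpl-abc123-3.0"  # the difference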
else: id_ = str(uuid.uuid4()) # No response IDs, use random UUID @@ -169,7 +169,7 @@ def create_chat_completion_message_event( content = content[0] if response_id: - id_ = "%s-%d" % (response_id, index) # Response ID was set, append message index to it. + id_ = f"{response_id}-{int(index)}" # Response ID was set, append message index to it. else: id_ = str(uuid.uuid4()) # No response IDs, use random UUID @@ -522,7 +522,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): operation = "embedding" if is_embedding else "completion" # Function trace may not be exited in this function in the case of streaming, so start manually - ft = FunctionTrace(name=function_name, group="Llm/%s/Bedrock" % (operation)) + ft = FunctionTrace(name=function_name, group=f"Llm/{operation}/Bedrock") ft.__enter__() # Get trace information diff --git a/newrelic/hooks/external_facepy.py b/newrelic/hooks/external_facepy.py index 2bf8605685..0cdfeaed14 100644 --- a/newrelic/hooks/external_facepy.py +++ b/newrelic/hooks/external_facepy.py @@ -17,7 +17,7 @@ def instrument(module): def url_query(graph_obj, method, path, *args, **kwargs): - return '/'.join([graph_obj.url, path]) + return f"{graph_obj.url}/{path}" newrelic.api.external_trace.wrap_external_trace( module, 'GraphAPI._query', 'facepy', url_query) diff --git a/newrelic/hooks/external_feedparser.py b/newrelic/hooks/external_feedparser.py index 432d292be6..40d4ac7716 100644 --- a/newrelic/hooks/external_feedparser.py +++ b/newrelic/hooks/external_feedparser.py @@ -46,7 +46,7 @@ def __call__(self, url, *args, **kwargs): if parsed_url.startswith("feed:http"): parsed_url = parsed_url[5:] elif parsed_url.startswith("feed:"): - parsed_url = "http:" + url[5:] + parsed_url = f"http:{url[5:]}" if parsed_url.split(":")[0].lower() in ["http", "https", "ftp"]: current_transaction = newrelic.api.transaction.current_transaction() diff --git a/newrelic/hooks/external_httplib.py b/newrelic/hooks/external_httplib.py index e31aa8e20c..93be1e9839 100644 --- a/newrelic/hooks/external_httplib.py +++ b/newrelic/hooks/external_httplib.py @@ -38,7 +38,7 @@ def _connect_unbound(instance, *args, **kwargs): if hasattr(connection, '_nr_library_info'): library, scheme = connection._nr_library_info - url = '%s://%s:%s' % (scheme, connection.host, connection.port) + url = f'{scheme}://{connection.host}:{connection.port}' # Check if the NR headers have already been added. 
This is just in # case a higher level library which uses httplib underneath so diff --git a/newrelic/hooks/external_thrift.py b/newrelic/hooks/external_thrift.py index ceea07b80f..e2a94f3ced 100644 --- a/newrelic/hooks/external_thrift.py +++ b/newrelic/hooks/external_thrift.py @@ -19,9 +19,9 @@ def instrument(module): def tsocket_open_url(socket, *args, **kwargs): scheme = 'socket' if socket._unix_socket else 'http' if socket.port: - url = '%s://%s:%s' % (scheme, socket.host, socket.port) + url = f'{scheme}://{socket.host}:{socket.port}' else: - url = '%s://%s' % (scheme, socket.host) + url = f'{scheme}://{socket.host}' return url diff --git a/newrelic/hooks/external_urllib3.py b/newrelic/hooks/external_urllib3.py index fbcf10ef0c..307224f0af 100644 --- a/newrelic/hooks/external_urllib3.py +++ b/newrelic/hooks/external_urllib3.py @@ -19,7 +19,7 @@ def _nr_wrapper_make_request_(wrapped, instance, args, kwargs): def _bind_params(conn, method, url, *args, **kwargs): - return method, "%s://%s:%s" % (instance.scheme, conn.host, conn.port) + return method, f"{instance.scheme}://{conn.host}:{conn.port}" method, url_for_apm_ui = _bind_params(*args, **kwargs) diff --git a/newrelic/hooks/external_xmlrpclib.py b/newrelic/hooks/external_xmlrpclib.py index 7b83852386..3c02ed48e6 100644 --- a/newrelic/hooks/external_xmlrpclib.py +++ b/newrelic/hooks/external_xmlrpclib.py @@ -15,7 +15,7 @@ import newrelic.api.external_trace def wrap_transport_request(self, host, handler, *args, **kwargs): - return "http://%s%s" % (host, handler) + return f"http://{host}{handler}" def instrument(module): diff --git a/newrelic/hooks/framework_aiohttp.py b/newrelic/hooks/framework_aiohttp.py index 68f4e70f1c..601d585f9e 100644 --- a/newrelic/hooks/framework_aiohttp.py +++ b/newrelic/hooks/framework_aiohttp.py @@ -152,7 +152,7 @@ class ResponseProxy: def __getattr__(self, name): # instance.response should be overwritten at this point if instance.response is self: - raise AttributeError("%r object has no attribute %r" % (type(instance).__name__, "response")) + raise AttributeError(f"{type(instance).__name__!r} object has no attribute response") return getattr(instance.response, name) instance.response = ResponseProxy() diff --git a/newrelic/hooks/framework_django.py b/newrelic/hooks/framework_django.py index 5f7c6d797a..d9bc0e4c69 100644 --- a/newrelic/hooks/framework_django.py +++ b/newrelic/hooks/framework_django.py @@ -64,7 +64,7 @@ def _setting_boolean(value): if value.lower() not in _boolean_states: - raise ValueError("Not a boolean: %s" % value) + raise ValueError(f"Not a boolean: {value}") return _boolean_states[value.lower()] diff --git a/newrelic/hooks/framework_flask.py b/newrelic/hooks/framework_flask.py index 0da056a53d..ed115d3456 100644 --- a/newrelic/hooks/framework_flask.py +++ b/newrelic/hooks/framework_flask.py @@ -93,7 +93,7 @@ def _bind_params(rule, endpoint=None, view_func=None, **options): def _nr_wrapper_Flask_views_View_as_view_(wrapped, instance, args, kwargs): view = wrapped(*args, **kwargs) - view._nr_view_func_name = "%s:%s" % (view.__module__, view.__name__) + view._nr_view_func_name = f"{view.__module__}:{view.__name__}" return view diff --git a/newrelic/hooks/framework_graphql.py b/newrelic/hooks/framework_graphql.py index 7166aa857c..c555b3981a 100644 --- a/newrelic/hooks/framework_graphql.py +++ b/newrelic/hooks/framework_graphql.py @@ -188,7 +188,7 @@ def traverse_deepest_unique_path(fields, fragments): if is_named_fragment(field): name = get_node_value(field.type_condition, "name") if name: 
- deepest_path.append("%s<%s>" % (deepest_path.pop(), name)) + deepest_path.append(f"{deepest_path.pop()}<{name}>") elif is_fragment(field): if len(list(fragments.values())) != 1: diff --git a/newrelic/hooks/framework_grpc.py b/newrelic/hooks/framework_grpc.py index 8ad429991d..dc59634e71 100644 --- a/newrelic/hooks/framework_grpc.py +++ b/newrelic/hooks/framework_grpc.py @@ -26,7 +26,7 @@ def _get_uri_method(instance, *args, **kwargs): target = instance._channel.target().decode("utf-8").lstrip("dns:///") method = instance._method.decode("utf-8").lstrip("/") - uri = "grpc://%s/%s" % (target, method) + uri = f"grpc://{target}/{method}" return (uri, method) @@ -64,7 +64,7 @@ def _future_wrapper(wrapped, instance, args, kwargs): if transaction is None: return wrapped(*args, **kwargs) - guid = "%016x" % random.getrandbits(64) + guid = f"{random.getrandbits(64):016x}" uri, method = _get_uri_method(instance) args, kwargs = prepare(transaction, guid, *args, **kwargs) diff --git a/newrelic/hooks/framework_pylons.py b/newrelic/hooks/framework_pylons.py index f43d92de39..2880eb10b0 100644 --- a/newrelic/hooks/framework_pylons.py +++ b/newrelic/hooks/framework_pylons.py @@ -24,7 +24,7 @@ def name_controller(self, environ, start_response): action = environ['pylons.routes_dict']['action'] - return "%s.%s" % (callable_name(self), action) + return f"{callable_name(self)}.{action}" class capture_error(): def __init__(self, wrapped): diff --git a/newrelic/hooks/framework_sanic.py b/newrelic/hooks/framework_sanic.py index 94b5179c28..4941751c2a 100644 --- a/newrelic/hooks/framework_sanic.py +++ b/newrelic/hooks/framework_sanic.py @@ -40,7 +40,7 @@ def _nr_wrapper_handler_(wrapped, instance, args, kwargs): if view_class: try: method = args[0].method.lower() - name = callable_name(view_class) + "." + method + name = f"{callable_name(view_class)}.{method}" view = getattr(view_class, method) except: pass diff --git a/newrelic/hooks/framework_web2py.py b/newrelic/hooks/framework_web2py.py index e71bfa5fdd..73141f287a 100644 --- a/newrelic/hooks/framework_web2py.py +++ b/newrelic/hooks/framework_web2py.py @@ -36,8 +36,7 @@ def instrument_gluon_compileapp(module): # and view path. def transaction_name_run_models_in(environment): - return '%s::%s' % (environment['request'].application, - environment['response'].view) + return f"{environment['request'].application}::{environment['response'].view}" newrelic.api.transaction_name.wrap_transaction_name(module, 'run_models_in', name=transaction_name_run_models_in, @@ -50,23 +49,21 @@ def transaction_name_run_models_in(environment): # handling. 
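The web2py naming callbacks around this point embed subscript and attribute expressions directly in the braces; the only constraint is that quotes inside the expression must differ from the f-string's own delimiters. A self-contained sketch with a hypothetical environment shaped like web2py's:

    class _Req:                        # hypothetical request object
        application = "myapp"

    environment = {"request": _Req()}
    name = f"{environment['request'].application}::index"
    assert name == "myapp::index"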
def name_function_run_models_in(environment): - return '%s/%s' % (environment['request'].controller, - environment['request'].function) + return f"{environment['request'].controller}/{environment['request'].function}" newrelic.api.function_trace.wrap_function_trace(module, 'run_models_in', name=name_function_run_models_in, group='Python/Web2Py/Models') def name_function_run_controller_in(controller, function, environment): - return '%s/%s' % (controller, function) + return f'{controller}/{function}' newrelic.api.function_trace.wrap_function_trace(module, 'run_controller_in', name=name_function_run_controller_in, group='Python/Web2Py/Controller') def name_function_run_view_in(environment): - return '%s/%s' % (environment['request'].controller, - environment['request'].function) + return f"{environment['request'].controller}/{environment['request'].function}" newrelic.api.function_trace.wrap_function_trace(module, 'run_view_in', name=name_function_run_view_in, @@ -150,9 +147,9 @@ def name_function_parse_template(filename, path='views/', if 'request' in context: folder = context['request'].folder if path.startswith(folder): - return '%s/%s' % (path[len(folder):], filename) + return f'{path[len(folder):]}/{filename}' else: - return '%s/%s' % (path, filename) + return f'{path}/{filename}' newrelic.api.function_trace.wrap_function_trace(module, 'parse_template', name=name_function_parse_template, group='Template/Compile') @@ -240,11 +237,11 @@ def transaction_name_name_not_found(response, *args, **kwargs): if parts[0] == '/': txn.set_transaction_name('*', 'Web2Py') else: - name = '%s/*' % parts[0].lstrip('/') + name = f"{parts[0].lstrip('/')}/*" txn.set_transaction_name(name, 'Web2Py') else: extension = os.path.splitext(parts[1])[-1] - name = '%s/*%s' % (parts[0].lstrip('/'), extension) + name = f"{parts[0].lstrip('/')}/*{extension}" txn.set_transaction_name(name, 'Web2Py') else: txn.set_transaction_name('*', 'Web2Py') diff --git a/newrelic/hooks/logger_logging.py b/newrelic/hooks/logger_logging.py index 5939f4d87f..a11c2b328c 100644 --- a/newrelic/hooks/logger_logging.py +++ b/newrelic/hooks/logger_logging.py @@ -32,8 +32,8 @@ def add_nr_linking_metadata(message): trace_id = available_metadata.get("trace.id", "") hostname = available_metadata.get("hostname", "") - nr_linking_str = "|".join(("NR-LINKING", entity_guid, hostname, trace_id, span_id, entity_name)) - return "%s %s|" % (message, nr_linking_str) + nr_linking_str = f"NR-LINKING|{entity_guid}|{hostname}|{trace_id}|{span_id}|{entity_name}" + return f"{message} {nr_linking_str}|" @function_wrapper @@ -65,12 +65,12 @@ def wrap_callHandlers(wrapped, instance, args, kwargs): if settings.application_logging.metrics and settings.application_logging.metrics.enabled: if transaction: transaction.record_custom_metric("Logging/lines", {"count": 1}) - transaction.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + transaction.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) else: application = application_instance(activate=False) if application and application.enabled: application.record_custom_metric("Logging/lines", {"count": 1}) - application.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + application.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) if settings.application_logging.forwarding and settings.application_logging.forwarding.enabled: try: diff --git a/newrelic/hooks/logger_loguru.py b/newrelic/hooks/logger_loguru.py index 363f3cf4d1..e17107f209 100644 --- 
a/newrelic/hooks/logger_loguru.py +++ b/newrelic/hooks/logger_loguru.py @@ -34,7 +34,7 @@ def _filter_record_attributes(record): attrs = {k: v for k, v in record.items() if k not in LOGURU_FILTERED_RECORD_ATTRS} extra_attrs = dict(record.get("extra", {})) - attrs.update({"extra.%s" % k: v for k, v in extra_attrs.items()}) + attrs.update({f"extra.{k}": v for k, v in extra_attrs.items()}) return attrs @@ -56,12 +56,12 @@ def _nr_log_forwarder(message_instance): if settings.application_logging.metrics and settings.application_logging.metrics.enabled: if transaction: transaction.record_custom_metric("Logging/lines", {"count": 1}) - transaction.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + transaction.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) else: application = application_instance(activate=False) if application and application.enabled: application.record_custom_metric("Logging/lines", {"count": 1}) - application.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + application.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) if settings.application_logging.forwarding and settings.application_logging.forwarding.enabled: attrs = _filter_record_attributes(record) @@ -93,7 +93,7 @@ def wrap_log(wrapped, instance, args, kwargs): options[1] += 2 except Exception as e: - _logger.debug("Exception in loguru handling: %s" % str(e)) + _logger.debug(f"Exception in loguru handling: {str(e)}") return wrapped(*args, **kwargs) else: return wrapped(**bound_args) diff --git a/newrelic/hooks/logger_structlog.py b/newrelic/hooks/logger_structlog.py index ceef52723d..f07a85fd58 100644 --- a/newrelic/hooks/logger_structlog.py +++ b/newrelic/hooks/logger_structlog.py @@ -70,12 +70,12 @@ def new_relic_event_consumer(logger, level, event): if settings.application_logging.metrics.enabled: if transaction: transaction.record_custom_metric("Logging/lines", {"count": 1}) - transaction.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + transaction.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) else: application = application_instance(activate=False) if application and application.enabled: application.record_custom_metric("Logging/lines", {"count": 1}) - application.record_custom_metric("Logging/lines/%s" % level_name, {"count": 1}) + application.record_custom_metric(f"Logging/lines/{level_name}", {"count": 1}) if settings.application_logging.forwarding.enabled: try: diff --git a/newrelic/hooks/messagebroker_confluentkafka.py b/newrelic/hooks/messagebroker_confluentkafka.py index b7c70a129d..8a4c5d3f8c 100644 --- a/newrelic/hooks/messagebroker_confluentkafka.py +++ b/newrelic/hooks/messagebroker_confluentkafka.py @@ -162,10 +162,10 @@ def wrap_Consumer_poll(wrapped, instance, args, kwargs): # Don't add metrics if there was an inactive transaction. # Name the metrics using the same format as the transaction, but in case the active transaction # was an existing one and not a message transaction, reproduce the naming logic here. 
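The consumer hunk that follows composes two f-strings and then interpolates both into a third to build the received-bytes and received-messages metric names. A sketch with hypothetical Kafka values:

    library = "Kafka"
    destination_type = "Topic"
    destination_name = "orders"        # hypothetical topic name
    group = f"Message/{library}/{destination_type}"
    name = f"Named/{destination_name}"
    assert f"{group}/{name}/Received/Bytes" == "Message/Kafka/Topic/Named/orders/Received/Bytes"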
- group = "Message/%s/%s" % (library, destination_type) - name = "Named/%s" % destination_name - transaction.record_custom_metric("%s/%s/Received/Bytes" % (group, name), received_bytes) - transaction.record_custom_metric("%s/%s/Received/Messages" % (group, name), message_count) + group = f"Message/{library}/{destination_type}" + name = f"Named/{destination_name}" + transaction.record_custom_metric(f"{group}/{name}/Received/Bytes", received_bytes) + transaction.record_custom_metric(f"{group}/{name}/Received/Messages", message_count) transaction.add_messagebroker_info("Confluent-Kafka", get_package_version("confluent-kafka")) return record @@ -191,8 +191,8 @@ def _wrap_serializer(wrapped, instance, args, kwargs): return wrapped(*args, **kwargs) topic = args[1].topic - group = "%s/Kafka/Topic" % group_prefix - name = "Named/%s/%s" % (topic, serializer_name) + group = f"{group_prefix}/Kafka/Topic" + name = f"Named/{topic}/{serializer_name}" return FunctionTraceWrapper(wrapped, name=name, group=group)(*args, **kwargs) diff --git a/newrelic/hooks/messagebroker_kafkapython.py b/newrelic/hooks/messagebroker_kafkapython.py index dff5e2c786..e556beacf2 100644 --- a/newrelic/hooks/messagebroker_kafkapython.py +++ b/newrelic/hooks/messagebroker_kafkapython.py @@ -148,10 +148,10 @@ def wrap_kafkaconsumer_next(wrapped, instance, args, kwargs): # Don't add metrics if there was an inactive transaction. # Name the metrics using the same format as the transaction, but in case the active transaction # was an existing one and not a message transaction, reproduce the naming logic here. - group = "Message/%s/%s" % (library, destination_type) - name = "Named/%s" % destination_name - transaction.record_custom_metric("%s/%s/Received/Bytes" % (group, name), received_bytes) - transaction.record_custom_metric("%s/%s/Received/Messages" % (group, name), message_count) + group = f"Message/{library}/{destination_type}" + name = f"Named/{destination_name}" + transaction.record_custom_metric(f"{group}/{name}/Received/Bytes", received_bytes) + transaction.record_custom_metric(f"{group}/{name}/Received/Messages", message_count) transaction.add_messagebroker_info("Kafka-Python", get_package_version("kafka-python")) return record @@ -185,8 +185,8 @@ def serialize(self, topic, object): if not current_transaction(): return wrapped(*args, **kwargs) - group = "%s/Kafka/Topic" % self._nr_group_prefix - name = "Named/%s/%s" % (topic, self._nr_serializer_name) + group = f"{self._nr_group_prefix}/Kafka/Topic" + name = f"Named/{topic}/{self._nr_serializer_name}" return FunctionTraceWrapper(wrapped, name=name, group=group)(*args, **kwargs) @@ -209,8 +209,8 @@ def _wrap_serializer(wrapped, instance, args, kwargs): if message_trace: topic = message_trace.destination_name - group = "%s/Kafka/Topic" % group_prefix - name = "Named/%s/%s" % (topic, serializer_name) + group = f"{group_prefix}/Kafka/Topic" + name = f"Named/{topic}/{serializer_name}" return FunctionTraceWrapper(wrapped, name=name, group=group)(*args, **kwargs) diff --git a/newrelic/hooks/messagebroker_pika.py b/newrelic/hooks/messagebroker_pika.py index 624243a855..58cab675e7 100644 --- a/newrelic/hooks/messagebroker_pika.py +++ b/newrelic/hooks/messagebroker_pika.py @@ -413,7 +413,7 @@ def callback_wrapper(wrapped, instance, args, kwargs): source=wrapped, ) as mt: # Improve transaction naming - _new_txn_name = "RabbitMQ/Exchange/%s/%s" % (exchange, name) + _new_txn_name = f"RabbitMQ/Exchange/{exchange}/{name}" mt.set_transaction_name(_new_txn_name, group="Message") # Record 
that something went horribly wrong diff --git a/newrelic/hooks/middleware_flask_compress.py b/newrelic/hooks/middleware_flask_compress.py index f40016ab84..f1cad224fb 100644 --- a/newrelic/hooks/middleware_flask_compress.py +++ b/newrelic/hooks/middleware_flask_compress.py @@ -35,7 +35,7 @@ def _setting_boolean(value): if value.lower() not in _boolean_states: - raise ValueError("Not a boolean: %s" % value) + raise ValueError(f"Not a boolean: {value}") return _boolean_states[value.lower()] diff --git a/newrelic/hooks/middleware_weberror.py b/newrelic/hooks/middleware_weberror.py index c99f68799e..1b575805f9 100644 --- a/newrelic/hooks/middleware_weberror.py +++ b/newrelic/hooks/middleware_weberror.py @@ -22,7 +22,7 @@ def instrument_weberror_errormiddleware(module): def instrument_weberror_reporter(module): def smtp_url(reporter, *args, **kwargs): - return 'smtp://' + reporter.smtp_server + return f"smtp://{reporter.smtp_server}" wrap_external_trace(module, 'EmailReporter.report', 'weberror', smtp_url) wrap_function_trace(module, 'EmailReporter.report') diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py index 82b8ef16f5..cb7998580b 100644 --- a/newrelic/hooks/mlmodel_langchain.py +++ b/newrelic/hooks/mlmodel_langchain.py @@ -248,7 +248,7 @@ def _record_vector_search_success(transaction, linking_metadata, ft, search_id, page_content = getattr(doc, "page_content") metadata = getattr(doc, "metadata") or {} - metadata_dict = {"metadata.%s" % key: value for key, value in metadata.items()} + metadata_dict = {f"metadata.{key}": value for key, value in metadata.items()} llm_vector_search_result = { "id": str(uuid.uuid4()), @@ -413,7 +413,7 @@ def _record_tool_success( # Update tags and metadata previously obtained from run_args with instance values metadata.update(getattr(instance, "metadata", None) or {}) tags.extend(getattr(instance, "tags", None) or []) - full_tool_event_dict = {"metadata.%s" % key: value for key, value in metadata.items() if key != "nr_tool_id"} + full_tool_event_dict = {f"metadata.{key}": value for key, value in metadata.items() if key != "nr_tool_id"} full_tool_event_dict.update( { "id": tool_id, @@ -433,7 +433,7 @@ def _record_tool_success( result = str(response) except Exception: _logger.debug( - "Failed to convert tool response into a string.\n%s" % traceback.format_exception(*sys.exc_info()) + f"Failed to convert tool response into a string.\n{traceback.format_exception(*sys.exc_info())}" ) if settings.ai_monitoring.record_content.enabled: full_tool_event_dict.update( @@ -462,7 +462,7 @@ def _record_tool_error( tags.extend(getattr(instance, "tags", None) or []) # Make sure the builtin attributes take precedence over metadata attributes. - error_tool_event_dict = {"metadata.%s" % key: value for key, value in metadata.items() if key != "nr_tool_id"} + error_tool_event_dict = {f"metadata.{key}": value for key, value in metadata.items() if key != "nr_tool_id"} error_tool_event_dict.update( { "id": tool_id, @@ -643,7 +643,7 @@ def _create_error_chain_run_events(transaction, instance, run_args, completion_i input_message_list = [_input] # Make sure the builtin attributes take precedence over metadata attributes. 
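These langchain hunks repeatedly use an f-string as the key expression of a dict comprehension to namespace user metadata under "metadata.". A minimal sketch with hypothetical metadata:

    metadata = {"source": "kb", "nr_tool_id": "t-1"}
    namespaced = {f"metadata.{k}": v for k, v in metadata.items() if k != "nr_tool_id"}
    assert namespaced == {"metadata.source": "kb"}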
- full_chat_completion_summary_dict = {"metadata.%s" % key: value for key, value in metadata.items()} + full_chat_completion_summary_dict = {f"metadata.{key}": value for key, value in metadata.items()} full_chat_completion_summary_dict.update( { "id": completion_id, @@ -709,12 +709,11 @@ def _create_successful_chain_run_events( output_message_list = [str(response)] except Exception as e: _logger.warning( - "Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n%s" - % traceback.format_exception(*sys.exc_info()) + f"Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n{traceback.format_exception(*sys.exc_info())}" ) # Make sure the builtin attributes take precedence over metadata attributes. - full_chat_completion_summary_dict = {"metadata.%s" % key: value for key, value in metadata.items()} + full_chat_completion_summary_dict = {f"metadata.{key}": value for key, value in metadata.items()} full_chat_completion_summary_dict.update( { "id": completion_id, @@ -858,9 +857,9 @@ def instrument_langchain_vectorstore_similarity_search(module): vector_class = VECTORSTORE_CLASSES.get(module.__name__) if vector_class and hasattr(getattr(module, vector_class, ""), "similarity_search"): - wrap_function_wrapper(module, "%s.similarity_search" % vector_class, wrap_similarity_search) + wrap_function_wrapper(module, f"{vector_class}.similarity_search", wrap_similarity_search) if vector_class and hasattr(getattr(module, vector_class, ""), "asimilarity_search"): - wrap_function_wrapper(module, "%s.asimilarity_search" % vector_class, wrap_asimilarity_search) + wrap_function_wrapper(module, f"{vector_class}.asimilarity_search", wrap_asimilarity_search) def instrument_langchain_core_tools(module): diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index dd308209b3..4479d1e966 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -144,7 +144,7 @@ def create_chat_completion_message_event( # Response ID was set, append message index to it. if response_id: - message_id = "%s-%d" % (response_id, index) + message_id = f"{response_id}-{int(index)}" # No response IDs, use random UUID else: message_id = str(uuid.uuid4()) @@ -184,7 +184,7 @@ def create_chat_completion_message_event( # Response ID was set, append message index to it. if response_id: - message_id = "%s-%d" % (response_id, index) + message_id = f"{response_id}-{int(index)}" # No response IDs, use random UUID else: message_id = str(uuid.uuid4()) diff --git a/newrelic/hooks/mlmodel_sklearn.py b/newrelic/hooks/mlmodel_sklearn.py index bdfeccfc83..5ce3b32d85 100644 --- a/newrelic/hooks/mlmodel_sklearn.py +++ b/newrelic/hooks/mlmodel_sklearn.py @@ -67,7 +67,7 @@ def _nr_wrapper_method(wrapped, instance, args, kwargs): if settings and not settings.machine_learning.enabled: return wrapped(*args, **kwargs) - wrapped_attr_name = "_nr_wrapped_%s" % method + wrapped_attr_name = f"_nr_wrapped_{method}" # If the method has already been wrapped do not wrap it again. This happens # when one class inherits from another and they both implement the method. 
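The comment above describes a sentinel-attribute guard: each wrapped method is marked with a per-method flag so a subclass that inherits an already-wrapped method is not wrapped twice. A minimal sketch of the idea, not the agent's actual wrapper:

    class Model:                        # hypothetical estimator class
        def predict(self):
            return 42

    method = "predict"
    wrapped_attr_name = f"_nr_wrapped_{method}"
    if not getattr(Model, wrapped_attr_name, False):
        setattr(Model, wrapped_attr_name, True)   # wrap exactly once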
@@ -99,7 +99,7 @@ def _nr_wrapper_method(wrapped, instance, args, kwargs): return PredictReturnTypeProxy(return_val, model_name=class_, training_step=training_step) return return_val - wrap_function_wrapper(module, "%s.%s" % (class_, method), _nr_wrapper_method) + wrap_function_wrapper(module, f"{class_}.{method}", _nr_wrapper_method) def _calc_prediction_feature_stats(prediction_input, class_, feature_column_names, tags): @@ -145,18 +145,18 @@ def _record_stats(data, column_names, class_, column_type, tags): # to upload them one at a time instead of as a dictionary of stats per # feature column. for index, col_name in enumerate(column_names): - metric_name = "MLModel/Sklearn/Named/%s/Predict/%s/%s" % (class_, column_type, col_name) + metric_name = f"MLModel/Sklearn/Named/{class_}/Predict/{column_type}/{col_name}" transaction.record_dimensional_metrics( [ - ("%s/%s" % (metric_name, "Mean"), float(mean[index]), tags), - ("%s/%s" % (metric_name, "Percentile25"), float(percentile25[index]), tags), - ("%s/%s" % (metric_name, "Percentile50"), float(percentile50[index]), tags), - ("%s/%s" % (metric_name, "Percentile75"), float(percentile75[index]), tags), - ("%s/%s" % (metric_name, "StandardDeviation"), float(standard_deviation[index]), tags), - ("%s/%s" % (metric_name, "Min"), float(_min[index]), tags), - ("%s/%s" % (metric_name, "Max"), float(_max[index]), tags), - ("%s/%s" % (metric_name, "Count"), _count, tags), + (f"{metric_name}/Mean", float(mean[index]), tags), + (f"{metric_name}/Percentile25", float(percentile25[index]), tags), + (f"{metric_name}/Percentile50", float(percentile50[index]), tags), + (f"{metric_name}/Percentile75", float(percentile75[index]), tags), + (f"{metric_name}/StandardDeviation", float(standard_deviation[index]), tags), + (f"{metric_name}/Min", float(_min[index]), tags), + (f"{metric_name}/Max", float(_max[index]), tags), + (f"{metric_name}/Count", _count, tags), ] ) @@ -302,13 +302,13 @@ def create_prediction_event(transaction, class_, instance, args, kwargs, return_ if settings and settings.machine_learning and settings.machine_learning.inference_events_value.enabled: event.update( { - "feature.%s" % str(final_feature_names[feature_col_index]): value + f"feature.{str(final_feature_names[feature_col_index])}": value for feature_col_index, value in enumerate(prediction) } ) event.update( { - "label.%s" % str(label_names_list[index]): str(value) + f"label.{str(label_names_list[index])}": str(value) for index, value in enumerate(labels[prediction_index]) } ) @@ -319,7 +319,7 @@ def _nr_instrument_model(module, model_class): for method_name in METHODS_TO_WRAP: if hasattr(getattr(module, model_class), method_name): # Function/MLModel/Sklearn/Named/. 
- name = "MLModel/Sklearn/Named/%s.%s" % (model_class, method_name) + name = f"MLModel/Sklearn/Named/{model_class}.{method_name}" _wrap_method_trace(module, model_class, method_name, name=name) @@ -359,10 +359,10 @@ def wrap_metric_scorer(wrapped, instance, args, kwargs): if hasattr(score, "__iter__"): for i, s in enumerate(score): transaction._add_agent_attribute( - "%s/TrainingStep/%s/%s[%s]" % (model_name, training_step, wrapped.__name__, i), s + f"{model_name}/TrainingStep/{training_step}/{wrapped.__name__}[{i}]", s ) else: - transaction._add_agent_attribute("%s/TrainingStep/%s/%s" % (model_name, training_step, wrapped.__name__), score) + transaction._add_agent_attribute(f"{model_name}/TrainingStep/{training_step}/{wrapped.__name__}", score) return score diff --git a/newrelic/network/addresses.py b/newrelic/network/addresses.py index 662fbc555c..156e2c2311 100644 --- a/newrelic/network/addresses.py +++ b/newrelic/network/addresses.py @@ -61,20 +61,20 @@ def proxy_details(proxy_scheme, proxy_host, proxy_port, proxy_user, netloc = proxy_host if proxy_port: - netloc = '%s:%s' % (netloc, proxy_port) + netloc = f'{netloc}:{proxy_port}' if proxy_user: proxy_user = proxy_user or '' proxy_pass = proxy_pass or '' if proxy_pass: - netloc = '%s:%s@%s' % (proxy_user, proxy_pass, netloc) + netloc = f'{proxy_user}:{proxy_pass}@{netloc}' else: - netloc = '%s@%s' % (proxy_user, netloc) + netloc = f'{proxy_user}@{netloc}' if proxy_scheme is None: proxy_scheme = 'http' - proxy = '%s://%s%s' % (proxy_scheme, netloc, path) + proxy = f'{proxy_scheme}://{netloc}{path}' return {'http': proxy, 'https': proxy} diff --git a/newrelic/samplers/data_sampler.py b/newrelic/samplers/data_sampler.py index 696e6c8147..9abb3c5536 100644 --- a/newrelic/samplers/data_sampler.py +++ b/newrelic/samplers/data_sampler.py @@ -93,7 +93,7 @@ def metrics(self): return [] if self.group: - return (('%s/%s' % (self.group, key), value) + return ((f'{self.group}/{key}', value) for key, value in self.instance()) else: return self.instance() diff --git a/newrelic/samplers/gc_data.py b/newrelic/samplers/gc_data.py index 0a5dc1ec92..4aebc704d1 100644 --- a/newrelic/samplers/gc_data.py +++ b/newrelic/samplers/gc_data.py @@ -55,12 +55,12 @@ def record_gc(self, phase, info): self.start_time = time.time() elif phase == "stop": total_time = time.time() - self.start_time - self.gc_time_metrics.record_custom_metric("GC/time/%d/all" % self.pid, total_time) + self.gc_time_metrics.record_custom_metric(f"GC/time/{self.pid}/all", total_time) for gen in range(0, 3): if gen <= current_generation: - self.gc_time_metrics.record_custom_metric("GC/time/%d/%d" % (self.pid, gen), total_time) + self.gc_time_metrics.record_custom_metric(f"GC/time/{self.pid}/{gen}", total_time) else: - self.gc_time_metrics.record_custom_metric("GC/time/%d/%d" % (self.pid, gen), 0) + self.gc_time_metrics.record_custom_metric(f"GC/time/{self.pid}/{gen}", 0) def start(self): if hasattr(gc, "callbacks"): @@ -83,10 +83,10 @@ def __call__(self): # Record object count in total and per generation if hasattr(gc, "get_count"): counts = gc.get_count() - yield ("GC/objects/%d/all" % self.pid, {"count": sum(counts)}) + yield (f"GC/objects/{self.pid}/all", {"count": sum(counts)}) for gen, count in enumerate(counts): yield ( - "GC/objects/%d/generation/%d" % (self.pid, gen), + f"GC/objects/{self.pid}/generation/{gen}", {"count": count}, ) @@ -97,7 +97,7 @@ def __call__(self): highest_types = Counter(object_types).most_common(self.top_object_count_limit) for obj_type, count in highest_types: 
yield ( - "GC/objects/%d/type/%s" % (self.pid, callable_name(obj_type)), + f"GC/objects/{self.pid}/type/{callable_name(obj_type)}", {"count": count}, ) @@ -111,7 +111,7 @@ def __call__(self): self.previous_stats[(stat_name, "all")] = count change_in_value = count - previous_value yield ( - "GC/%s/%d/all" % (stat_name, self.pid), + f"GC/{stat_name}/{self.pid}/all", {"count": change_in_value}, ) @@ -122,7 +122,7 @@ def __call__(self): change_in_value = stats[stat_name] - previous_value yield ( - "GC/%s/%d/%d" % (stat_name, self.pid, gen), + f"GC/{stat_name}/{self.pid}/{gen}", {"count": change_in_value}, ) diff --git a/newrelic/samplers/memory_usage.py b/newrelic/samplers/memory_usage.py index 48b03e6c53..f1895bdfec 100644 --- a/newrelic/samplers/memory_usage.py +++ b/newrelic/samplers/memory_usage.py @@ -38,5 +38,5 @@ def memory_usage_data_source(): yield ("Memory/Physical/Utilization", memory_utilization) if settings.memory_runtime_pid_metrics.enabled: - yield ("Memory/Physical/%d" % (pid), memory) - yield ("Memory/Physical/Utilization/%d" % (pid), memory_utilization) + yield (f"Memory/Physical/{pid}", memory) + yield (f"Memory/Physical/Utilization/{pid}", memory_utilization) diff --git a/tests/adapter_daphne/test_daphne.py b/tests/adapter_daphne/test_daphne.py index e5f9dd832b..88c916954a 100644 --- a/tests/adapter_daphne/test_daphne.py +++ b/tests/adapter_daphne/test_daphne.py @@ -98,7 +98,7 @@ async def fake_app(*args, **kwargs): server = daphne.server.Server( fake_app, - endpoints=["tcp:%d:interface=127.0.0.1" % port], + endpoints=[f"tcp:{port}:interface=127.0.0.1"], ready_callable=on_ready, signal_handlers=False, verbosity=9, @@ -124,13 +124,13 @@ def test_daphne_200(port, app): @validate_transaction_metrics( callable_name(app), custom_metrics=[ - ("Python/Dispatcher/Daphne/%s" % daphne.__version__, 1), + (f"Python/Dispatcher/Daphne/{daphne.__version__}", 1), ], ) @raise_background_exceptions() @wait_for_background_threads() def response(): - return urlopen("http://localhost:%d" % port, timeout=10) # nosec + return urlopen(f"http://localhost:{port}", timeout=10) # nosec assert response().status == 200 @@ -143,7 +143,7 @@ def test_daphne_500(port, app): @wait_for_background_threads() def _test(): try: - urlopen("http://localhost:%d/exc" % port) # nosec + urlopen(f"http://localhost:{port}/exc") # nosec except HTTPError: pass diff --git a/tests/adapter_gevent/conftest.py b/tests/adapter_gevent/conftest.py index 41abbb7f41..46d4c1ee3e 100644 --- a/tests/adapter_gevent/conftest.py +++ b/tests/adapter_gevent/conftest.py @@ -39,4 +39,4 @@ def target_application(): import _application port = _application.setup_application() - return webtest.TestApp("http://localhost:%d" % port) + return webtest.TestApp(f"http://localhost:{port}") diff --git a/tests/adapter_gunicorn/test_aiohttp_app_factory.py b/tests/adapter_gunicorn/test_aiohttp_app_factory.py index dc16b1231a..55eeee05b2 100644 --- a/tests/adapter_gunicorn/test_aiohttp_app_factory.py +++ b/tests/adapter_gunicorn/test_aiohttp_app_factory.py @@ -36,7 +36,7 @@ def test_aiohttp_app_factory(nr_enabled): # Restart the server if it dies during testing for _ in range(5): PORT = get_open_port() - cmd = [gunicorn, '-b', '127.0.0.1:%d' % PORT, '--worker-class', + cmd = [gunicorn, '-b', f'127.0.0.1:{PORT}', '--worker-class', 'aiohttp.GunicornWebWorker', 'async_app:app_factory'] if nr_enabled: @@ -71,7 +71,7 @@ def test_aiohttp_app_factory(nr_enabled): else: continue - with urlopen('http://127.0.0.1:%d' % PORT) as resp: + with 
urlopen(f'http://127.0.0.1:{PORT}') as resp: assert resp.getcode() == 200 assert resp.read() == b'PONG' diff --git a/tests/adapter_gunicorn/test_asgi_app.py b/tests/adapter_gunicorn/test_asgi_app.py index 93e3484655..6ce6882477 100644 --- a/tests/adapter_gunicorn/test_asgi_app.py +++ b/tests/adapter_gunicorn/test_asgi_app.py @@ -31,7 +31,7 @@ def test_asgi_app(nr_enabled): gunicorn = os.path.join(os.environ['TOX_ENV_DIR'], 'bin', 'gunicorn') PORT = get_open_port() - cmd = [gunicorn, '-b', '127.0.0.1:%d' % PORT, '--worker-class', + cmd = [gunicorn, '-b', f'127.0.0.1:{PORT}', '--worker-class', 'worker.AsgiWorker', 'asgi_app:Application'] if nr_enabled: @@ -66,7 +66,7 @@ def test_asgi_app(nr_enabled): time.sleep(0.1) else: continue - with urlopen('http://127.0.0.1:%d' % PORT) as resp: + with urlopen(f'http://127.0.0.1:{PORT}') as resp: assert resp.getcode() == 200 assert resp.read() == b'PONG' diff --git a/tests/adapter_gunicorn/test_gaiohttp.py b/tests/adapter_gunicorn/test_gaiohttp.py index 9f205bad94..9b12611229 100644 --- a/tests/adapter_gunicorn/test_gaiohttp.py +++ b/tests/adapter_gunicorn/test_gaiohttp.py @@ -36,7 +36,7 @@ def test_gunicorn_gaiohttp_worker(nr_enabled): # Restart the server if it dies during testing for _ in range(5): PORT = get_open_port() - cmd = [gunicorn, '-b', '127.0.0.1:%d' % PORT, '-k', 'gaiohttp', + cmd = [gunicorn, '-b', f'127.0.0.1:{PORT}', '-k', 'gaiohttp', 'app:application'] if nr_enabled: @@ -69,7 +69,7 @@ def test_gunicorn_gaiohttp_worker(nr_enabled): else: continue - with urlopen('http://127.0.0.1:%d' % PORT) as resp: + with urlopen(f'http://127.0.0.1:{PORT}') as resp: assert resp.getcode() == 200 assert resp.read() == b'PONG' diff --git a/tests/adapter_hypercorn/test_hypercorn.py b/tests/adapter_hypercorn/test_hypercorn.py index 262f7a0317..77f1223373 100644 --- a/tests/adapter_hypercorn/test_hypercorn.py +++ b/tests/adapter_hypercorn/test_hypercorn.py @@ -97,7 +97,7 @@ async def shutdown_trigger(): config = hypercorn.config.Config.from_mapping( { - "bind": ["127.0.0.1:%d" % port], + "bind": [f"127.0.0.1:{port}"], } ) @@ -123,7 +123,7 @@ def wait_for_port(port, retries=10): status = None for _ in range(retries): try: - status = urlopen("http://localhost:%d/ignored" % port, timeout=1).status # nosec + status = urlopen(f"http://localhost:{port}/ignored", timeout=1).status # nosec assert status == 200 return except Exception as e: @@ -131,7 +131,7 @@ def wait_for_port(port, retries=10): time.sleep(1) - raise RuntimeError("Failed to wait for port %d. Got status %s" % (port, status)) + raise RuntimeError(f"Failed to wait for port {port}. 
Got status {status}") @override_application_settings({"transaction_name.naming_scheme": "framework"}) @@ -141,13 +141,13 @@ def test_hypercorn_200(port, app): @validate_transaction_metrics( callable_name(app), custom_metrics=[ - ("Python/Dispatcher/Hypercorn/%s" % hypercorn_version, 1), + (f"Python/Dispatcher/Hypercorn/{hypercorn_version}", 1), ], ) @raise_background_exceptions() @wait_for_background_threads() def response(): - return urlopen("http://localhost:%d" % port, timeout=10) # nosec + return urlopen(f"http://localhost:{port}", timeout=10) # nosec assert response().status == 200 @@ -160,6 +160,6 @@ def test_hypercorn_500(port, app): @wait_for_background_threads() def _test(): with pytest.raises(HTTPError): - urlopen("http://localhost:%d/exc" % port) # nosec + urlopen(f"http://localhost:{port}/exc") # nosec _test() diff --git a/tests/adapter_uvicorn/test_uvicorn.py b/tests/adapter_uvicorn/test_uvicorn.py index 93d155aa85..6a6718891b 100644 --- a/tests/adapter_uvicorn/test_uvicorn.py +++ b/tests/adapter_uvicorn/test_uvicorn.py @@ -112,7 +112,7 @@ def test_uvicorn_200(port, app): @raise_background_exceptions() @wait_for_background_threads() def response(): - return urlopen("http://localhost:%d" % port) + return urlopen(f"http://localhost:{port}") assert response().status == 200 @@ -125,7 +125,7 @@ def test_uvicorn_500(port, app): @wait_for_background_threads() def _test(): try: - urlopen("http://localhost:%d/exc" % port) + urlopen(f"http://localhost:{port}/exc") except HTTPError: pass diff --git a/tests/adapter_waitress/conftest.py b/tests/adapter_waitress/conftest.py index e9024469d1..680005d3d7 100644 --- a/tests/adapter_waitress/conftest.py +++ b/tests/adapter_waitress/conftest.py @@ -38,4 +38,4 @@ def target_application(): import _application port = _application.setup_application() - return webtest.TestApp("http://localhost:%d" % port) + return webtest.TestApp(f"http://localhost:{port}") diff --git a/tests/adapter_waitress/test_wsgi.py b/tests/adapter_waitress/test_wsgi.py index c9fa427196..ba0b402290 100644 --- a/tests/adapter_waitress/test_wsgi.py +++ b/tests/adapter_waitress/test_wsgi.py @@ -35,7 +35,7 @@ def test_wsgi_application_index(target_application): @validate_transaction_metrics( "_application:sample_application", custom_metrics=[ - ("Python/Dispatcher/Waitress/%s" % WAITRESS_VERSION, 1), + (f"Python/Dispatcher/Waitress/{WAITRESS_VERSION}", 1), ], ) @raise_background_exceptions() @@ -53,7 +53,7 @@ def test_raise_exception_application(target_application): @validate_transaction_metrics( "_application:sample_application", custom_metrics=[ - ("Python/Dispatcher/Waitress/%s" % WAITRESS_VERSION, 1), + (f"Python/Dispatcher/Waitress/{WAITRESS_VERSION}", 1), ], ) @raise_background_exceptions() @@ -71,7 +71,7 @@ def test_raise_exception_response(target_application): @validate_transaction_metrics( "_application:sample_application", custom_metrics=[ - ("Python/Dispatcher/Waitress/%s" % WAITRESS_VERSION, 1), + (f"Python/Dispatcher/Waitress/{WAITRESS_VERSION}", 1), ], ) @raise_background_exceptions() @@ -89,7 +89,7 @@ def test_raise_exception_finalize(target_application): @validate_transaction_metrics( "_application:sample_application", custom_metrics=[ - ("Python/Dispatcher/Waitress/%s" % WAITRESS_VERSION, 1), + (f"Python/Dispatcher/Waitress/{WAITRESS_VERSION}", 1), ], ) @raise_background_exceptions() diff --git a/tests/agent_features/test_asgi_browser.py b/tests/agent_features/test_asgi_browser.py index b91be67cd0..8821cb0bbe 100644 --- a/tests/agent_features/test_asgi_browser.py 
+++ b/tests/agent_features/test_asgi_browser.py @@ -34,7 +34,7 @@ ) from newrelic.common.encoding_utils import deobfuscate -_runtime_error_name = RuntimeError.__module__ + ":" + RuntimeError.__name__ +_runtime_error_name = f"{RuntimeError.__module__}:{RuntimeError.__name__}" @asgi_application() @@ -75,7 +75,7 @@ def test_header_attributes(): assert settings.error_beacon token = "0123456789ABCDEF" # nosec - headers = {"Cookie": "NRAGENT=tk=%s" % token} + headers = {"Cookie": f"NRAGENT=tk={token}"} response = target_application_manual_rum.get("/", headers=headers) diff --git a/tests/agent_features/test_asgi_distributed_tracing.py b/tests/agent_features/test_asgi_distributed_tracing.py index 13e52c91f3..3f9e7c0c88 100644 --- a/tests/agent_features/test_asgi_distributed_tracing.py +++ b/tests/agent_features/test_asgi_distributed_tracing.py @@ -176,9 +176,9 @@ def _make_dt_tag(pi): del dt_payload["d"]["tr"] # now run the test - transaction_name = "test_dt_metrics_%s" % "_".join(metrics) + transaction_name = f"test_dt_metrics_{'_'.join(metrics)}" _rollup_metrics = [ - ("%s/%s%s" % (x, tag, bt), 1) for x in metrics for bt in ["", "Web" if web_transaction else "Other"] + (f"{x}/{tag}{bt}", 1) for x in metrics for bt in ["", "Web" if web_transaction else "Other"] ] def _make_test_transaction(): diff --git a/tests/agent_features/test_asgi_w3c_trace_context.py b/tests/agent_features/test_asgi_w3c_trace_context.py index 8cec2eb7a1..e090a52815 100644 --- a/tests/agent_features/test_asgi_w3c_trace_context.py +++ b/tests/agent_features/test_asgi_w3c_trace_context.py @@ -78,7 +78,7 @@ async def target_asgi_application(scope, receive, send): INBOUND_TRACESTATE = \ 'rojo=f06a0ba902b7,congo=t61rcWkgMzE' LONG_TRACESTATE = \ - ','.join(["{}@rojo=f06a0ba902b7".format(x) for x in range(32)]) + ','.join([f"{x}@rojo=f06a0ba902b7" for x in range(32)]) INBOUND_UNTRUSTED_NR_TRACESTATE = \ ('2@nr=0-0-1345936-55632452-27jjj2d8890283b4-b28ce285632jjhl9-' '1-1.1273-1569367663277') @@ -155,10 +155,10 @@ def _test(): @pytest.mark.parametrize('inbound_tracestate,expected', ( ('', None), - (INBOUND_NR_TRACESTATE + "," + INBOUND_TRACESTATE, INBOUND_TRACESTATE), + (f"{INBOUND_NR_TRACESTATE},{INBOUND_TRACESTATE}", INBOUND_TRACESTATE), (INBOUND_TRACESTATE, INBOUND_TRACESTATE), - (LONG_TRACESTATE + ',' + INBOUND_NR_TRACESTATE, - ','.join("{}@rojo=f06a0ba902b7".format(x) for x in range(31))), + (f"{LONG_TRACESTATE},{INBOUND_NR_TRACESTATE}", + ','.join(f"{x}@rojo=f06a0ba902b7" for x in range(31))), ), ids=( 'empty_inbound_payload', 'nr_payload', @@ -231,7 +231,7 @@ def _test(): "parentSpanId": "00f067aa0ba902b7", "parent.transportType": "HTTP"}, [("Supportability/TraceContext/TraceParent/Accept/Success", 1)]), - (INBOUND_TRACEPARENT + ' ', { + (f"{INBOUND_TRACEPARENT} ", { "traceId": "0af7651916cd43dd8448eb211c80319c", "parentSpanId": "00f067aa0ba902b7", "parent.transportType": "HTTP"}, @@ -283,16 +283,16 @@ def _test(): (INBOUND_NR_TRACESTATE, {'trustedParentId': '27ddd2d8890283b4'}), ('garbage', {'parentId': '00f067aa0ba902b7'}), - (INBOUND_TRACESTATE + ',' + INBOUND_NR_TRACESTATE, + (f"{INBOUND_TRACESTATE},{INBOUND_NR_TRACESTATE}", {'parentId': '00f067aa0ba902b7', 'trustedParentId': '27ddd2d8890283b4', 'tracingVendors': 'rojo,congo'}), - (INBOUND_TRACESTATE + ',' + INBOUND_UNTRUSTED_NR_TRACESTATE, + (f"{INBOUND_TRACESTATE},{INBOUND_UNTRUSTED_NR_TRACESTATE}", {'parentId': '00f067aa0ba902b7', 'tracingVendors': 'rojo,congo,2@nr'}), - ('rojo=12345,' + 'v' * 257 + '=x', + (f"rojo=12345,{'v' * 257}=x", {'tracingVendors': 'rojo'}), - 
('rojo=12345,k=' + 'v' * 257, + (f"rojo=12345,k={'v' * 257}", {'tracingVendors': 'rojo'}), )) @override_application_settings(_override_settings) diff --git a/tests/agent_features/test_attribute.py b/tests/agent_features/test_attribute.py index f1fc35558f..3f4a99d385 100644 --- a/tests/agent_features/test_attribute.py +++ b/tests/agent_features/test_attribute.py @@ -307,7 +307,7 @@ def test_custom_params_value_too_long(): @background_task() def test_custom_param_too_many(): for i in range(129): - result = add_custom_attribute("key-%02d" % i, "value") + result = add_custom_attribute(f"key-{i:02}", "value") if i < 128: assert result else: @@ -317,7 +317,7 @@ def test_custom_param_too_many(): @validate_custom_parameters(_required_custom_params_too_many, _forgone_custom_params_too_many) @background_task() def test_custom_params_too_many(): - item_list = [("key-%02d" % i, "value") for i in range(129)] + item_list = [(f"key-{i:02}", "value") for i in range(129)] result = add_custom_attributes(item_list) assert not result @@ -361,9 +361,9 @@ def test_custom_params_int_too_big(): OK_KEY = "*" * (255 - len("request.parameters.")) -OK_REQUEST_PARAM = "request.parameters." + OK_KEY +OK_REQUEST_PARAM = f"request.parameters.{OK_KEY}" TOO_LONG_KEY = "*" * (256 - len("request.parameters.")) -TOO_LONG_REQUEST_PARAM = "request.parameters." + TOO_LONG_KEY +TOO_LONG_REQUEST_PARAM = f"request.parameters.{TOO_LONG_KEY}" assert len(OK_REQUEST_PARAM) == 255 assert len(TOO_LONG_REQUEST_PARAM) == 256 @@ -375,7 +375,7 @@ def test_custom_params_int_too_big(): @validate_attributes("agent", _required_request_key_ok, _forgone_request_key_ok) def test_capture_request_params_key_ok(): target_application = webtest.TestApp(target_wsgi_application) - response = target_application.get("/?%s=bar" % OK_KEY) + response = target_application.get(f"/?{OK_KEY}=bar") assert response.body == b"Hello World!" @@ -386,7 +386,7 @@ def test_capture_request_params_key_ok(): @validate_attributes("agent", _required_request_key_too_long, _forgone_request_key_too_long) def test_capture_request_params_key_too_long(): target_application = webtest.TestApp(target_wsgi_application) - response = target_application.get("/?%s=bar" % TOO_LONG_KEY) + response = target_application.get(f"/?{TOO_LONG_KEY}=bar") assert response.body == b"Hello World!" @@ -397,7 +397,7 @@ def test_capture_request_params_key_too_long(): @validate_attributes("agent", _required_request_value_too_long, _forgone_request_value_too_long) def test_capture_request_params_value_too_long(): target_application = webtest.TestApp(target_wsgi_application) - response = target_application.get("/?foo=%s" % TOO_LONG) + response = target_application.get(f"/?foo={TOO_LONG}") assert response.body == b"Hello World!" diff --git a/tests/agent_features/test_attributes_in_action.py b/tests/agent_features/test_attributes_in_action.py index e686923b2c..b2a129c174 100644 --- a/tests/agent_features/test_attributes_in_action.py +++ b/tests/agent_features/test_attributes_in_action.py @@ -64,7 +64,7 @@ URL_PARAM = "some_key" URL_PARAM2 = "second_key" -REQUEST_URL = "/?" + URL_PARAM + "=someval&" + URL_PARAM2 + "=anotherval" +REQUEST_URL = f"/?{URL_PARAM}=someval&{URL_PARAM2}=anotherval" REQUEST_HEADERS = [ ("Accept", "*/*"), ("Host", "foobar"), @@ -73,7 +73,7 @@ ("Content-Length", "10"), ] -REQ_PARAMS = ["request.parameters." + URL_PARAM, "request.parameters." 
+ URL_PARAM2] +REQ_PARAMS = [f"request.parameters.{URL_PARAM}", f"request.parameters.{URL_PARAM2}"] DISTRIBUTED_TRACE_ATTRS = [ "traceId", "priority", @@ -368,22 +368,22 @@ def test_browser_include_request_params(normal_application): _override_settings = { "error_collector.attributes.include": ["request.parameters.*"], - "error_collector.attributes.exclude": ["request.parameters." + URL_PARAM2], + "error_collector.attributes.exclude": [f"request.parameters.{URL_PARAM2}"], } _expected_attributes = { - "agent": TRACE_ERROR_AGENT_KEYS + ["request.parameters." + URL_PARAM], + "agent": TRACE_ERROR_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], "user": ERROR_USER_ATTRS, "intrinsic": ["trip_id"], } _expected_attributes_event = { - "agent": TRACE_ERROR_AGENT_KEYS + ["request.parameters." + URL_PARAM], + "agent": TRACE_ERROR_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], "user": ERROR_USER_ATTRS, "intrinsic": ERROR_EVENT_INTRINSICS, } -_expected_absent_attributes = {"agent": ["request.parameters." + URL_PARAM2], "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": [f"request.parameters.{URL_PARAM2}"], "user": [], "intrinsic": []} @validate_error_event_attributes(_expected_attributes_event, _expected_absent_attributes) @@ -395,11 +395,11 @@ def test_error_in_transaction_include_exclude(normal_application): _override_settings = { "transaction_tracer.attributes.include": ["request.parameters.*"], - "transaction_tracer.attributes.exclude": ["request.parameters." + URL_PARAM2], + "transaction_tracer.attributes.exclude": [f"request.parameters.{URL_PARAM2}"], } _expected_attributes = { - "agent": TRACE_ERROR_AGENT_KEYS + ["request.parameters." + URL_PARAM], + "agent": TRACE_ERROR_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], "user": USER_ATTRS, "intrinsic": ["trip_id"], } @@ -413,16 +413,16 @@ def test_transaction_trace_include_exclude(normal_application): _override_settings = { "transaction_events.attributes.include": ["request.parameters.*"], - "transaction_events.attributes.exclude": ["request.parameters." + URL_PARAM2], + "transaction_events.attributes.exclude": [f"request.parameters.{URL_PARAM2}"], } _expected_attributes = { - "agent": TRANS_EVENT_AGENT_KEYS + ["request.parameters." + URL_PARAM], + "agent": TRANS_EVENT_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], "user": USER_ATTRS, "intrinsic": TRANS_EVENT_INTRINSICS, } -_expected_absent_attributes = {"agent": ["request.parameters." + URL_PARAM2], "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": [f"request.parameters.{URL_PARAM2}"], "user": [], "intrinsic": []} @validate_transaction_event_attributes(_expected_attributes, _expected_absent_attributes) @@ -434,16 +434,16 @@ def test_transaction_event_include_exclude(normal_application): _override_settings = { "browser_monitoring.attributes.enabled": True, "browser_monitoring.attributes.include": ["request.parameters.*"], - "browser_monitoring.attributes.exclude": ["request.parameters." + URL_PARAM2], + "browser_monitoring.attributes.exclude": [f"request.parameters.{URL_PARAM2}"], } _expected_attributes = { - "agent": ["request.parameters." + URL_PARAM], + "agent": [f"request.parameters.{URL_PARAM}"], "user": USER_ATTRS, "intrinsic": BROWSER_INTRINSIC_KEYS, } -_expected_absent_attributes = {"agent": ABSENT_BROWSER_KEYS + ["request.parameters." 
+ URL_PARAM2], "user": []} +_expected_absent_attributes = {"agent": ABSENT_BROWSER_KEYS + [f"request.parameters.{URL_PARAM2}"], "user": []} @validate_browser_attributes(_expected_attributes, _expected_absent_attributes) diff --git a/tests/agent_features/test_browser.py b/tests/agent_features/test_browser.py index 29e26fbd8f..5781109931 100644 --- a/tests/agent_features/test_browser.py +++ b/tests/agent_features/test_browser.py @@ -35,7 +35,7 @@ from newrelic.api.wsgi_application import wsgi_application from newrelic.common.encoding_utils import deobfuscate -_runtime_error_name = RuntimeError.__module__ + ":" + RuntimeError.__name__ +_runtime_error_name = f"{RuntimeError.__module__}:{RuntimeError.__name__}" @wsgi_application() @@ -75,7 +75,7 @@ def test_header_attributes(): assert settings.error_beacon token = "0123456789ABCDEF" # nosec - headers = {"Cookie": "NRAGENT=tk=%s" % token} + headers = {"Cookie": f"NRAGENT=tk={token}"} response = target_application_manual_rum.get("/", headers=headers) diff --git a/tests/agent_features/test_cat.py b/tests/agent_features/test_cat.py index d812975996..ae926085a1 100644 --- a/tests/agent_features/test_cat.py +++ b/tests/agent_features/test_cat.py @@ -39,7 +39,7 @@ @wsgi_application() def target_wsgi_application(environ, start_response): status_code = int(environ["PATH_INFO"].strip("/")) - status = "%d STATUS" % status_code + status = f"{status_code} STATUS" if status_code == 304: output = b"" diff --git a/tests/agent_features/test_code_level_metrics.py b/tests/agent_features/test_code_level_metrics.py index 9dce616c95..e0cece6456 100644 --- a/tests/agent_features/test_code_level_metrics.py +++ b/tests/agent_features/test_code_level_metrics.py @@ -41,10 +41,10 @@ is_pypy = hasattr(sys, "pypy_version_info") NAMESPACE = "_test_code_level_metrics" -CLASS_NAMESPACE = ".".join((NAMESPACE, "ExerciseClass")) -CALLABLE_CLASS_NAMESPACE = ".".join((NAMESPACE, "ExerciseClassCallable")) -TYPE_CONSTRUCTOR_NAMESPACE = ".".join((NAMESPACE, "ExerciseTypeConstructor")) -TYPE_CONSTRUCTOR_CALLABLE_NAMESPACE = ".".join((NAMESPACE, "ExerciseTypeConstructorCallable")) +CLASS_NAMESPACE = f"{NAMESPACE}.ExerciseClass" +CALLABLE_CLASS_NAMESPACE = f"{NAMESPACE}.ExerciseClassCallable" +TYPE_CONSTRUCTOR_NAMESPACE = f"{NAMESPACE}.ExerciseTypeConstructor" +TYPE_CONSTRUCTOR_CALLABLE_NAMESPACE = f"{NAMESPACE}.ExerciseTypeConstructorCallable" if FILE_PATH.endswith(".pyc"): FILE_PATH = FILE_PATH[:-1] diff --git a/tests/agent_features/test_coroutine_transaction.py b/tests/agent_features/test_coroutine_transaction.py index 8b602ffc01..a9590b7531 100644 --- a/tests/agent_features/test_coroutine_transaction.py +++ b/tests/agent_features/test_coroutine_transaction.py @@ -209,7 +209,7 @@ def _test_async_coroutine_throw_error(): _test_async_coroutine_throw_error() assert metrics.count((metric, "")) == num_coroutines, metrics - assert metrics.count(("Errors/" + metric, "")) == num_coroutines, metrics + assert metrics.count((f"Errors/{metric}", "")) == num_coroutines, metrics assert metrics.count(("Errors/all", "")) == num_coroutines, metrics diff --git a/tests/agent_features/test_custom_events.py b/tests/agent_features/test_custom_events.py index 1951a291f5..103a305bcc 100644 --- a/tests/agent_features/test_custom_events.py +++ b/tests/agent_features/test_custom_events.py @@ -246,12 +246,12 @@ def test_application_create_custom_event_not_called(): { "content": "A" * 9001, "input": "B" * 9001, - "foo": "b" + "a" * 9000 + "r", + "foo": f"b{'a' * 9000}r", }, { "content": "A" * 9001, "input": "B" * 
300, - "foo": "b" + "a" * 299, + "foo": f"b{'a' * 299}", }, ], [ @@ -259,12 +259,12 @@ def test_application_create_custom_event_not_called(): { "content": "A" * 9001, "input": "B" * 9001, - "foo": "b" + "a" * 9000 + "r", + "foo": f"b{'a' * 9000}r", }, { "content": "A" * 300, "input": "B" * 9001, - "foo": "b" + "a" * 299, + "foo": f"b{'a' * 299}", }, ], [ @@ -272,12 +272,12 @@ def test_application_create_custom_event_not_called(): { "content": "A" * 9001, "input": "B" * 9001, - "foo": "b" + "a" * 9000 + "r", + "foo": f"b{'a' * 9000}r", }, { "content": "A" * 300, "input": "B" * 300, - "foo": "b" + "a" * 299, + "foo": f"b{'a' * 299}", }, ], ), diff --git a/tests/agent_features/test_distributed_tracing.py b/tests/agent_features/test_distributed_tracing.py index 263b1bdcf0..370dc3774b 100644 --- a/tests/agent_features/test_distributed_tracing.py +++ b/tests/agent_features/test_distributed_tracing.py @@ -253,9 +253,9 @@ def _make_dt_tag(pi): del dt_payload["d"]["tr"] # now run the test - transaction_name = "test_dt_metrics_%s" % "_".join(metrics) + transaction_name = f"test_dt_metrics_{'_'.join(metrics)}" _rollup_metrics = [ - ("%s/%s%s" % (x, tag, bt), 1) for x in metrics for bt in ["", "Web" if web_transaction else "Other"] + (f"{x}/{tag}{bt}", 1) for x in metrics for bt in ["", "Web" if web_transaction else "Other"] ] def _make_test_transaction(): diff --git a/tests/agent_features/test_error_events.py b/tests/agent_features/test_error_events.py index b9ab99b58e..ad10d3a148 100644 --- a/tests/agent_features/test_error_events.py +++ b/tests/agent_features/test_error_events.py @@ -84,7 +84,7 @@ def test_transaction_error_event_lotsa_attributes(): "err_message": ERR_MESSAGE, "external": "2", "db": "2", - "mod_wsgi.queue_start": ("t=%r" % time.time()), + "mod_wsgi.queue_start": (f"t={time.time()!r}"), "SERVER_PORT": "8888", } response = fully_featured_application.get("/", extra_environ=test_environ) diff --git a/tests/agent_features/test_error_group_callback.py b/tests/agent_features/test_error_group_callback.py index 2fe2fc68c7..c739e26826 100644 --- a/tests/agent_features/test_error_group_callback.py +++ b/tests/agent_features/test_error_group_callback.py @@ -246,10 +246,7 @@ def _test(): app = application() if transaction_decorator is None else None # Only set outside transaction notice_error(application=app, attributes={"notice_error_attribute": 1}) - assert not callback_errors, "Callback inputs failed to validate.\nerror: %s\ndata: %s" % ( - traceback.format_exception(*callback_errors[0]), - str(_data[0]), - ) + assert not callback_errors, f"Callback inputs failed to validate.\nerror: {traceback.format_exception(*callback_errors[0])}\ndata: {str(_data[0])}" if transaction_decorator is not None: _test = transaction_decorator(_test) # Manually decorate test function diff --git a/tests/agent_features/test_high_security_mode.py b/tests/agent_features/test_high_security_mode.py index ae64be2ef9..17b4d97f0a 100644 --- a/tests/agent_features/test_high_security_mode.py +++ b/tests/agent_features/test_high_security_mode.py @@ -523,7 +523,7 @@ class TestException(Exception): pass -_test_exception_name = "%s:%s" % (__name__, TestException.__name__) +_test_exception_name = f"{__name__}:{TestException.__name__}" @override_application_settings(_test_transaction_settings_hsm_disabled) diff --git a/tests/agent_features/test_log_events.py b/tests/agent_features/test_log_events.py index f1bb6cdf4c..9a619d8de4 100644 --- a/tests/agent_features/test_log_events.py +++ b/tests/agent_features/test_log_events.py @@ 
-45,7 +45,7 @@ def __str__(self): class NonSerializableObject(): def __str__(self): - return "<%s object>" % self.__class__.__name__ + return f"<{self.__class__.__name__} object>" __repr__ = __str__ @@ -118,8 +118,8 @@ def exercise_record_log_event(): "non_serializable_attr": NonSerializableObject(), "non_printable_attr": NonPrintableObject(), "attr_value_too_long": "*" * 256, - "attr_name_too_long_" + ("*" * 237): "value", - "attr_name_with_prefix_too_long_" + ("*" * 220): "value", + f"attr_name_too_long_{'*' * 237}": "value", + f"attr_name_with_prefix_too_long_{'*' * 220}": "value", } _exercise_record_log_event_events = [ @@ -297,9 +297,9 @@ def test(): @pytest.mark.parametrize("include,exclude,attr,expected", _test_record_log_event_context_attribute_filtering_params) def test_record_log_event_context_attribute_filtering_inside_transaction(include, exclude, attr, expected, prefix): if expected: - expected_event = {"required_attrs": [".".join((prefix, attr))]} + expected_event = {"required_attrs": [f"{prefix}.{attr}"]} else: - expected_event = {"forgone_attrs": [".".join((prefix, attr))]} + expected_event = {"forgone_attrs": [f"{prefix}.{attr}"]} @override_application_settings( { @@ -326,9 +326,9 @@ def test(): @reset_core_stats_engine() def test_record_log_event_context_attribute_filtering_outside_transaction(include, exclude, attr, expected, prefix): if expected: - expected_event = {"required_attrs": [".".join((prefix, attr))]} + expected_event = {"required_attrs": [f"{prefix}.{attr}"]} else: - expected_event = {"forgone_attrs": [".".join((prefix, attr))]} + expected_event = {"forgone_attrs": [f"{prefix}.{attr}"]} @override_application_settings( { diff --git a/tests/agent_features/test_logs_in_context.py b/tests/agent_features/test_logs_in_context.py index 931f0fa4e5..a0174da01f 100644 --- a/tests/agent_features/test_logs_in_context.py +++ b/tests/agent_features/test_logs_in_context.py @@ -71,7 +71,7 @@ def __str__(self): class NonSerializableObject(): def __str__(self): - return "<%s object>" % self.__class__.__name__ + return f"<{self.__class__.__name__} object>" __repr__ = __str__ diff --git a/tests/agent_features/test_notice_error.py b/tests/agent_features/test_notice_error.py index c3635c6622..e67d822dff 100644 --- a/tests/agent_features/test_notice_error.py +++ b/tests/agent_features/test_notice_error.py @@ -378,7 +378,7 @@ def test_notice_error_strip_message_not_in_allowlist_outside_transaction(): def _raise_errors(num_errors, application=None): for i in range(num_errors): try: - raise RuntimeError("error" + str(i)) + raise RuntimeError(f"error{str(i)}") except RuntimeError: notice_error(application=application) diff --git a/tests/agent_features/test_serverless_mode.py b/tests/agent_features/test_serverless_mode.py index c7dbf1720e..195c8ac602 100644 --- a/tests/agent_features/test_serverless_mode.py +++ b/tests/agent_features/test_serverless_mode.py @@ -38,7 +38,7 @@ def serverless_application(request): orig = settings.serverless_mode.enabled settings.serverless_mode.enabled = True - application_name = "Python Agent Test (test_serverless_mode:%s)" % (request.node.name) + application_name = f"Python Agent Test (test_serverless_mode:{request.node.name})" application = application_instance(application_name) application.activate() diff --git a/tests/agent_features/test_span_events.py b/tests/agent_features/test_span_events.py index 13e725af94..682415f21b 100644 --- a/tests/agent_features/test_span_events.py +++ b/tests/agent_features/test_span_events.py @@ -156,9 +156,9 @@ def 
_test(): pytest.param("a" * 2001, "raw", "".join(["a"] * 1997 + ["..."]), id="truncate"), pytest.param("a" * 2000, "raw", "".join(["a"] * 2000), id="no_truncate"), pytest.param( - "select * from %s" % "".join(["?"] * 2000), + f"select * from {''.join(['?'] * 2000)}", "obfuscated", - "select * from %s..." % ("".join(["?"] * (2000 - len("select * from ") - 3))), + f"select * from {''.join(['?'] * (2000 - len('select * from ') - 3))}...", id="truncate_obfuscated", ), pytest.param("select 1", "off", ""), @@ -351,7 +351,7 @@ def _test(): "kwarg_override,attribute_override", ( ({"host": "a" * 256}, {"peer.hostname": "a" * 255, "peer.address": "a" * 255}), - ({"port_path_or_id": "a" * 256, "host": "a"}, {"peer.hostname": "a", "peer.address": "a:" + "a" * 253}), + ({"port_path_or_id": "a" * 256, "host": "a"}, {"peer.hostname": "a", "peer.address": f"a:{'a' * 253}"}), ({"database_name": "a" * 256}, {"db.instance": "a" * 255}), ), ) @@ -577,8 +577,8 @@ def test_span_custom_attribute_limit(): for i in range(128): if i < 64: - span_custom_attrs.append("span_attr%i" % i) - txn_custom_attrs.append("txn_attr%i" % i) + span_custom_attrs.append(f"span_attr{i}") + txn_custom_attrs.append(f"txn_attr{i}") unexpected_txn_attrs.extend(span_custom_attrs) span_custom_attrs.extend(txn_custom_attrs[:64]) @@ -594,9 +594,9 @@ def _test(): transaction = current_transaction() for i in range(128): - transaction.add_custom_parameter("txn_attr%i" % i, "txnValue") + transaction.add_custom_parameter(f"txn_attr{i}", "txnValue") if i < 64: - add_custom_span_attribute("span_attr%i" % i, "spanValue") + add_custom_span_attribute(f"span_attr{i}", "spanValue") _test() diff --git a/tests/agent_features/test_stack_trace.py b/tests/agent_features/test_stack_trace.py index e09c9d2147..adc3d24d6b 100644 --- a/tests/agent_features/test_stack_trace.py +++ b/tests/agent_features/test_stack_trace.py @@ -22,7 +22,7 @@ def _format_stack_trace_from_tuples(frames): result = ['Traceback (most recent call last):'] - result.extend(['File "{0}", line {1}, in {2}'.format(*v) for v in frames]) + result.extend([f'File "{v[0]}", line {v[1]}, in {v[2]}' for v in frames]) return result def function0(): diff --git a/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py b/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py index 55711d6949..08ed69dd71 100644 --- a/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py +++ b/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py @@ -96,7 +96,7 @@ def test_capture_attributes_enabled(): browser_attributes["multibyte-utf8"] = _user_attributes["multibyte-utf8"].decode("latin-1") for attr, value in browser_attributes.items(): - assert user_attrs[attr] == value, "attribute %r expected %r, found %r" % (attr, value, user_attrs[attr]) + assert user_attrs[attr] == value, f"attribute {attr!r} expected {value!r}, found {user_attrs[attr]!r}" _test_no_attributes_recorded_settings = {"browser_monitoring.attributes.enabled": True} diff --git a/tests/agent_features/test_w3c_trace_context.py b/tests/agent_features/test_w3c_trace_context.py index 726cf011aa..b18c188bab 100644 --- a/tests/agent_features/test_w3c_trace_context.py +++ b/tests/agent_features/test_w3c_trace_context.py @@ -60,7 +60,7 @@ def target_wsgi_application(environ, start_response): INBOUND_TRACESTATE = \ 'rojo=f06a0ba902b7,congo=t61rcWkgMzE' LONG_TRACESTATE = \ - ','.join(["{}@rojo=f06a0ba902b7".format(x) for x in range(32)]) + 
','.join([f"{x}@rojo=f06a0ba902b7" for x in range(32)]) INBOUND_UNTRUSTED_NR_TRACESTATE = \ ('2@nr=0-0-1345936-55632452-27jjj2d8890283b4-b28ce285632jjhl9-' '1-1.1273-1569367663277') @@ -140,10 +140,10 @@ def _test(): @pytest.mark.parametrize('inbound_tracestate,expected', ( ('', None), - (INBOUND_NR_TRACESTATE + "," + INBOUND_TRACESTATE, INBOUND_TRACESTATE), + (f"{INBOUND_NR_TRACESTATE},{INBOUND_TRACESTATE}", INBOUND_TRACESTATE), (INBOUND_TRACESTATE, INBOUND_TRACESTATE), - (LONG_TRACESTATE + ',' + INBOUND_NR_TRACESTATE, - ','.join("{}@rojo=f06a0ba902b7".format(x) for x in range(31))), + (f"{LONG_TRACESTATE},{INBOUND_NR_TRACESTATE}", + ','.join(f"{x}@rojo=f06a0ba902b7" for x in range(31))), ), ids=( 'empty_inbound_payload', 'nr_payload', @@ -216,7 +216,7 @@ def _test(): "parentSpanId": "00f067aa0ba902b7", "parent.transportType": "HTTP"}, [("Supportability/TraceContext/TraceParent/Accept/Success", 1)]), - (INBOUND_TRACEPARENT + ' ', { + (f"{INBOUND_TRACEPARENT} ", { "traceId": "0af7651916cd43dd8448eb211c80319c", "parentSpanId": "00f067aa0ba902b7", "parent.transportType": "HTTP"}, @@ -267,16 +267,16 @@ def _test(): (INBOUND_NR_TRACESTATE, {'trustedParentId': '27ddd2d8890283b4'}), ('garbage', {'parentId': '00f067aa0ba902b7'}), - (INBOUND_TRACESTATE + ',' + INBOUND_NR_TRACESTATE, + (f"{INBOUND_TRACESTATE},{INBOUND_NR_TRACESTATE}", {'parentId': '00f067aa0ba902b7', 'trustedParentId': '27ddd2d8890283b4', 'tracingVendors': 'rojo,congo'}), - (INBOUND_TRACESTATE + ',' + INBOUND_UNTRUSTED_NR_TRACESTATE, + (f"{INBOUND_TRACESTATE},{INBOUND_UNTRUSTED_NR_TRACESTATE}", {'parentId': '00f067aa0ba902b7', 'tracingVendors': 'rojo,congo,2@nr'}), - ('rojo=12345,' + 'v' * 257 + '=x', + (f"rojo=12345,{'v' * 257}=x", {'tracingVendors': 'rojo'}), - ('rojo=12345,k=' + 'v' * 257, + (f"rojo=12345,k={'v' * 257}", {'tracingVendors': 'rojo'}), )) @override_application_settings(_override_settings) diff --git a/tests/agent_streaming/test_streaming_rpc.py b/tests/agent_streaming/test_streaming_rpc.py index 3ab74086ef..8f9a5e70d7 100644 --- a/tests/agent_streaming/test_streaming_rpc.py +++ b/tests/agent_streaming/test_streaming_rpc.py @@ -54,7 +54,7 @@ def test_correct_settings(mock_grpc_server, compression_setting, gRPC_compressio }, ) def _test(): - endpoint = "localhost:%s" % mock_grpc_server + endpoint = f"localhost:{mock_grpc_server}" stream_buffer = StreamBuffer(1) rpc = StreamingRpc( @@ -74,7 +74,7 @@ def _test(): def test_close_before_connect(mock_grpc_server, batching): - endpoint = "localhost:%s" % mock_grpc_server + endpoint = f"localhost:{mock_grpc_server}" stream_buffer = StreamBuffer(0, batching=batching) rpc = StreamingRpc(endpoint, stream_buffer, DEFAULT_METADATA, record_metric, ssl=False) @@ -89,7 +89,7 @@ def test_close_before_connect(mock_grpc_server, batching): def test_close_while_connected(mock_grpc_server, buffer_empty_event, batching): - endpoint = "localhost:%s" % mock_grpc_server + endpoint = f"localhost:{mock_grpc_server}" stream_buffer = StreamBuffer(1, batching=batching) rpc = StreamingRpc(endpoint, stream_buffer, DEFAULT_METADATA, record_metric, ssl=False) @@ -130,7 +130,7 @@ def condition(*args, **kwargs): user_attributes={}, ) - endpoint = "localhost:%s" % mock_grpc_server + endpoint = f"localhost:{mock_grpc_server}" stream_buffer = StreamBuffer(1, batching=batching) rpc = StreamingRpc(endpoint, stream_buffer, DEFAULT_METADATA, record_metric, ssl=False) @@ -158,7 +158,7 @@ def test_rpc_serialization_and_deserialization( ): """StreamingRPC sends deserializable span to correct endpoint.""" - 
endpoint = "localhost:%s" % mock_grpc_server + endpoint = f"localhost:{mock_grpc_server}" stream_buffer = StreamBuffer(1, batching=batching) span = Span( diff --git a/tests/agent_unittests/test_agent_connect.py b/tests/agent_unittests/test_agent_connect.py index ac5b7fd3f4..ca257b3ddf 100644 --- a/tests/agent_unittests/test_agent_connect.py +++ b/tests/agent_unittests/test_agent_connect.py @@ -70,9 +70,9 @@ def test_logging_connect_supportability_metrics(feature_setting, subfeature_sett ) @validate_internal_metrics( [ - ("Supportability/Logging/Forwarding/Python/%s" % metric_value, 1), - ("Supportability/Logging/LocalDecorating/Python/%s" % metric_value, 1), - ("Supportability/Logging/Metrics/Python/%s" % metric_value, 1), + (f"Supportability/Logging/Forwarding/Python/{metric_value}", 1), + (f"Supportability/Logging/LocalDecorating/Python/{metric_value}", 1), + (f"Supportability/Logging/Metrics/Python/{metric_value}", 1), ] ) def test(): diff --git a/tests/agent_unittests/test_check_environment.py b/tests/agent_unittests/test_check_environment.py index 7f74f7c702..f34884efae 100644 --- a/tests/agent_unittests/test_check_environment.py +++ b/tests/agent_unittests/test_check_environment.py @@ -30,7 +30,7 @@ def test_check_environment_failing(content): os.makedirs(uwsgi_dir) with open(init_file, 'w') as f: for key, value in content.items(): - f.write("%s = %s" % (key, value)) + f.write(f"{key} = {value}") sys.path.insert(0, temp_dir) import uwsgi diff --git a/tests/agent_unittests/test_environment.py b/tests/agent_unittests/test_environment.py index fd485cf74f..58b6bcd29d 100644 --- a/tests/agent_unittests/test_environment.py +++ b/tests/agent_unittests/test_environment.py @@ -48,7 +48,7 @@ def test_plugin_list(): # Check that bogus plugins don't get reported assert "newrelic.hooks.newrelic" not in plugin_list # Check that plugin that should get reported has version info. 
- assert "pytest (%s)" % (pytest.__version__) in plugin_list + assert f"pytest ({pytest.__version__})" in plugin_list @override_generic_settings(settings, {"package_reporting.enabled": False}) diff --git a/tests/agent_unittests/test_full_uri_payloads.py b/tests/agent_unittests/test_full_uri_payloads.py index 3ba321e34f..e25a26bd26 100644 --- a/tests/agent_unittests/test_full_uri_payloads.py +++ b/tests/agent_unittests/test_full_uri_payloads.py @@ -27,7 +27,7 @@ class FullUriClient(HttpClient): def send_request( self, method="POST", path="/agent_listener/invoke_raw_method", *args, **kwargs ): - path = "https://" + self._host + path + path = f"https://{self._host}{path}" return super(FullUriClient, self).send_request(method, path, *args, **kwargs) diff --git a/tests/agent_unittests/test_http_client.py b/tests/agent_unittests/test_http_client.py index 253e83233c..9d8d398ba2 100644 --- a/tests/agent_unittests/test_http_client.py +++ b/tests/agent_unittests/test_http_client.py @@ -47,7 +47,7 @@ def echo_full_request(self): self.server.connections.append(self.connection) request_line = str(self.requestline).encode("utf-8") - headers = "\n".join("%s: %s" % (k.lower(), v) for k, v in self.headers.items()) + headers = "\n".join(f"{k.lower()}: {v}" for k, v in self.headers.items()) self.send_response(200) self.end_headers() self.wfile.write(request_line) @@ -195,7 +195,7 @@ def test_http_no_payload(server, method): assert connection.pool is None # Verify request line - assert data[0].startswith(method + " /agent_listener/invoke_raw_method ") + assert data[0].startswith(f"{method} /agent_listener/invoke_raw_method ") # Verify headers user_agent_header = "" @@ -230,7 +230,7 @@ def test_non_ok_response(client_cls, server): assert internal_metrics == { "Supportability/Python/Collector/Failures": [1, 0, 0, 0, 0, 0], "Supportability/Python/Collector/Failures/direct": [1, 0, 0, 0, 0, 0], - "Supportability/Python/Collector/HTTPError/%d" % status: [1, 0, 0, 0, 0, 0], + f"Supportability/Python/Collector/HTTPError/{status}": [1, 0, 0, 0, 0, 0], } else: assert not internal_metrics @@ -419,8 +419,8 @@ def test_ssl_via_ssl_proxy(server, auth): if proxy_user: auth_expected = proxy_user if proxy_pass: - auth_expected = auth_expected + ":" + proxy_pass - auth_expected = "Basic " + base64.b64encode(auth_expected.encode("utf-8")).decode("utf-8") + auth_expected = f"{auth_expected}:{proxy_pass}" + auth_expected = f"Basic {base64.b64encode(auth_expected.encode('utf-8')).decode('utf-8')}" assert proxy_auth == auth_expected else: assert not proxy_auth @@ -487,8 +487,8 @@ def test_ssl_via_non_ssl_proxy(insecure_server, auth): if proxy_user: auth_expected = proxy_user if proxy_pass: - auth_expected = auth_expected + ":" + proxy_pass - auth_expected = "Basic " + base64.b64encode(auth_expected.encode("utf-8")).decode("utf-8") + auth_expected = f"{auth_expected}:{proxy_pass}" + auth_expected = f"Basic {base64.b64encode(auth_expected.encode('utf-8')).decode('utf-8')}" assert insecure_server.httpd.connect_headers["proxy-authorization"] == auth_expected else: assert "proxy-authorization" not in insecure_server.httpd.connect_headers @@ -628,8 +628,8 @@ def test_audit_logging(server, insecure_server, client_cls, proxy_host, exceptio connection = "direct" assert internal_metrics == { "Supportability/Python/Collector/Failures": [1, 0, 0, 0, 0, 0], - "Supportability/Python/Collector/Failures/%s" % connection: [1, 0, 0, 0, 0, 0], - "Supportability/Python/Collector/Exception/%s" % exc: [1, 0, 0, 0, 0, 0], + 
f"Supportability/Python/Collector/Failures/{connection}": [1, 0, 0, 0, 0, 0], + f"Supportability/Python/Collector/Exception/{exc}": [1, 0, 0, 0, 0, 0], } else: assert not internal_metrics diff --git a/tests/agent_unittests/test_region_aware_settings.py b/tests/agent_unittests/test_region_aware_settings.py index 7b47640497..a1449822bb 100644 --- a/tests/agent_unittests/test_region_aware_settings.py +++ b/tests/agent_unittests/test_region_aware_settings.py @@ -19,24 +19,24 @@ """ NO_REGION_KEY = '66c637a29c3982469a3fe8d1982d002c4a' -INI_FILE_NO_REGION_KEY = """ +INI_FILE_NO_REGION_KEY = f""" [newrelic] -license_key = %s -""" % NO_REGION_KEY +license_key = {NO_REGION_KEY} +""" INI_FILE_NO_REGION_KEY = INI_FILE_NO_REGION_KEY.encode('utf-8') EU01_KEY = 'eu01xx66c637a29c3982469a3fe8d1982d002c4a' -INI_FILE_EU01_KEY = """ +INI_FILE_EU01_KEY = f""" [newrelic] -license_key = %s -""" % EU01_KEY +license_key = {EU01_KEY} +""" INI_FILE_EU01_KEY = INI_FILE_EU01_KEY.encode('utf-8') -INI_FILE_HOST_OVERRIDE = """ +INI_FILE_HOST_OVERRIDE = f""" [newrelic] host = staging-collector.newrelic.com -license_key = %s -""" % EU01_KEY +license_key = {EU01_KEY} +""" INI_FILE_HOST_OVERRIDE = INI_FILE_HOST_OVERRIDE.encode('utf-8') STAGING_HOST = 'staging-collector.newrelic.com' diff --git a/tests/agent_unittests/test_sampler_metrics.py b/tests/agent_unittests/test_sampler_metrics.py index cf5e030dfe..a2f666c22d 100644 --- a/tests/agent_unittests/test_sampler_metrics.py +++ b/tests/agent_unittests/test_sampler_metrics.py @@ -52,26 +52,26 @@ def memory_data_source(): PID = os.getpid() EXPECTED_GC_METRICS = ( - "GC/objects/%d/all" % PID, - "GC/objects/%d/generation/0" % PID, - "GC/objects/%d/generation/1" % PID, - "GC/objects/%d/generation/2" % PID, - "GC/collections/%d/all" % PID, - "GC/collections/%d/0" % PID, - "GC/collections/%d/1" % PID, - "GC/collections/%d/2" % PID, - "GC/collected/%d/all" % PID, - "GC/collected/%d/0" % PID, - "GC/collected/%d/1" % PID, - "GC/collected/%d/2" % PID, - "GC/uncollectable/%d/all" % PID, - "GC/uncollectable/%d/0" % PID, - "GC/uncollectable/%d/1" % PID, - "GC/uncollectable/%d/2" % PID, - "GC/time/%d/all" % PID, - "GC/time/%d/0" % PID, - "GC/time/%d/1" % PID, - "GC/time/%d/2" % PID, + f"GC/objects/{PID}/all", + f"GC/objects/{PID}/generation/0", + f"GC/objects/{PID}/generation/1", + f"GC/objects/{PID}/generation/2", + f"GC/collections/{PID}/all", + f"GC/collections/{PID}/0", + f"GC/collections/{PID}/1", + f"GC/collections/{PID}/2", + f"GC/collected/{PID}/all", + f"GC/collected/{PID}/0", + f"GC/collected/{PID}/1", + f"GC/collected/{PID}/2", + f"GC/uncollectable/{PID}/all", + f"GC/uncollectable/{PID}/0", + f"GC/uncollectable/{PID}/1", + f"GC/uncollectable/{PID}/2", + f"GC/time/{PID}/all", + f"GC/time/{PID}/0", + f"GC/time/{PID}/1", + f"GC/time/{PID}/2", ) @@ -144,8 +144,8 @@ def test_cpu_metrics_collection(cpu_data_source): EXPECTED_MEMORY_METRICS = ( "Memory/Physical", "Memory/Physical/Utilization", - "Memory/Physical/%d" % PID, - "Memory/Physical/Utilization/%d" % PID, + f"Memory/Physical/{PID}", + f"Memory/Physical/Utilization/{PID}", ) diff --git a/tests/application_celery/_target_application.py b/tests/application_celery/_target_application.py index 374d677bc9..d5aa6a7a7e 100644 --- a/tests/application_celery/_target_application.py +++ b/tests/application_celery/_target_application.py @@ -55,5 +55,5 @@ def assert_dt(): # Basic checks for DT delegated to task txn = current_transaction() assert txn, "No transaction active." 
- assert txn.name == "_target_application.assert_dt", "Transaction name does not match: %s" % txn.name + assert txn.name == "_target_application.assert_dt", f"Transaction name does not match: {txn.name}" return 1 diff --git a/tests/application_gearman/test_gearman.py b/tests/application_gearman/test_gearman.py index 72ffde5219..af9fca1293 100644 --- a/tests/application_gearman/test_gearman.py +++ b/tests/application_gearman/test_gearman.py @@ -33,7 +33,7 @@ GEARMAND_HOST = GEARMAND_SETTINGS["host"] GEARMAND_PORT = GEARMAND_SETTINGS["port"] -GEARMAND_ADDR = "%s:%s" % (GEARMAND_HOST, GEARMAND_PORT) +GEARMAND_ADDR = f"{GEARMAND_HOST}:{GEARMAND_PORT}" class GearmanWorker(gearman.GearmanWorker): diff --git a/tests/component_djangorestframework/test_application.py b/tests/component_djangorestframework/test_application.py index 0adef819da..829914aec2 100644 --- a/tests/component_djangorestframework/test_application.py +++ b/tests/component_djangorestframework/test_application.py @@ -63,16 +63,16 @@ def target_application(): ("Python/WSGI/Application", 1), ("Python/WSGI/Response", 1), ("Python/WSGI/Finalize", 1), - (("Function/django.middleware.common:CommonMiddleware%s" % process_request_method), 1), - (("Function/django.contrib.sessions.middleware:SessionMiddleware%s" % process_request_method), 1), - (("Function/django.contrib.auth.middleware:AuthenticationMiddleware%s" % process_request_method), 1), - (("Function/django.contrib.messages.middleware:MessageMiddleware%s" % process_request_method), 1), - (("Function/%s:%s.resolve" % (url_module_path, url_resolver_cls)), 1), - (("Function/django.middleware.csrf:CsrfViewMiddleware%s" % process_view_method), 1), - (("Function/django.contrib.messages.middleware:MessageMiddleware%s" % process_response_method), 1), - (("Function/django.middleware.csrf:CsrfViewMiddleware%s" % process_response_method), 1), - (("Function/django.contrib.sessions.middleware:SessionMiddleware%s" % process_response_method), 1), - (("Function/django.middleware.common:CommonMiddleware%s" % process_response_method), 1), + (f"Function/django.middleware.common:CommonMiddleware{process_request_method}", 1), + (f"Function/django.contrib.sessions.middleware:SessionMiddleware{process_request_method}", 1), + (f"Function/django.contrib.auth.middleware:AuthenticationMiddleware{process_request_method}", 1), + (f"Function/django.contrib.messages.middleware:MessageMiddleware{process_request_method}", 1), + (f"Function/{url_module_path}:{url_resolver_cls}.resolve", 1), + (f"Function/django.middleware.csrf:CsrfViewMiddleware{process_view_method}", 1), + (f"Function/django.contrib.messages.middleware:MessageMiddleware{process_response_method}", 1), + (f"Function/django.middleware.csrf:CsrfViewMiddleware{process_response_method}", 1), + (f"Function/django.contrib.sessions.middleware:SessionMiddleware{process_response_method}", 1), + (f"Function/django.middleware.common:CommonMiddleware{process_response_method}", 1), ] _test_application_index_scoped_metrics = list(_scoped_metrics) @@ -132,7 +132,7 @@ def test_application_view_handle_error(status, should_record, use_global_exc_han "urls:ViewHandleError.get", scoped_metrics=_test_application_view_handle_error_scoped_metrics ) def _test(): - response = target_application.get("/view_handle_error/%s/%s/" % (status, use_global_exc_handler), status=status) + response = target_application.get(f"/view_handle_error/{status}/{use_global_exc_handler}/", status=status) if use_global_exc_handler: response.mustcontain("exception was handled global") else: @@ 
-143,7 +143,7 @@ def _test(): _test_api_view_view_name_get = "urls:wrapped_view.get" _test_api_view_scoped_metrics_get = list(_scoped_metrics) -_test_api_view_scoped_metrics_get.append(("Function/%s" % _test_api_view_view_name_get, 1)) +_test_api_view_scoped_metrics_get.append((f"Function/{_test_api_view_view_name_get}", 1)) @validate_transaction_errors(errors=[]) @@ -156,7 +156,7 @@ def test_api_view_get(target_application): _test_api_view_view_name_post = "urls:wrapped_view.http_method_not_allowed" _test_api_view_scoped_metrics_post = list(_scoped_metrics) -_test_api_view_scoped_metrics_post.append(("Function/%s" % _test_api_view_view_name_post, 1)) +_test_api_view_scoped_metrics_post.append((f"Function/{_test_api_view_view_name_post}", 1)) @validate_transaction_errors(errors=["rest_framework.exceptions:MethodNotAllowed"]) diff --git a/tests/component_flask_rest/_test_application.py b/tests/component_flask_rest/_test_application.py index 44003de4cc..cbd23fab2b 100644 --- a/tests/component_flask_rest/_test_application.py +++ b/tests/component_flask_rest/_test_application.py @@ -39,8 +39,7 @@ def get(self, exception, code): elif 'CustomException' in exception: e = CustomException() else: - raise AssertionError('Unexpected exception received: %s' % - exception) + raise AssertionError(f'Unexpected exception received: {exception}') e.code = code raise e diff --git a/tests/component_flask_rest/test_application.py b/tests/component_flask_rest/test_application.py index f2ecfab7bd..58b3db46ea 100644 --- a/tests/component_flask_rest/test_application.py +++ b/tests/component_flask_rest/test_application.py @@ -89,7 +89,7 @@ def test_application_raises(exception, status_code, ignore_status_code, propagat @validate_transaction_metrics("_test_application:exception", scoped_metrics=_test_application_raises_scoped_metrics) def _test(): try: - application.get("/exception/%s/%i" % (exception, status_code), status=status_code, expect_errors=True) + application.get(f"/exception/{exception}/{status_code}", status=status_code, expect_errors=True) except Exception as e: assert propagate_exceptions diff --git a/tests/component_graphqlserver/test_graphql.py b/tests/component_graphqlserver/test_graphql.py index 098f509708..5cb24e2848 100644 --- a/tests/component_graphqlserver/test_graphql.py +++ b/tests/component_graphqlserver/test_graphql.py @@ -78,9 +78,9 @@ def test_basic(target_application): from graphql_server import __version__ as graphql_server_version FRAMEWORK_METRICS = [ - ("Python/Framework/GraphQL/%s" % graphql_version, 1), - ("Python/Framework/GraphQLServer/%s" % graphql_server_version, 1), - ("Python/Framework/%s/%s" % (framework, version), 1), + (f"Python/Framework/GraphQL/{graphql_version}", 1), + (f"Python/Framework/GraphQLServer/{graphql_server_version}", 1), + (f"Python/Framework/{framework}/{version}", 1), ] @validate_transaction_metrics( @@ -101,9 +101,9 @@ def test_query_and_mutation(target_application): from graphql_server import __version__ as graphql_server_version FRAMEWORK_METRICS = [ - ("Python/Framework/GraphQL/%s" % graphql_version, 1), - ("Python/Framework/GraphQLServer/%s" % graphql_server_version, 1), - ("Python/Framework/%s/%s" % (framework, version), 1), + (f"Python/Framework/GraphQL/{graphql_version}", 1), + (f"Python/Framework/GraphQLServer/{graphql_server_version}", 1), + (f"Python/Framework/{framework}/{version}", 1), ] _test_query_scoped_metrics = [ ("GraphQL/resolve/GraphQLServer/storage", 1), @@ -219,8 +219,8 @@ def test_exception_in_middleware(target_application): # 
Metrics _test_exception_scoped_metrics = [ - ("GraphQL/operation/GraphQLServer/query/MyQuery/%s" % field, 1), - ("GraphQL/resolve/GraphQLServer/%s" % field, 1), + (f"GraphQL/operation/GraphQLServer/query/MyQuery/{field}", 1), + (f"GraphQL/resolve/GraphQLServer/{field}", 1), ] _test_exception_rollup_metrics = [ ("Errors/all", 1), @@ -260,19 +260,19 @@ def _test(): @dt_enabled def test_exception_in_resolver(target_application, field): framework, version, target_application = target_application - query = "query MyQuery { %s }" % field + query = f"query MyQuery {{ {field} }}" txn_name = "framework_graphql._target_schema_sync:resolve_error" # Metrics _test_exception_scoped_metrics = [ - ("GraphQL/operation/GraphQLServer/query/MyQuery/%s" % field, 1), - ("GraphQL/resolve/GraphQLServer/%s" % field, 1), + (f"GraphQL/operation/GraphQLServer/query/MyQuery/{field}", 1), + (f"GraphQL/resolve/GraphQLServer/{field}", 1), ] _test_exception_rollup_metrics = [ ("Errors/all", 1), ("Errors/allWeb", 1), - ("Errors/WebTransaction/GraphQL/%s" % txn_name, 1), + (f"Errors/WebTransaction/GraphQL/{txn_name}", 1), ] + _test_exception_scoped_metrics # Attributes @@ -333,7 +333,7 @@ def test_exception_in_validation(target_application, is_graphql_2, query, exc_cl _test_exception_rollup_metrics = [ ("Errors/all", 1), ("Errors/allWeb", 1), - ("Errors/WebTransaction/GraphQL/%s" % txn_name, 1), + (f"Errors/WebTransaction/GraphQL/{txn_name}", 1), ] + _test_exception_scoped_metrics # Attributes @@ -495,7 +495,7 @@ def test_deepest_unique_path(target_application, query, expected_path): if expected_path == "/error": txn_name = "framework_graphql._target_schema_sync:resolve_error" else: - txn_name = "query/%s" % expected_path + txn_name = f"query/{expected_path}" @validate_transaction_metrics( txn_name, diff --git a/tests/component_tastypie/test_application.py b/tests/component_tastypie/test_application.py index 9515ffc232..9622890131 100644 --- a/tests/component_tastypie/test_application.py +++ b/tests/component_tastypie/test_application.py @@ -103,7 +103,7 @@ def test_not_found(api_version, tastypie_full_debug): ) def _test_not_found(): with TastyPieFullDebugMode(tastypie_full_debug) as debug_status: - test_application.get("/api/%s/simple/NotFound/" % api_version, status=debug_status) + test_application.get(f"/api/{api_version}/simple/NotFound/", status=debug_status) _test_not_found() @@ -121,7 +121,7 @@ def _test_not_found(): ) def test_object_does_not_exist(api_version, tastypie_full_debug): with TastyPieFullDebugMode(tastypie_full_debug): - test_application.get("/api/%s/simple/ObjectDoesNotExist/" % api_version, status=404) + test_application.get(f"/api/{api_version}/simple/ObjectDoesNotExist/", status=404) _test_application_raises_zerodivision_exceptions = ["builtins:ZeroDivisionError"] @@ -144,7 +144,7 @@ def test_raises_zerodivision(api_version, tastypie_full_debug): ) def _test_raises_zerodivision(): with TastyPieFullDebugMode(tastypie_full_debug): - test_application.get("/api/%s/simple/ZeroDivisionError/" % api_version, status=500) + test_application.get(f"/api/{api_version}/simple/ZeroDivisionError/", status=500) _test_raises_zerodivision() @@ -171,7 +171,7 @@ def test_record_404_errors(api_version, tastypie_full_debug): ) def _test_not_found(): with TastyPieFullDebugMode(tastypie_full_debug) as debug_status: - test_application.get("/api/%s/simple/NotFound/" % api_version, status=debug_status) + test_application.get(f"/api/{api_version}/simple/NotFound/", status=debug_status) _test_not_found() @@ -186,4 +186,4 @@ def 
test_ended_txn_name(api_version, tastypie_full_debug): end_of_transaction() with TastyPieFullDebugMode(tastypie_full_debug) as debug_status: - test_application.get("/api/%s/simple/NotFound/" % api_version, status=debug_status) + test_application.get(f"/api/{api_version}/simple/NotFound/", status=debug_status) diff --git a/tests/cross_agent/test_cat_map.py b/tests/cross_agent/test_cat_map.py index d7ba6ec4f0..b20485c83b 100644 --- a/tests/cross_agent/test_cat_map.py +++ b/tests/cross_agent/test_cat_map.py @@ -200,7 +200,7 @@ def run_cat_test(): "txn": txn_name, "guid": guid, "old_cat": str(old_cat), - "server_url": "http://localhost:%d" % server.port, + "server_url": f"http://localhost:{server.port}", }, ) diff --git a/tests/cross_agent/test_collector_hostname.py b/tests/cross_agent/test_collector_hostname.py index 714959a369..a43e91262c 100644 --- a/tests/cross_agent/test_collector_hostname.py +++ b/tests/cross_agent/test_collector_hostname.py @@ -63,9 +63,9 @@ def _test_collector_hostname( os.environ["NEW_RELIC_LICENSE_KEY"] = env_key if config_file_key: - ini_contents += "\nlicense_key = %s" % config_file_key + ini_contents += f"\nlicense_key = {config_file_key}" if config_override_host: - ini_contents += "\nhost = %s" % config_override_host + ini_contents += f"\nhost = {config_override_host}" import newrelic.config as config import newrelic.core.config as core_config diff --git a/tests/cross_agent/test_distributed_tracing.py b/tests/cross_agent/test_distributed_tracing.py index 060fe8a864..715d22fbef 100644 --- a/tests/cross_agent/test_distributed_tracing.py +++ b/tests/cross_agent/test_distributed_tracing.py @@ -88,7 +88,7 @@ def assert_payload(payload, payload_assertions, major_version, minor_version): # payload['d']['ac'] -> payload['d.ac'] d = payload.pop("d") for key, value in d.items(): - payload["d.%s" % key] = value + payload[f"d.{key}"] = value for expected in payload_assertions.get("expected", []): assert expected in payload diff --git a/tests/cross_agent/test_lambda_event_source.py b/tests/cross_agent/test_lambda_event_source.py index e695a41665..bea041f1a3 100644 --- a/tests/cross_agent/test_lambda_event_source.py +++ b/tests/cross_agent/test_lambda_event_source.py @@ -35,7 +35,7 @@ def _load_tests(): for test in json.loads(fh.read()): test_name = test.pop("name") - test_file = test_name + ".json" + test_file = f"{test_name}.json" path = os.path.join(FIXTURE_DIR, "lambda_event_source", test_file) with open(path, "r") as fh: events[test_name] = json.loads(fh.read()) diff --git a/tests/cross_agent/test_w3c_trace_context.py b/tests/cross_agent/test_w3c_trace_context.py index e897528c5c..b10ec60818 100644 --- a/tests/cross_agent/test_w3c_trace_context.py +++ b/tests/cross_agent/test_w3c_trace_context.py @@ -104,7 +104,7 @@ def validate_outbound_payload(actual, expected, trusted_account_key): traceparent = value.split("-") elif key == "tracestate": vendors = W3CTraceState.decode(value) - nr_entry = vendors.pop(trusted_account_key + "@nr", "") + nr_entry = vendors.pop(f"{trusted_account_key}@nr", "") tracestate = nr_entry.split("-") exact_values = expected.get("exact", {}) expected_attrs = expected.get("expected", []) @@ -240,7 +240,7 @@ def test_trace_context( @override_compute_sampled(force_sampled_true) def _test(): return test_application.get( - "/" + test_name, + f"/{test_name}", headers=inbound_headers, extra_environ=extra_environ, ) diff --git a/tests/datastore_aiomcache/test_aiomcache.py b/tests/datastore_aiomcache/test_aiomcache.py index 15f3f8d49a..9641c6d70e 100644 --- 
a/tests/datastore_aiomcache/test_aiomcache.py +++ b/tests/datastore_aiomcache/test_aiomcache.py @@ -60,7 +60,7 @@ def test_bt_set_get_delete(loop): set_background_task(True) client = aiomcache.Client(host=MEMCACHED_HOST, port=MEMCACHED_PORT) - key = (MEMCACHED_NAMESPACE + "key").encode() + key = f"{MEMCACHED_NAMESPACE}key".encode() data = "value".encode() loop.run_until_complete(client.set(key, data)) @@ -98,7 +98,7 @@ def test_wt_set_get_delete(loop): set_background_task(False) client = aiomcache.Client(host=MEMCACHED_HOST, port=MEMCACHED_PORT) - key = (MEMCACHED_NAMESPACE + "key").encode() + key = f"{MEMCACHED_NAMESPACE}key".encode() data = "value".encode() loop.run_until_complete(client.set(key, data)) diff --git a/tests/datastore_aioredis/conftest.py b/tests/datastore_aioredis/conftest.py index 57f2b79bb5..895b700deb 100644 --- a/tests/datastore_aioredis/conftest.py +++ b/tests/datastore_aioredis/conftest.py @@ -70,7 +70,7 @@ def client(request, loop): else: if request.param == "Redis": return loop.run_until_complete( - aioredis.create_redis("redis://%s:%d" % (DB_SETTINGS["host"], DB_SETTINGS["port"]), db=0) + aioredis.create_redis(f"redis://{DB_SETTINGS['host']}:{DB_SETTINGS['port']}", db=0) ) elif request.param == "StrictRedis": pytest.skip("StrictRedis not implemented.") @@ -80,4 +80,4 @@ def client(request, loop): @pytest.fixture(scope="session") def key(): - return "AIOREDIS-TEST-" + str(os.getpid()) + return f"AIOREDIS-TEST-{str(os.getpid())}" diff --git a/tests/datastore_aioredis/test_custom_conn_pool.py b/tests/datastore_aioredis/test_custom_conn_pool.py index 415cded094..e976f5c728 100644 --- a/tests/datastore_aioredis/test_custom_conn_pool.py +++ b/tests/datastore_aioredis/test_custom_conn_pool.py @@ -85,7 +85,7 @@ async def execute(self, *args, **kwargs): _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Redis/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 3)) diff --git a/tests/datastore_aioredis/test_execute_command.py b/tests/datastore_aioredis/test_execute_command.py index b600abea5a..b470f64b5c 100644 --- a/tests/datastore_aioredis/test_execute_command.py +++ b/tests/datastore_aioredis/test_execute_command.py @@ -54,7 +54,7 @@ _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Redis/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 1)) diff --git a/tests/datastore_aioredis/test_get_and_set.py b/tests/datastore_aioredis/test_get_and_set.py index cbddf6091b..3001b41800 100644 --- a/tests/datastore_aioredis/test_get_and_set.py +++ b/tests/datastore_aioredis/test_get_and_set.py @@ -57,7 +57,7 @@ _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Redis/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 2)) diff --git a/tests/datastore_aioredis/test_multiple_dbs.py b/tests/datastore_aioredis/test_multiple_dbs.py index d490c1f580..45cb067ce3 100644 --- a/tests/datastore_aioredis/test_multiple_dbs.py +++ b/tests/datastore_aioredis/test_multiple_dbs.py @@ -81,8 +81,8 @@ _host_2 = instance_hostname(redis_instance_2["host"]) _port_2 = redis_instance_2["port"] - instance_metric_name_1 = 
"Datastore/instance/Redis/%s/%s" % (_host_1, _port_1) - instance_metric_name_2 = "Datastore/instance/Redis/%s/%s" % (_host_2, _port_2) + instance_metric_name_1 = f"Datastore/instance/Redis/{_host_1}/{_port_1}" + instance_metric_name_2 = f"Datastore/instance/Redis/{_host_2}/{_port_2}" _enable_rollup_metrics.extend( [ @@ -125,10 +125,10 @@ def client_set(request, loop): # noqa if request.param == "Redis": return ( loop.run_until_complete( - aioredis.create_redis("redis://%s:%d" % (DB_SETTINGS[0]["host"], DB_SETTINGS[0]["port"]), db=0) + aioredis.create_redis(f"redis://{DB_SETTINGS[0]['host']}:{DB_SETTINGS[0]['port']}", db=0) ), loop.run_until_complete( - aioredis.create_redis("redis://%s:%d" % (DB_SETTINGS[1]["host"], DB_SETTINGS[1]["port"]), db=0) + aioredis.create_redis(f"redis://{DB_SETTINGS[1]['host']}:{DB_SETTINGS[1]['port']}", db=0) ), ) elif request.param == "StrictRedis": @@ -190,7 +190,7 @@ def test_concurrent_calls(client_set, loop): # noqa import asyncio async def exercise_concurrent(): - await asyncio.gather(*(client.set("key-%d" % i, i) for i, client in enumerate(client_set))) - await asyncio.gather(*(client.get("key-%d" % i) for i, client in enumerate(client_set))) + await asyncio.gather(*(client.set(f"key-{i}", i) for i, client in enumerate(client_set))) + await asyncio.gather(*(client.get(f"key-{i}") for i, client in enumerate(client_set))) loop.run_until_complete(exercise_concurrent()) diff --git a/tests/datastore_aioredis/test_span_event.py b/tests/datastore_aioredis/test_span_event.py index 1c9227e54a..7423fb9750 100644 --- a/tests/datastore_aioredis/test_span_event.py +++ b/tests/datastore_aioredis/test_span_event.py @@ -70,7 +70,7 @@ def test_span_events(client, instance_enabled, db_instance_enabled, loop): hostname = instance_hostname(DB_SETTINGS["host"]) exact_agents.update( { - "peer.address": "%s:%s" % (hostname, DB_SETTINGS["port"]), + "peer.address": f"{hostname}:{DB_SETTINGS['port']}", "peer.hostname": hostname, } ) diff --git a/tests/datastore_aioredis/test_uninstrumented_methods.py b/tests/datastore_aioredis/test_uninstrumented_methods.py index 7858709c14..eeb04a996f 100644 --- a/tests/datastore_aioredis/test_uninstrumented_methods.py +++ b/tests/datastore_aioredis/test_uninstrumented_methods.py @@ -91,4 +91,4 @@ def test_uninstrumented_methods(client): is_wrapped = lambda m: hasattr(getattr(client, m), "__wrapped__") uninstrumented = {m for m in methods - IGNORED_METHODS if not is_wrapped(m)} - assert not uninstrumented, "Uninstrumented methods: %s" % sorted(uninstrumented) + assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/datastore_aredis/test_custom_conn_pool.py b/tests/datastore_aredis/test_custom_conn_pool.py index c2594c2dbf..22d6b34f05 100644 --- a/tests/datastore_aredis/test_custom_conn_pool.py +++ b/tests/datastore_aredis/test_custom_conn_pool.py @@ -83,7 +83,7 @@ def release(self, connection): _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Redis/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Redis/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 3) diff --git a/tests/datastore_aredis/test_execute_command.py b/tests/datastore_aredis/test_execute_command.py index c5b0fc3323..e040bc57f6 100644 --- a/tests/datastore_aredis/test_execute_command.py +++ b/tests/datastore_aredis/test_execute_command.py @@ -58,7 +58,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] 
-_instance_metric_name = 'Datastore/instance/Redis/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Redis/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 1) diff --git a/tests/datastore_aredis/test_get_and_set.py b/tests/datastore_aredis/test_get_and_set.py index 2eeee947bc..d94777cf9c 100644 --- a/tests/datastore_aredis/test_get_and_set.py +++ b/tests/datastore_aredis/test_get_and_set.py @@ -58,7 +58,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Redis/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Redis/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 2) diff --git a/tests/datastore_aredis/test_multiple_dbs.py b/tests/datastore_aredis/test_multiple_dbs.py index cb4cbac5b2..73d6bd8d3a 100644 --- a/tests/datastore_aredis/test_multiple_dbs.py +++ b/tests/datastore_aredis/test_multiple_dbs.py @@ -80,8 +80,8 @@ host_2 = instance_hostname(redis_2["host"]) port_2 = redis_2["port"] - instance_metric_name_1 = "Datastore/instance/Redis/%s/%s" % (host_1, port_1) - instance_metric_name_2 = "Datastore/instance/Redis/%s/%s" % (host_2, port_2) + instance_metric_name_1 = f"Datastore/instance/Redis/{host_1}/{port_1}" + instance_metric_name_2 = f"Datastore/instance/Redis/{host_2}/{port_2}" _enable_rollup_metrics.extend( [ @@ -172,7 +172,7 @@ def test_concurrent_calls(loop): clients = (client_1, client_2) async def exercise_concurrent(): - await asyncio.gather(*(client.set("key-%d" % i, i) for i, client in enumerate(clients))) - await asyncio.gather(*(client.get("key-%d" % i) for i, client in enumerate(clients))) + await asyncio.gather(*(client.set(f"key-{i}", i) for i, client in enumerate(clients))) + await asyncio.gather(*(client.get(f"key-{i}") for i, client in enumerate(clients))) loop.run_until_complete(exercise_concurrent()) diff --git a/tests/datastore_aredis/test_span_event.py b/tests/datastore_aredis/test_span_event.py index 2bd238bdae..db4a8a897b 100644 --- a/tests/datastore_aredis/test_span_event.py +++ b/tests/datastore_aredis/test_span_event.py @@ -79,7 +79,7 @@ def test_span_events(instance_enabled, db_instance_enabled, loop): settings = _enable_instance_settings.copy() hostname = instance_hostname(DB_SETTINGS['host']) exact_agents.update({ - 'peer.address': '%s:%s' % (hostname, DB_SETTINGS['port']), + 'peer.address': f"{hostname}:{DB_SETTINGS['port']}", 'peer.hostname': hostname, }) else: diff --git a/tests/datastore_aredis/test_uninstrumented_methods.py b/tests/datastore_aredis/test_uninstrumented_methods.py index 38901e5c5d..e4b9c90042 100644 --- a/tests/datastore_aredis/test_uninstrumented_methods.py +++ b/tests/datastore_aredis/test_uninstrumented_methods.py @@ -45,4 +45,4 @@ def test_uninstrumented_methods(): is_wrapped = lambda m: hasattr(getattr(strict_redis_client, m), "__wrapped__") uninstrumented = {m for m in methods - IGNORED_METHODS if not is_wrapped(m)} - assert not uninstrumented, "Uninstrumented methods: %s" % sorted(uninstrumented) + assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/datastore_asyncpg/test_multiple_dbs.py b/tests/datastore_asyncpg/test_multiple_dbs.py index 9d7a3de95e..afc6324fe2 100644 --- a/tests/datastore_asyncpg/test_multiple_dbs.py +++ b/tests/datastore_asyncpg/test_multiple_dbs.py @@ -85,8 +85,8 @@ _host_2 = instance_hostname(_postgresql_2["host"]) _port_2 = _postgresql_2["port"] - _instance_metric_name_1 = 
"Datastore/instance/Postgres/%s/%s" % (_host_1, _port_1) - _instance_metric_name_2 = "Datastore/instance/Postgres/%s/%s" % (_host_2, _port_2) + _instance_metric_name_1 = f"Datastore/instance/Postgres/{_host_1}/{_port_1}" + _instance_metric_name_2 = f"Datastore/instance/Postgres/{_host_2}/{_port_2}" _enable_rollup_metrics.extend( [ diff --git a/tests/datastore_asyncpg/test_query.py b/tests/datastore_asyncpg/test_query.py index 6deb7ca9a8..bccafbdfd5 100644 --- a/tests/datastore_asyncpg/test_query.py +++ b/tests/datastore_asyncpg/test_query.py @@ -38,7 +38,7 @@ if ASYNCPG_VERSION < (0, 11): CONNECT_METRICS = () else: - CONNECT_METRICS = ((PG_PREFIX + "connect", 1),) + CONNECT_METRICS = ((f"{PG_PREFIX}connect", 1),) @pytest.fixture @@ -59,7 +59,7 @@ def conn(event_loop): @validate_transaction_metrics( "test_single", background_task=True, - scoped_metrics=((PG_PREFIX + "select", 1),), + scoped_metrics=((f"{PG_PREFIX}select", 1),), rollup_metrics=(("Datastore/all", 1),), ) @validate_tt_collector_json(datastore_params={"port_path_or_id": str(DB_SETTINGS["port"])}) @@ -75,8 +75,8 @@ def test_single(event_loop, method, conn): "test_prepared_single", background_task=True, scoped_metrics=( - (PG_PREFIX + "prepare", 1), - (PG_PREFIX + "select", 1), + (f"{PG_PREFIX}prepare", 1), + (f"{PG_PREFIX}select", 1), ), rollup_metrics=(("Datastore/all", 2),), ) @@ -91,7 +91,7 @@ def test_prepared_single(event_loop, method, conn): @validate_transaction_metrics( "test_prepare", background_task=True, - scoped_metrics=((PG_PREFIX + "prepare", 1),), + scoped_metrics=((f"{PG_PREFIX}prepare", 1),), rollup_metrics=(("Datastore/all", 1),), ) @background_task(name="test_prepare") @@ -102,9 +102,9 @@ def test_prepare(event_loop, conn): @pytest.fixture def table(event_loop, conn): - table_name = "table_%d" % os.getpid() + table_name = f"table_{os.getpid()}" - event_loop.run_until_complete(conn.execute("""create table %s (a integer, b real, c text)""" % table_name)) + event_loop.run_until_complete(conn.execute(f"""create table {table_name} (a integer, b real, c text)""")) return table_name @@ -114,8 +114,8 @@ def table(event_loop, conn): "test_copy", background_task=True, scoped_metrics=( - (PG_PREFIX + "prepare", 1), - (PG_PREFIX + "copy", 3), + (f"{PG_PREFIX}prepare", 1), + (f"{PG_PREFIX}copy", 3), ), rollup_metrics=(("Datastore/all", 4),), ) @@ -137,8 +137,8 @@ async def amain(): "test_select_many", background_task=True, scoped_metrics=( - (PG_PREFIX + "prepare", 1), - (PG_PREFIX + "select", 1), + (f"{PG_PREFIX}prepare", 1), + (f"{PG_PREFIX}select", 1), ), rollup_metrics=(("Datastore/all", 2),), ) @@ -152,9 +152,9 @@ def test_select_many(event_loop, conn): "test_transaction", background_task=True, scoped_metrics=( - (PG_PREFIX + "begin", 1), - (PG_PREFIX + "select", 1), - (PG_PREFIX + "commit", 1), + (f"{PG_PREFIX}begin", 1), + (f"{PG_PREFIX}select", 1), + (f"{PG_PREFIX}commit", 1), ), rollup_metrics=(("Datastore/all", 3),), ) @@ -172,10 +172,10 @@ async def amain(): "test_cursor", background_task=True, scoped_metrics=( - (PG_PREFIX + "begin", 1), - (PG_PREFIX + "prepare", 2), - (PG_PREFIX + "select", 3), - (PG_PREFIX + "commit", 1), + (f"{PG_PREFIX}begin", 1), + (f"{PG_PREFIX}prepare", 2), + (f"{PG_PREFIX}select", 3), + (f"{PG_PREFIX}commit", 1), ), rollup_metrics=(("Datastore/all", 7),), ) @@ -201,7 +201,7 @@ async def amain(): background_task=True, rollup_metrics=[ ( - "Datastore/instance/Postgres/" + instance_hostname("localhost") + "//.s.PGSQL.THIS_FILE_BETTER_NOT_EXIST", + 
f"Datastore/instance/Postgres/{instance_hostname('localhost')}//.s.PGSQL.THIS_FILE_BETTER_NOT_EXIST", 1, ) ], @@ -220,7 +220,7 @@ def test_unix_socket_connect(event_loop): @validate_transaction_metrics( "test_pool_acquire", background_task=True, - scoped_metrics=((PG_PREFIX + "connect", 2),), + scoped_metrics=((f"{PG_PREFIX}connect", 2),), ) @background_task(name="test_pool_acquire") def test_pool_acquire(event_loop): diff --git a/tests/datastore_bmemcached/test_memcache.py b/tests/datastore_bmemcached/test_memcache.py index 2f87da113d..cb43d63e29 100644 --- a/tests/datastore_bmemcached/test_memcache.py +++ b/tests/datastore_bmemcached/test_memcache.py @@ -30,7 +30,7 @@ MEMCACHED_HOST = DB_SETTINGS["host"] MEMCACHED_PORT = DB_SETTINGS["port"] MEMCACHED_NAMESPACE = str(os.getpid()) -MEMCACHED_ADDR = "%s:%s" % (MEMCACHED_HOST, MEMCACHED_PORT) +MEMCACHED_ADDR = f"{MEMCACHED_HOST}:{MEMCACHED_PORT}" _test_bt_set_get_delete_scoped_metrics = [ ("Datastore/operation/Memcached/set", 1), @@ -60,7 +60,7 @@ def test_bt_set_get_delete(): set_background_task(True) client = bmemcached.Client([MEMCACHED_ADDR]) - key = MEMCACHED_NAMESPACE + "key" + key = f"{MEMCACHED_NAMESPACE}key" client.set(key, "value") value = client.get(key) @@ -97,7 +97,7 @@ def test_wt_set_get_delete(): set_background_task(False) client = bmemcached.Client([MEMCACHED_ADDR]) - key = MEMCACHED_NAMESPACE + "key" + key = f"{MEMCACHED_NAMESPACE}key" client.set(key, "value") value = client.get(key) diff --git a/tests/datastore_elasticsearch/conftest.py b/tests/datastore_elasticsearch/conftest.py index 4377112e60..e70dde884f 100644 --- a/tests/datastore_elasticsearch/conftest.py +++ b/tests/datastore_elasticsearch/conftest.py @@ -39,7 +39,7 @@ ES_VERSION = tuple([int(n) for n in get_package_version("elasticsearch").split(".")]) ES_SETTINGS = elasticsearch_settings()[0] ES_MULTIPLE_SETTINGS = elasticsearch_settings() -ES_URL = "http://%s:%s" % (ES_SETTINGS["host"], ES_SETTINGS["port"]) +ES_URL = f"http://{ES_SETTINGS['host']}:{ES_SETTINGS['port']}" @pytest.fixture(scope="session") diff --git a/tests/datastore_elasticsearch/test_elasticsearch.py b/tests/datastore_elasticsearch/test_elasticsearch.py index d2c892ea92..294118192a 100644 --- a/tests/datastore_elasticsearch/test_elasticsearch.py +++ b/tests/datastore_elasticsearch/test_elasticsearch.py @@ -138,7 +138,7 @@ def is_importable(module_path): _host = instance_hostname(ES_SETTINGS["host"]) _port = ES_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Elasticsearch/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Elasticsearch/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, _all_count)) diff --git a/tests/datastore_elasticsearch/test_instrumented_methods.py b/tests/datastore_elasticsearch/test_instrumented_methods.py index 4ad88c2a58..7c38bcaa8b 100644 --- a/tests/datastore_elasticsearch/test_instrumented_methods.py +++ b/tests/datastore_elasticsearch/test_instrumented_methods.py @@ -71,7 +71,7 @@ def client(client): ], ) def test_method_on_client_datastore_trace_inputs(client, sub_module, method, args, kwargs, expected_index): - expected_operation = "%s.%s" % (sub_module, method) if sub_module else method + expected_operation = f"{sub_module}.{method}" if sub_module else method @validate_datastore_trace_inputs(target=expected_index, operation=expected_operation) @background_task() @@ -93,7 +93,7 @@ def is_wrapped(m): methods = {m for m in dir(_object) if not m[0] == "_"} uninstrumented = {m for m in (methods - ignored_methods) if 
not is_wrapped(m)} - assert not uninstrumented, "There are uninstrumented methods: %s" % uninstrumented + assert not uninstrumented, f"There are uninstrumented methods: {uninstrumented}" @RUN_IF_V8 diff --git a/tests/datastore_elasticsearch/test_mget.py b/tests/datastore_elasticsearch/test_mget.py index f3f7c09790..5058146fe6 100644 --- a/tests/datastore_elasticsearch/test_mget.py +++ b/tests/datastore_elasticsearch/test_mget.py @@ -68,8 +68,8 @@ host_2 = instance_hostname(es_2["host"]) port_2 = es_2["port"] - instance_metric_name_1 = "Datastore/instance/Elasticsearch/%s/%s" % (host_1, port_1) - instance_metric_name_2 = "Datastore/instance/Elasticsearch/%s/%s" % (host_2, port_2) + instance_metric_name_1 = f"Datastore/instance/Elasticsearch/{host_1}/{port_1}" + instance_metric_name_2 = f"Datastore/instance/Elasticsearch/{host_2}/{port_2}" _enable_rollup_metrics.extend( [ @@ -88,7 +88,7 @@ @pytest.fixture(scope="module") def client(): - urls = ["http://%s:%s" % (db["host"], db["port"]) for db in ES_MULTIPLE_SETTINGS] + urls = [f"http://{db['host']}:{db['port']}" for db in ES_MULTIPLE_SETTINGS] # When selecting a connection from the pool, use the round robin method. # This is actually the default already. Using round robin will ensure that # doing two db calls will mean elastic search is talking to two different diff --git a/tests/datastore_elasticsearch/test_multiple_dbs.py b/tests/datastore_elasticsearch/test_multiple_dbs.py index 71c47b1685..b427c90a12 100644 --- a/tests/datastore_elasticsearch/test_multiple_dbs.py +++ b/tests/datastore_elasticsearch/test_multiple_dbs.py @@ -61,8 +61,8 @@ host_2 = instance_hostname(es_2["host"]) port_2 = es_2["port"] - instance_metric_name_1 = "Datastore/instance/Elasticsearch/%s/%s" % (host_1, port_1) - instance_metric_name_2 = "Datastore/instance/Elasticsearch/%s/%s" % (host_2, port_2) + instance_metric_name_1 = f"Datastore/instance/Elasticsearch/{host_1}/{port_1}" + instance_metric_name_2 = f"Datastore/instance/Elasticsearch/{host_2}/{port_2}" _enable_rollup_metrics.extend( [ @@ -104,7 +104,7 @@ def _exercise_es(es): @background_task() def test_multiple_dbs_enabled(): for db in ES_MULTIPLE_SETTINGS: - es_url = "http://%s:%s" % (db["host"], db["port"]) + es_url = f"http://{db['host']}:{db['port']}" client = Elasticsearch(es_url) _exercise_es(client) @@ -120,6 +120,6 @@ def test_multiple_dbs_enabled(): @background_task() def test_multiple_dbs_disabled(): for db in ES_MULTIPLE_SETTINGS: - es_url = "http://%s:%s" % (db["host"], db["port"]) + es_url = f"http://{db['host']}:{db['port']}" client = Elasticsearch(es_url) _exercise_es(client) diff --git a/tests/datastore_firestore/conftest.py b/tests/datastore_firestore/conftest.py index ca54a08383..6fd1550753 100644 --- a/tests/datastore_firestore/conftest.py +++ b/tests/datastore_firestore/conftest.py @@ -62,7 +62,7 @@ def instance_info(): @pytest.fixture(scope="session") def client(): - os.environ["FIRESTORE_EMULATOR_HOST"] = "%s:%d" % (FIRESTORE_HOST, FIRESTORE_PORT) + os.environ["FIRESTORE_EMULATOR_HOST"] = f"{FIRESTORE_HOST}:{FIRESTORE_PORT}" client = Client() # Ensure connection is available client.collection("healthcheck").document("healthcheck").set({}, retry=None, timeout=5) @@ -71,14 +71,14 @@ def client(): @pytest.fixture(scope="function") def collection(client): - collection_ = client.collection("firestore_collection_" + str(uuid.uuid4())) + collection_ = client.collection(f"firestore_collection_{str(uuid.uuid4())}") yield collection_ client.recursive_delete(collection_) 
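A side note on the firestore fixture just above: the str() around uuid.uuid4() survives from the old %s formatting but is a no-op inside an f-string, since a bare replacement field already formats its value. An illustrative snippet, not part of the patch:

    # Not part of the patch: a bare {} replacement field already applies
    # str()/format(), so the extra str() call above changes nothing.
    import uuid

    u = uuid.uuid4()
    assert f"firestore_collection_{str(u)}" == f"firestore_collection_{u}"

The same no-op pattern appears elsewhere in this diff, e.g. str(f"datastore_mysql_{os.getpid()}") and cursor.callproc(f"{DB_PROCEDURE}"), where the wrapper adds nothing.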
@pytest.fixture(scope="session") def async_client(loop): - os.environ["FIRESTORE_EMULATOR_HOST"] = "%s:%d" % (FIRESTORE_HOST, FIRESTORE_PORT) + os.environ["FIRESTORE_EMULATOR_HOST"] = f"{FIRESTORE_HOST}:{FIRESTORE_PORT}" client = AsyncClient() loop.run_until_complete( client.collection("healthcheck").document("healthcheck").set({}, retry=None, timeout=5) diff --git a/tests/datastore_firestore/test_async_batching.py b/tests/datastore_firestore/test_async_batching.py index 39e532a041..5e6fbd3c7d 100644 --- a/tests/datastore_firestore/test_async_batching.py +++ b/tests/datastore_firestore/test_async_batching.py @@ -47,7 +47,7 @@ def test_firestore_async_write_batch(loop, exercise_async_write_batch, instance_ _test_rollup_metrics = [ ("Datastore/all", 1), ("Datastore/allOther", 1), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 1), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 1), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_async_client.py b/tests/datastore_firestore/test_async_client.py index 1c7518bf03..236d9c2161 100644 --- a/tests/datastore_firestore/test_async_client.py +++ b/tests/datastore_firestore/test_async_client.py @@ -52,7 +52,7 @@ def test_firestore_async_client(loop, exercise_async_client, instance_info): _test_rollup_metrics = [ ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_async_collections.py b/tests/datastore_firestore/test_async_collections.py index 214ee2939c..c1658d18b3 100644 --- a/tests/datastore_firestore/test_async_collections.py +++ b/tests/datastore_firestore/test_async_collections.py @@ -45,10 +45,10 @@ async def _exercise_async_collections(): def test_firestore_async_collections(loop, exercise_async_collections, async_collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/stream" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/get" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/list_documents" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/add" % async_collection.id, 2), + (f"Datastore/statement/Firestore/{async_collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/get", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/list_documents", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/add", 2), ] _test_rollup_metrics = [ @@ -58,7 +58,7 @@ def test_firestore_async_collections(loop, exercise_async_collections, async_col ("Datastore/operation/Firestore/list_documents", 1), ("Datastore/all", 5), ("Datastore/allOther", 5), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 5), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 5), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_async_documents.py b/tests/datastore_firestore/test_async_documents.py index c906932085..2a0d5e9b81 100644 --- a/tests/datastore_firestore/test_async_documents.py +++ b/tests/datastore_firestore/test_async_documents.py @@ -67,7 +67,7 @@ def test_firestore_async_documents(loop, exercise_async_documents, instance_info 
("Datastore/operation/Firestore/delete", 1), ("Datastore/all", 7), ("Datastore/allOther", 7), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 7), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 7), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_async_query.py b/tests/datastore_firestore/test_async_query.py index 1bc579b7f8..4d0267e90c 100644 --- a/tests/datastore_firestore/test_async_query.py +++ b/tests/datastore_firestore/test_async_query.py @@ -53,8 +53,8 @@ async def _exercise_async_query(): def test_firestore_async_query(loop, exercise_async_query, async_collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/stream" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/get" % async_collection.id, 1), + (f"Datastore/statement/Firestore/{async_collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/get", 1), ] _test_rollup_metrics = [ @@ -62,7 +62,7 @@ def test_firestore_async_query(loop, exercise_async_query, async_collection, ins ("Datastore/operation/Firestore/stream", 1), ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] # @validate_database_duration() @@ -109,8 +109,8 @@ async def _exercise_async_aggregation_query(): def test_firestore_async_aggregation_query(loop, exercise_async_aggregation_query, async_collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/stream" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/get" % async_collection.id, 1), + (f"Datastore/statement/Firestore/{async_collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/get", 1), ] _test_rollup_metrics = [ @@ -118,7 +118,7 @@ def test_firestore_async_aggregation_query(loop, exercise_async_aggregation_quer ("Datastore/operation/Firestore/stream", 1), ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() @@ -202,9 +202,9 @@ def test_firestore_async_collection_group( loop, exercise_async_collection_group, async_collection, patch_partition_queries, instance_info ): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/get" % async_collection.id, 3), - ("Datastore/statement/Firestore/%s/stream" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/get_partitions" % async_collection.id, 1), + (f"Datastore/statement/Firestore/{async_collection.id}/get", 3), + (f"Datastore/statement/Firestore/{async_collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/get_partitions", 1), ] _test_rollup_metrics = [ @@ -213,7 +213,7 @@ def test_firestore_async_collection_group( ("Datastore/operation/Firestore/get_partitions", 1), ("Datastore/all", 5), ("Datastore/allOther", 5), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 5), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 5), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_async_transaction.py 
b/tests/datastore_firestore/test_async_transaction.py index 2b8646ec5b..37a5cc76bd 100644 --- a/tests/datastore_firestore/test_async_transaction.py +++ b/tests/datastore_firestore/test_async_transaction.py @@ -29,7 +29,7 @@ @pytest.fixture(autouse=True) def sample_data(collection): for x in range(1, 4): - collection.add({"x": x}, "doc%d" % x) + collection.add({"x": x}, f"doc{x}") @pytest.fixture() @@ -56,7 +56,7 @@ async def _exercise(async_transaction): with pytest.raises( TypeError ): # get_all is currently broken. It attempts to await an async_generator instead of consuming it. - all_docs = async_transaction.get_all([async_collection.document("doc%d" % x) for x in range(1, 4)]) + all_docs = async_transaction.get_all([async_collection.document(f"doc{x}") for x in range(1, 4)]) assert len([_ async for _ in all_docs]) == 3 # set and delete methods @@ -92,8 +92,8 @@ def test_firestore_async_transaction_commit(loop, exercise_async_transaction_com _test_scoped_metrics = [ ("Datastore/operation/Firestore/commit", 1), # ("Datastore/operation/Firestore/get_all", 2), - # ("Datastore/statement/Firestore/%s/stream" % async_collection.id, 1), - ("Datastore/statement/Firestore/%s/list_documents" % async_collection.id, 1), + # (f"Datastore/statement/Firestore/{async_collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{async_collection.id}/list_documents", 1), ] _test_rollup_metrics = [ @@ -101,7 +101,7 @@ def test_firestore_async_transaction_commit(loop, exercise_async_transaction_com ("Datastore/operation/Firestore/list_documents", 1), ("Datastore/all", 2), # Should be 5 if not for broken APIs ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() @@ -123,14 +123,14 @@ def test_firestore_async_transaction_rollback( ): _test_scoped_metrics = [ ("Datastore/operation/Firestore/rollback", 1), - ("Datastore/statement/Firestore/%s/list_documents" % async_collection.id, 1), + (f"Datastore/statement/Firestore/{async_collection.id}/list_documents", 1), ] _test_rollup_metrics = [ ("Datastore/operation/Firestore/list_documents", 1), ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_batching.py b/tests/datastore_firestore/test_batching.py index 07964338c0..67b1b28a0b 100644 --- a/tests/datastore_firestore/test_batching.py +++ b/tests/datastore_firestore/test_batching.py @@ -49,7 +49,7 @@ def test_firestore_write_batch(exercise_write_batch, instance_info): _test_rollup_metrics = [ ("Datastore/all", 1), ("Datastore/allOther", 1), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 1), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 1), ] @validate_database_duration() @@ -101,7 +101,7 @@ def test_firestore_bulk_write_batch(exercise_bulk_write_batch, instance_info): _test_rollup_metrics = [ ("Datastore/all", 1), ("Datastore/allOther", 1), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 1), + 
(f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 1), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_client.py b/tests/datastore_firestore/test_client.py index 81fbd181c7..3e00d4d335 100644 --- a/tests/datastore_firestore/test_client.py +++ b/tests/datastore_firestore/test_client.py @@ -51,7 +51,7 @@ def test_firestore_client(exercise_client, instance_info): _test_rollup_metrics = [ ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_collections.py b/tests/datastore_firestore/test_collections.py index 2e58bbe950..8597cdd5e9 100644 --- a/tests/datastore_firestore/test_collections.py +++ b/tests/datastore_firestore/test_collections.py @@ -45,10 +45,10 @@ def _exercise_collections(): def test_firestore_collections(exercise_collections, collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/stream" % collection.id, 1), - ("Datastore/statement/Firestore/%s/get" % collection.id, 1), - ("Datastore/statement/Firestore/%s/list_documents" % collection.id, 1), - ("Datastore/statement/Firestore/%s/add" % collection.id, 2), + (f"Datastore/statement/Firestore/{collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{collection.id}/get", 1), + (f"Datastore/statement/Firestore/{collection.id}/list_documents", 1), + (f"Datastore/statement/Firestore/{collection.id}/add", 2), ] _test_rollup_metrics = [ @@ -58,7 +58,7 @@ def test_firestore_collections(exercise_collections, collection, instance_info): ("Datastore/operation/Firestore/list_documents", 1), ("Datastore/all", 5), ("Datastore/allOther", 5), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 5), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 5), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_documents.py b/tests/datastore_firestore/test_documents.py index ae6b94edd8..11a737cbc8 100644 --- a/tests/datastore_firestore/test_documents.py +++ b/tests/datastore_firestore/test_documents.py @@ -67,7 +67,7 @@ def test_firestore_documents(exercise_documents, instance_info): ("Datastore/operation/Firestore/delete", 1), ("Datastore/all", 7), ("Datastore/allOther", 7), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 7), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 7), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_query.py b/tests/datastore_firestore/test_query.py index 6f1643c5b9..6ec576e4a5 100644 --- a/tests/datastore_firestore/test_query.py +++ b/tests/datastore_firestore/test_query.py @@ -51,8 +51,8 @@ def _exercise_query(): def test_firestore_query(exercise_query, collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/stream" % collection.id, 1), - ("Datastore/statement/Firestore/%s/get" % collection.id, 1), + (f"Datastore/statement/Firestore/{collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{collection.id}/get", 1), ] _test_rollup_metrics = [ @@ -60,7 +60,7 @@ def test_firestore_query(exercise_query, collection, instance_info): ("Datastore/operation/Firestore/stream", 
1), ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() @@ -107,8 +107,8 @@ def _exercise_aggregation_query(): def test_firestore_aggregation_query(exercise_aggregation_query, collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/stream" % collection.id, 1), - ("Datastore/statement/Firestore/%s/get" % collection.id, 1), + (f"Datastore/statement/Firestore/{collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{collection.id}/get", 1), ] _test_rollup_metrics = [ @@ -116,7 +116,7 @@ def test_firestore_aggregation_query(exercise_aggregation_query, collection, ins ("Datastore/operation/Firestore/stream", 1), ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() @@ -193,9 +193,9 @@ def _exercise_collection_group(): def test_firestore_collection_group(exercise_collection_group, client, collection, instance_info): _test_scoped_metrics = [ - ("Datastore/statement/Firestore/%s/get" % collection.id, 3), - ("Datastore/statement/Firestore/%s/stream" % collection.id, 1), - ("Datastore/statement/Firestore/%s/get_partitions" % collection.id, 1), + (f"Datastore/statement/Firestore/{collection.id}/get", 3), + (f"Datastore/statement/Firestore/{collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{collection.id}/get_partitions", 1), ] _test_rollup_metrics = [ @@ -204,7 +204,7 @@ def test_firestore_collection_group(exercise_collection_group, client, collectio ("Datastore/operation/Firestore/get_partitions", 1), ("Datastore/all", 5), ("Datastore/allOther", 5), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 5), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 5), ] @validate_database_duration() diff --git a/tests/datastore_firestore/test_transaction.py b/tests/datastore_firestore/test_transaction.py index 59d496a00a..495c61e767 100644 --- a/tests/datastore_firestore/test_transaction.py +++ b/tests/datastore_firestore/test_transaction.py @@ -28,7 +28,7 @@ @pytest.fixture(autouse=True) def sample_data(collection): for x in range(1, 4): - collection.add({"x": x}, "doc%d" % x) + collection.add({"x": x}, f"doc{x}") @pytest.fixture() @@ -46,7 +46,7 @@ def _exercise(transaction): assert len([_ for _ in transaction.get(query)]) == 1 # get_all on a list of DocumentReferences - all_docs = transaction.get_all([collection.document("doc%d" % x) for x in range(1, 4)]) + all_docs = transaction.get_all([collection.document(f"doc{x}") for x in range(1, 4)]) assert len([_ for _ in all_docs]) == 3 # set and delete methods @@ -82,8 +82,8 @@ def test_firestore_transaction_commit(exercise_transaction_commit, collection, i _test_scoped_metrics = [ ("Datastore/operation/Firestore/commit", 1), ("Datastore/operation/Firestore/get_all", 2), - ("Datastore/statement/Firestore/%s/stream" % collection.id, 1), - ("Datastore/statement/Firestore/%s/list_documents" % collection.id, 1), + (f"Datastore/statement/Firestore/{collection.id}/stream", 1), + (f"Datastore/statement/Firestore/{collection.id}/list_documents", 1), ] 
_test_rollup_metrics = [ @@ -91,7 +91,7 @@ def test_firestore_transaction_commit(exercise_transaction_commit, collection, i ("Datastore/operation/Firestore/list_documents", 1), ("Datastore/all", 5), ("Datastore/allOther", 5), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 5), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 5), ] @validate_database_duration() @@ -111,14 +111,14 @@ def _test(): def test_firestore_transaction_rollback(exercise_transaction_rollback, collection, instance_info): _test_scoped_metrics = [ ("Datastore/operation/Firestore/rollback", 1), - ("Datastore/statement/Firestore/%s/list_documents" % collection.id, 1), + (f"Datastore/statement/Firestore/{collection.id}/list_documents", 1), ] _test_rollup_metrics = [ ("Datastore/operation/Firestore/list_documents", 1), ("Datastore/all", 2), ("Datastore/allOther", 2), - ("Datastore/instance/Firestore/%s/%s" % (instance_info["host"], instance_info["port_path_or_id"]), 2), + (f"Datastore/instance/Firestore/{instance_info['host']}/{instance_info['port_path_or_id']}", 2), ] @validate_database_duration() diff --git a/tests/datastore_memcache/conftest.py b/tests/datastore_memcache/conftest.py index 7812cea264..e63f9c0274 100644 --- a/tests/datastore_memcache/conftest.py +++ b/tests/datastore_memcache/conftest.py @@ -43,7 +43,7 @@ def memcached_multi(): """Generate keys that will go onto different servers""" DB_SETTINGS = memcached_settings() - db_servers = ["%s:%s" % (s["host"], s["port"]) for s in DB_SETTINGS] + db_servers = [f"{s['host']}:{s['port']}" for s in DB_SETTINGS] clients = [memcache.Client([s]) for s in db_servers] client_all = memcache.Client(db_servers) diff --git a/tests/datastore_memcache/test_memcache.py b/tests/datastore_memcache/test_memcache.py index a66c114eef..dba37d6071 100644 --- a/tests/datastore_memcache/test_memcache.py +++ b/tests/datastore_memcache/test_memcache.py @@ -23,7 +23,7 @@ from newrelic.common.object_wrapper import wrap_function_wrapper DB_SETTINGS = memcached_settings()[0] -MEMCACHED_ADDR = '%s:%s' % (DB_SETTINGS['host'], DB_SETTINGS['port']) +MEMCACHED_ADDR = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" # Settings @@ -59,7 +59,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Memcached/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Memcached/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 3) @@ -72,7 +72,7 @@ # Query def _exercise_db(client): - key = DB_SETTINGS['namespace'] + 'key' + key = f"{DB_SETTINGS['namespace']}key" client.set(key, 'value') value = client.get(key) client.delete(key) diff --git a/tests/datastore_memcache/test_multiple_dbs.py b/tests/datastore_memcache/test_multiple_dbs.py index dbc3ea2b3b..c2b656775a 100644 --- a/tests/datastore_memcache/test_multiple_dbs.py +++ b/tests/datastore_memcache/test_multiple_dbs.py @@ -65,10 +65,8 @@ host_2 = instance_hostname(memcached_2['host']) port_2 = memcached_2['port'] - instance_metric_name_1 = 'Datastore/instance/Memcached/%s/%s' % (host_1, - port_1) - instance_metric_name_2 = 'Datastore/instance/Memcached/%s/%s' % (host_2, - port_2) + instance_metric_name_1 = f'Datastore/instance/Memcached/{host_1}/{port_1}' + instance_metric_name_2 = f'Datastore/instance/Memcached/{host_2}/{port_2}' _enable_rollup_metrics.extend([ (instance_metric_name_1, None), @@ -89,7 +87,7 @@ def exercise_memcached(client, multi_dict): 
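Two f-string rules that the conversions throughout these hunks rely on, shown in a small illustrative snippet (not part of the patch; the values are placeholders):

    # Illustrative only. Before Python 3.12 (PEP 701), an expression inside
    # an f-string may not reuse the enclosing quote character, which is why
    # dict subscripts switch to single quotes inside double-quoted literals.
    settings = {"host": "localhost", "port": 11211}
    addr = f"{settings['host']}:{settings['port']}"
    assert addr == "localhost:11211"

    # Literal braces are written by doubling them, as in the GraphQL query
    # template converted earlier in this diff.
    field = "storage"
    assert f"query MyQuery {{ {field} }}" == "query MyQuery { storage }"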
@pytest.mark.skipif(len(DB_MULTIPLE_SETTINGS) < 2, reason='Test environment not configured with multiple databases.') @override_application_settings(_enable_instance_settings) -@validate_transaction_metrics(transaction_metric_prefix+'_enabled', +@validate_transaction_metrics(f"{transaction_metric_prefix}_enabled", scoped_metrics=_enable_scoped_metrics, rollup_metrics=_enable_rollup_metrics, background_task=True) @@ -98,7 +96,7 @@ def test_multiple_datastores_enabled(memcached_multi): memcached1 = DB_MULTIPLE_SETTINGS[0] memcached2 = DB_MULTIPLE_SETTINGS[1] settings = [memcached1, memcached2] - servers = ["%s:%s" % (x['host'], x['port']) for x in settings] + servers = [f"{x['host']}:{x['port']}" for x in settings] client = memcache.Client(servers=servers) @@ -107,7 +105,7 @@ def test_multiple_datastores_enabled(memcached_multi): @pytest.mark.skipif(len(DB_MULTIPLE_SETTINGS) < 2, reason='Test environment not configured with multiple databases.') @override_application_settings(_disable_instance_settings) -@validate_transaction_metrics(transaction_metric_prefix+'_disabled', +@validate_transaction_metrics(f"{transaction_metric_prefix}_disabled", scoped_metrics=_disable_scoped_metrics, rollup_metrics=_disable_rollup_metrics, background_task=True) @@ -116,7 +114,7 @@ def test_multiple_datastores_disabled(memcached_multi): memcached1 = DB_MULTIPLE_SETTINGS[0] memcached2 = DB_MULTIPLE_SETTINGS[1] settings = [memcached1, memcached2] - servers = ["%s:%s" % (x['host'], x['port']) for x in settings] + servers = [f"{x['host']}:{x['port']}" for x in settings] client = memcache.Client(servers=servers) diff --git a/tests/datastore_memcache/test_span_event.py b/tests/datastore_memcache/test_span_event.py index a8da4d0e56..cea8a06e14 100644 --- a/tests/datastore_memcache/test_span_event.py +++ b/tests/datastore_memcache/test_span_event.py @@ -25,7 +25,7 @@ from newrelic.api.background_task import background_task DB_SETTINGS = memcached_settings()[0] -MEMCACHED_ADDR = '%s:%s' % (DB_SETTINGS['host'], DB_SETTINGS['port']) +MEMCACHED_ADDR = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" # Settings @@ -44,7 +44,7 @@ # Query def _exercise_db(client): - key = DB_SETTINGS['namespace'] + 'key' + key = f"{DB_SETTINGS['namespace']}key" client.set(key, 'value') value = client.get(key) client.delete(key) @@ -73,7 +73,7 @@ def test_span_events(instance_enabled): settings = _enable_instance_settings hostname = instance_hostname(DB_SETTINGS['host']) exact_agents.update({ - 'peer.address': '%s:%s' % (hostname, DB_SETTINGS['port']), + 'peer.address': f"{hostname}:{DB_SETTINGS['port']}", 'peer.hostname': hostname, }) else: diff --git a/tests/datastore_mysql/conftest.py b/tests/datastore_mysql/conftest.py index bc241617d4..fa2b0df727 100644 --- a/tests/datastore_mysql/conftest.py +++ b/tests/datastore_mysql/conftest.py @@ -39,4 +39,4 @@ @pytest.fixture(scope="session") def table_name(): - return str("datastore_mysql_%d" % os.getpid()) + return str(f"datastore_mysql_{os.getpid()}") diff --git a/tests/datastore_mysql/test_database.py b/tests/datastore_mysql/test_database.py index d14e11a41f..d0cbfbdaaa 100644 --- a/tests/datastore_mysql/test_database.py +++ b/tests/datastore_mysql/test_database.py @@ -28,7 +28,7 @@ DB_SETTINGS = mysql_settings() DB_SETTINGS = DB_SETTINGS[0] DB_NAMESPACE = DB_SETTINGS["namespace"] -DB_PROCEDURE = "hello_" + DB_NAMESPACE +DB_PROCEDURE = f"hello_{DB_NAMESPACE}" mysql_version = get_package_version_tuple("mysql.connector") @@ -39,13 +39,13 @@ _test_execute_via_cursor_scoped_metrics = [ 
(_connector_metric_name, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/select" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/insert" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/update" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/delete" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/select", 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/insert", 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/update", 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/delete", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), - ("Datastore/statement/MySQL/%s/call" % DB_PROCEDURE, 1), + (f"Datastore/statement/MySQL/{DB_PROCEDURE}/call", 1), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), ] @@ -56,20 +56,20 @@ ("Datastore/MySQL/all", 13), ("Datastore/MySQL/allOther", 13), ("Datastore/operation/MySQL/select", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/select" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/select", 1), ("Datastore/operation/MySQL/insert", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/insert" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/insert", 1), ("Datastore/operation/MySQL/update", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/update" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/update", 1), ("Datastore/operation/MySQL/delete", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/delete" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/%s/call" % DB_PROCEDURE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/delete", 1), + (f"Datastore/statement/MySQL/{DB_PROCEDURE}/call", 1), ("Datastore/operation/MySQL/call", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), - ("Datastore/instance/MySQL/%s/%s" % (instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), 12), + (f"Datastore/instance/MySQL/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", 12), ] @@ -99,37 +99,36 @@ def test_execute_via_cursor(table_name): cursor = connection.cursor() - cursor.execute("""drop table if exists `%s`""" % table_name) + cursor.execute(f"""drop table if exists `{table_name}`""") - cursor.execute("""create table %s """ """(a integer, b real, c text)""" % table_name) + cursor.execute(f"""create table {table_name} (a integer, b real, c text)""") cursor.executemany( - """insert into `%s` """ % table_name + """values (%(a)s, %(b)s, %(c)s)""", + f"insert into `{table_name}` values (%(a)s, %(b)s, %(c)s)", [{"a": 1, "b": 1.0, "c": "1.0"}, {"a": 2, "b": 2.2, "c": "2.2"}, {"a": 3, "b": 3.3, "c": "3.3"}], ) - cursor.execute("""select * from %s""" % table_name) + cursor.execute(f"""select * from {table_name}""") for row in cursor: pass cursor.execute( - """update `%s` """ % table_name + """set a=%(a)s, b=%(b)s, c=%(c)s where a=%(old_a)s""", + f"update `{table_name}` set a=%(a)s, b=%(b)s, c=%(c)s where a=%(old_a)s", {"a": 4, "b": 4.0, "c": "4.0", "old_a": 1}, ) - cursor.execute("""delete from `%s` where a=2""" % table_name) + cursor.execute(f"""delete from `{table_name}` where a=2""") - cursor.execute("""drop procedure if exists %s""" % DB_PROCEDURE) + cursor.execute(f"""drop procedure if exists 
{DB_PROCEDURE}""") cursor.execute( - """CREATE PROCEDURE %s() + f"""CREATE PROCEDURE {DB_PROCEDURE}() BEGIN SELECT 'Hello World!'; END""" - % DB_PROCEDURE ) - cursor.callproc("%s" % DB_PROCEDURE) + cursor.callproc(f"{DB_PROCEDURE}") connection.commit() connection.rollback() @@ -138,13 +137,13 @@ def test_execute_via_cursor(table_name): _test_connect_using_alias_scoped_metrics = [ (_connector_metric_name, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/select" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/insert" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/update" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/delete" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/select", 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/insert", 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/update", 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/delete", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), - ("Datastore/statement/MySQL/%s/call" % DB_PROCEDURE, 1), + (f"Datastore/statement/MySQL/{DB_PROCEDURE}/call", 1), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), ] @@ -155,20 +154,20 @@ def test_execute_via_cursor(table_name): ("Datastore/MySQL/all", 13), ("Datastore/MySQL/allOther", 13), ("Datastore/operation/MySQL/select", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/select" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/select", 1), ("Datastore/operation/MySQL/insert", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/insert" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/insert", 1), ("Datastore/operation/MySQL/update", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/update" % DB_NAMESPACE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/update", 1), ("Datastore/operation/MySQL/delete", 1), - ("Datastore/statement/MySQL/datastore_mysql_%s/delete" % DB_NAMESPACE, 1), - ("Datastore/statement/MySQL/%s/call" % DB_PROCEDURE, 1), + (f"Datastore/statement/MySQL/datastore_mysql_{DB_NAMESPACE}/delete", 1), + (f"Datastore/statement/MySQL/{DB_PROCEDURE}/call", 1), ("Datastore/operation/MySQL/call", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), - ("Datastore/instance/MySQL/%s/%s" % (instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), 12), + (f"Datastore/instance/MySQL/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", 12), ] @@ -192,37 +191,36 @@ def test_connect_using_alias(table_name): cursor = connection.cursor() - cursor.execute("""drop table if exists `%s`""" % table_name) + cursor.execute(f"""drop table if exists `{table_name}`""") - cursor.execute("""create table %s """ """(a integer, b real, c text)""" % table_name) + cursor.execute(f"""create table {table_name} (a integer, b real, c text)""") cursor.executemany( - """insert into `%s` """ % table_name + """values (%(a)s, %(b)s, %(c)s)""", + f"insert into `{table_name}` values (%(a)s, %(b)s, %(c)s)", [{"a": 1, "b": 1.0, "c": "1.0"}, {"a": 2, "b": 2.2, "c": "2.2"}, {"a": 3, "b": 3.3, "c": "3.3"}], ) - cursor.execute("""select * from %s""" % table_name) + cursor.execute(f"""select * from {table_name}""") for row in cursor: pass cursor.execute( - """update `%s` """ % table_name + 
"""set a=%(a)s, b=%(b)s, c=%(c)s where a=%(old_a)s""", + f"update `{table_name}` set a=%(a)s, b=%(b)s, c=%(c)s where a=%(old_a)s", {"a": 4, "b": 4.0, "c": "4.0", "old_a": 1}, ) - cursor.execute("""delete from `%s` where a=2""" % table_name) + cursor.execute(f"""delete from `{table_name}` where a=2""") - cursor.execute("""drop procedure if exists %s""" % DB_PROCEDURE) + cursor.execute(f"""drop procedure if exists {DB_PROCEDURE}""") cursor.execute( - """CREATE PROCEDURE %s() + f"""CREATE PROCEDURE {DB_PROCEDURE}() BEGIN SELECT 'Hello World!'; END""" - % DB_PROCEDURE ) - cursor.callproc("%s" % DB_PROCEDURE) + cursor.callproc(f"{DB_PROCEDURE}") connection.commit() connection.rollback() diff --git a/tests/datastore_postgresql/test_database.py b/tests/datastore_postgresql/test_database.py index cf432d1742..c7c9ba6dc3 100644 --- a/tests/datastore_postgresql/test_database.py +++ b/tests/datastore_postgresql/test_database.py @@ -30,10 +30,10 @@ ("Function/postgresql.driver.dbapi20:connect", 1), ("Function/postgresql.driver.dbapi20:Connection.__enter__", 1), ("Function/postgresql.driver.dbapi20:Connection.__exit__", 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), ("Datastore/statement/Postgres/now/call", 1), ("Datastore/statement/Postgres/pg_sleep/call", 1), ("Datastore/operation/Postgres/drop", 1), @@ -49,13 +49,13 @@ ("Datastore/Postgres/all", 14), ("Datastore/Postgres/allOther", 14), ("Datastore/operation/Postgres/select", 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), ("Datastore/operation/Postgres/insert", 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), ("Datastore/operation/Postgres/update", 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), ("Datastore/operation/Postgres/delete", 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), ("Datastore/operation/Postgres/drop", 1), ("Datastore/operation/Postgres/create", 1), ("Datastore/statement/Postgres/now/call", 1), @@ -64,7 +64,7 @@ ("Datastore/operation/Postgres/commit", 3), ("Datastore/operation/Postgres/rollback", 1), ("Datastore/operation/Postgres/other", 1), - ("Datastore/instance/Postgres/%s/%s" % (instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), 13), + (f"Datastore/instance/Postgres/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", 13), ("Function/postgresql.driver.dbapi20:connect", 1), ("Function/postgresql.driver.dbapi20:Connection.__enter__", 1), ("Function/postgresql.driver.dbapi20:Connection.__exit__", 1), @@ -89,29 +89,27 @@ def test_execute_via_cursor(): ) as connection: cursor = connection.cursor() - cursor.execute("""drop table if 
exists %s""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""drop table if exists {DB_SETTINGS['table_name']}""") - cursor.execute("""create table %s """ % DB_SETTINGS["table_name"] + """(a integer, b real, c text)""") + cursor.execute(f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)") cursor.executemany( - """insert into %s """ % DB_SETTINGS["table_name"] + """values (%s, %s, %s)""", + f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)", [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")], ) - cursor.execute("""select * from %s""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""select * from {DB_SETTINGS['table_name']}""") cursor.execute( - """with temporaryTable (averageValue) as (select avg(b) from %s) """ % DB_SETTINGS["table_name"] - + """select * from %s,temporaryTable """ % DB_SETTINGS["table_name"] - + """where %s.b > temporaryTable.averageValue""" % DB_SETTINGS["table_name"] + f"with temporaryTable (averageValue) as (select avg(b) from {DB_SETTINGS['table_name']}) select * from {DB_SETTINGS['table_name']},temporaryTable where {DB_SETTINGS['table_name']}.b > temporaryTable.averageValue" ) cursor.execute( - """update %s """ % DB_SETTINGS["table_name"] + """set a=%s, b=%s, c=%s where a=%s""", + f"update {DB_SETTINGS['table_name']} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1), ) - cursor.execute("""delete from %s where a=2""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""delete from {DB_SETTINGS['table_name']} where a=2""") connection.commit() diff --git a/tests/datastore_psycopg/test_connection.py b/tests/datastore_psycopg/test_connection.py index f0d9d00263..c554e30acf 100644 --- a/tests/datastore_psycopg/test_connection.py +++ b/tests/datastore_psycopg/test_connection.py @@ -47,11 +47,11 @@ ("Datastore/operation/Postgres/create", 2), ("Datastore/operation/Postgres/drop", 1), ("Datastore/operation/Postgres/rollback", 1), - ("Datastore/statement/Postgres/%s/call" % DB_SETTINGS["procedure_name"], 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 3), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['procedure_name']}/call", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 3), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), ) _base_rollup_metrics = ( @@ -68,11 +68,11 @@ ("Datastore/operation/Postgres/rollback", 1), ("Datastore/operation/Postgres/select", 1), ("Datastore/operation/Postgres/update", 1), - ("Datastore/statement/Postgres/%s/call" % DB_SETTINGS["procedure_name"], 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 3), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['procedure_name']}/call", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 3), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + 
(f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), ) _disable_scoped_metrics = list(_base_scoped_metrics) @@ -84,7 +84,7 @@ _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Postgres/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Postgres/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 13)) @@ -93,17 +93,17 @@ # Query async def _execute(connection, row_type, wrapper): - sql = "drop table if exists %s" % DB_SETTINGS["table_name"] + sql = f"drop table if exists {DB_SETTINGS['table_name']}" await maybe_await(connection.execute(wrapper(sql))) - sql = "create table %s (a integer, b real, c text)" % DB_SETTINGS["table_name"] + sql = f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)" await maybe_await(connection.execute(wrapper(sql))) for params in [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")]: - sql = "insert into %s " % DB_SETTINGS["table_name"] + "values (%s, %s, %s)" + sql = f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)" await maybe_await(connection.execute(wrapper(sql), params)) - sql = "select * from %s" % DB_SETTINGS["table_name"] + sql = f"select * from {DB_SETTINGS['table_name']}" cursor = await maybe_await(connection.execute(wrapper(sql))) if hasattr(cursor, "__aiter__"): @@ -114,22 +114,21 @@ async def _execute(connection, row_type, wrapper): assert isinstance(row, row_type) # Reuse cursor to ensure it is also wrapped - sql = "update %s" % DB_SETTINGS["table_name"] + " set a=%s, b=%s, c=%s where a=%s" + sql = f"update {DB_SETTINGS['table_name']} set a=%s, b=%s, c=%s where a=%s" params = (4, 4.0, "4.0", 1) await maybe_await(cursor.execute(wrapper(sql), params)) - sql = "delete from %s where a=2" % DB_SETTINGS["table_name"] + sql = f"delete from {DB_SETTINGS['table_name']} where a=2" await maybe_await(connection.execute(wrapper(sql))) await maybe_await(connection.commit()) await maybe_await( connection.execute( - "create or replace procedure %s() \nlanguage plpgsql as $$ begin perform now(); end; $$" - % DB_SETTINGS["procedure_name"] + f"create or replace procedure {DB_SETTINGS['procedure_name']}() \nlanguage plpgsql as $$ begin perform now(); end; $$" ) ) - await maybe_await(connection.execute("call %s()" % DB_SETTINGS["procedure_name"])) + await maybe_await(connection.execute(f"call {DB_SETTINGS['procedure_name']}()")) await maybe_await(connection.rollback()) await maybe_await(connection.commit()) diff --git a/tests/datastore_psycopg/test_cursor.py b/tests/datastore_psycopg/test_cursor.py index 3f93300121..77b8a1e1fb 100644 --- a/tests/datastore_psycopg/test_cursor.py +++ b/tests/datastore_psycopg/test_cursor.py @@ -47,11 +47,11 @@ ("Datastore/operation/Postgres/create", 2), ("Datastore/operation/Postgres/drop", 1), ("Datastore/operation/Postgres/rollback", 1), - ("Datastore/statement/Postgres/%s/call" % DB_SETTINGS["procedure_name"], 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['procedure_name']}/call", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), + 
(f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), ) _base_rollup_metrics = ( @@ -68,11 +68,11 @@ ("Datastore/operation/Postgres/rollback", 1), ("Datastore/operation/Postgres/select", 1), ("Datastore/operation/Postgres/update", 1), - ("Datastore/statement/Postgres/%s/call" % DB_SETTINGS["procedure_name"], 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['procedure_name']}/call", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), ) _disable_scoped_metrics = list(_base_scoped_metrics) @@ -84,7 +84,7 @@ _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Postgres/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Postgres/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 11)) @@ -93,17 +93,17 @@ # Query async def _execute(connection, cursor, row_type, wrapper): - sql = "drop table if exists %s" % DB_SETTINGS["table_name"] + sql = f"drop table if exists {DB_SETTINGS['table_name']}" await maybe_await(cursor.execute(wrapper(sql))) - sql = "create table %s (a integer, b real, c text)" % DB_SETTINGS["table_name"] + sql = f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)" await maybe_await(cursor.execute(wrapper(sql))) - sql = "insert into %s " % DB_SETTINGS["table_name"] + "values (%s, %s, %s)" + sql = f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)" params = [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")] await maybe_await(cursor.executemany(wrapper(sql), params)) - sql = "select * from %s" % DB_SETTINGS["table_name"] + sql = f"select * from {DB_SETTINGS['table_name']}" await maybe_await(cursor.execute(wrapper(sql))) if hasattr(cursor, "__aiter__"): @@ -114,22 +114,21 @@ async def _execute(connection, cursor, row_type, wrapper): for row in cursor: assert isinstance(row, row_type) - sql = "update %s" % DB_SETTINGS["table_name"] + " set a=%s, b=%s, c=%s where a=%s" + sql = f"update {DB_SETTINGS['table_name']} set a=%s, b=%s, c=%s where a=%s" params = (4, 4.0, "4.0", 1) await maybe_await(cursor.execute(wrapper(sql), params)) - sql = "delete from %s where a=2" % DB_SETTINGS["table_name"] + sql = f"delete from {DB_SETTINGS['table_name']} where a=2" await maybe_await(cursor.execute(wrapper(sql))) await maybe_await(connection.commit()) await maybe_await( cursor.execute( - "create or replace procedure %s() \nlanguage plpgsql as $$ begin perform now(); end; $$" - % DB_SETTINGS["procedure_name"] + f"create or replace procedure {DB_SETTINGS['procedure_name']}() \nlanguage plpgsql as $$ begin perform now(); end; $$" ) ) - await maybe_await(cursor.execute("call %s()" % DB_SETTINGS["procedure_name"])) + await maybe_await(cursor.execute(f"call {DB_SETTINGS['procedure_name']}()")) await maybe_await(connection.rollback()) await maybe_await(connection.commit()) diff --git a/tests/datastore_psycopg/test_multiple_dbs.py 
b/tests/datastore_psycopg/test_multiple_dbs.py index ec69a2b9e7..ae720c6611 100644 --- a/tests/datastore_psycopg/test_multiple_dbs.py +++ b/tests/datastore_psycopg/test_multiple_dbs.py @@ -68,8 +68,8 @@ _host_2 = instance_hostname(_postgresql_2["host"]) _port_2 = _postgresql_2["port"] -_instance_metric_name_1 = "Datastore/instance/Postgres/%s/%s" % (_host_1, _port_1) -_instance_metric_name_2 = "Datastore/instance/Postgres/%s/%s" % (_host_2, _port_2) +_instance_metric_name_1 = f"Datastore/instance/Postgres/{_host_1}/{_port_1}" +_instance_metric_name_2 = f"Datastore/instance/Postgres/{_host_2}/{_port_2}" _enable_rollup_metrics.extend( [ @@ -100,9 +100,9 @@ async def _exercise_db(multiple_connections): connection = multiple_connections[1] try: cursor = connection.cursor() - await maybe_await(cursor.execute("drop table if exists %s" % DB_SETTINGS["table_name"])) + await maybe_await(cursor.execute(f"drop table if exists {DB_SETTINGS['table_name']}")) await maybe_await( - cursor.execute("create table %s " % DB_SETTINGS["table_name"] + "(a integer, b real, c text)") + cursor.execute(f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)") ) await maybe_await(connection.commit()) finally: diff --git a/tests/datastore_psycopg/test_obfuscation.py b/tests/datastore_psycopg/test_obfuscation.py index d764bc2510..1a656967a9 100644 --- a/tests/datastore_psycopg/test_obfuscation.py +++ b/tests/datastore_psycopg/test_obfuscation.py @@ -26,9 +26,9 @@ def cursor(loop, connection): try: cursor = connection.cursor() - loop.run_until_complete(maybe_await(cursor.execute("drop table if exists %s" % DB_SETTINGS["table_name"]))) + loop.run_until_complete(maybe_await(cursor.execute(f"drop table if exists {DB_SETTINGS['table_name']}"))) loop.run_until_complete( - maybe_await(cursor.execute("create table %s (b text, c text)" % DB_SETTINGS["table_name"])) + maybe_await(cursor.execute(f"create table {DB_SETTINGS['table_name']} (b text, c text)")) ) yield cursor @@ -39,16 +39,16 @@ def cursor(loop, connection): _quoting_style_tests = [ ( - "SELECT * FROM %s WHERE b='2'" % DB_SETTINGS["table_name"], - "SELECT * FROM %s WHERE b=?" % DB_SETTINGS["table_name"], + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b='2'", + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=?", ), ( - "SELECT * FROM %s WHERE b=$func$2$func$" % DB_SETTINGS["table_name"], - "SELECT * FROM %s WHERE b=?" % DB_SETTINGS["table_name"], + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=$func$2$func$", + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=?", ), ( - "SELECT * FROM %s WHERE b=U&'2'" % DB_SETTINGS["table_name"], - "SELECT * FROM %s WHERE b=U&?" 
% DB_SETTINGS["table_name"], + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=U&'2'", + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=U&?", ), ] @@ -65,8 +65,8 @@ def test(): _parameter_tests = [ ( - "SELECT * FROM " + DB_SETTINGS["table_name"] + " where b=%s", - "SELECT * FROM " + DB_SETTINGS["table_name"] + " where b=%s", + f"SELECT * FROM {DB_SETTINGS['table_name']} where b=%s", + f"SELECT * FROM {DB_SETTINGS['table_name']} where b=%s", ), ] @@ -98,23 +98,23 @@ def any_length_explain_plan(node): _test_explain_plans = [ ( - "SELECT (b, c) FROM %s ; SELECT (b, c) FROM %s" % (DB_SETTINGS["table_name"], DB_SETTINGS["table_name"]), + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']} ; SELECT (b, c) FROM {DB_SETTINGS['table_name']}", no_explain_plan, ), ( - "SELECT (b, c) FROM %s ; SELECT (b, c) FROM %s;" % (DB_SETTINGS["table_name"], DB_SETTINGS["table_name"]), + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']} ; SELECT (b, c) FROM {DB_SETTINGS['table_name']};", no_explain_plan, ), - ("SELECT (b, c) FROM %s WHERE b=';'" % DB_SETTINGS["table_name"], no_explain_plan), - (";SELECT (b, c) FROM %s" % DB_SETTINGS["table_name"], no_explain_plan), - ("SELECT (b, c) FROM %s" % DB_SETTINGS["table_name"], any_length_explain_plan), - ("SELECT (b, c) FROM %s;" % DB_SETTINGS["table_name"], any_length_explain_plan), + (f"SELECT (b, c) FROM {DB_SETTINGS['table_name']} WHERE b=';'", no_explain_plan), + (f";SELECT (b, c) FROM {DB_SETTINGS['table_name']}", no_explain_plan), + (f"SELECT (b, c) FROM {DB_SETTINGS['table_name']}", any_length_explain_plan), + (f"SELECT (b, c) FROM {DB_SETTINGS['table_name']};", any_length_explain_plan), ( - "SELECT (b, c) FROM %s;;;;;;" % DB_SETTINGS["table_name"], + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']};;;;;;", any_length_explain_plan, ), ( - "SELECT (b, c) FROM %s;\n\n" % DB_SETTINGS["table_name"], + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']};\n\n", any_length_explain_plan, ), ] @@ -127,8 +127,8 @@ def test_obfuscation_explain_plans(loop, connection, sql, validator): async def test(): try: cursor = connection.cursor() - await maybe_await(cursor.execute("drop table if exists %s" % DB_SETTINGS["table_name"])) - await maybe_await(cursor.execute("create table %s (b text, c text)" % DB_SETTINGS["table_name"])) + await maybe_await(cursor.execute(f"drop table if exists {DB_SETTINGS['table_name']}")) + await maybe_await(cursor.execute(f"create table {DB_SETTINGS['table_name']} (b text, c text)")) await maybe_await(cursor.execute(sql)) diff --git a/tests/datastore_psycopg/test_register.py b/tests/datastore_psycopg/test_register.py index 575ffac7b2..4a5113baea 100644 --- a/tests/datastore_psycopg/test_register.py +++ b/tests/datastore_psycopg/test_register.py @@ -53,25 +53,25 @@ async def coro(): @background_task() def test_register_range(loop, connection): async def test(): - type_name = "floatrange_" + str(os.getpid()) + type_name = f"floatrange_{str(os.getpid())}" - create_sql = "CREATE TYPE %s AS RANGE (" % type_name + "subtype = float8," "subtype_diff = float8mi)" + create_sql = f"CREATE TYPE {type_name} AS RANGE (subtype = float8,subtype_diff = float8mi)" cursor = connection.cursor() - await maybe_await(cursor.execute("DROP TYPE if exists %s" % type_name)) + await maybe_await(cursor.execute(f"DROP TYPE if exists {type_name}")) await maybe_await(cursor.execute(create_sql)) range_type_info = await maybe_await(psycopg.types.range.RangeInfo.fetch(connection, type_name)) range_type_info.register(connection) - await maybe_await(cursor.execute("DROP TYPE if exists 
%s" % type_name)) + await maybe_await(cursor.execute(f"DROP TYPE if exists {type_name}")) await maybe_await(cursor.execute(create_sql)) range_type_info = await maybe_await(psycopg.types.range.RangeInfo.fetch(connection, type_name)) range_type_info.register(cursor) - await maybe_await(cursor.execute("DROP TYPE if exists %s" % type_name)) + await maybe_await(cursor.execute(f"DROP TYPE if exists {type_name}")) if hasattr(connection, "__aenter__"): diff --git a/tests/datastore_psycopg/test_rollback.py b/tests/datastore_psycopg/test_rollback.py index 5befa79628..57f01ac3f6 100644 --- a/tests/datastore_psycopg/test_rollback.py +++ b/tests/datastore_psycopg/test_rollback.py @@ -54,7 +54,7 @@ _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Postgres/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Postgres/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 1)) diff --git a/tests/datastore_psycopg/test_span_event.py b/tests/datastore_psycopg/test_span_event.py index 622cf706df..df68f79662 100644 --- a/tests/datastore_psycopg/test_span_event.py +++ b/tests/datastore_psycopg/test_span_event.py @@ -74,7 +74,7 @@ def test_span_events(loop, connection, instance_enabled, db_instance_enabled): hostname = instance_hostname(DB_SETTINGS["host"]) exact_agents.update( { - "peer.address": "%s:%s" % (hostname, DB_SETTINGS["port"]), + "peer.address": f"{hostname}:{DB_SETTINGS['port']}", "peer.hostname": hostname, } ) diff --git a/tests/datastore_psycopg2/test_async.py b/tests/datastore_psycopg2/test_async.py index 7af9adc6a5..ca281084e5 100644 --- a/tests/datastore_psycopg2/test_async.py +++ b/tests/datastore_psycopg2/test_async.py @@ -44,8 +44,8 @@ # Metrics _base_scoped_metrics = ( - ('Datastore/statement/Postgres/%s/select' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/insert' % DB_SETTINGS['table_name'], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), ('Datastore/operation/Postgres/drop', 1), ('Datastore/operation/Postgres/create', 1) ) @@ -56,9 +56,9 @@ ('Datastore/Postgres/all', 5), ('Datastore/Postgres/allOther', 5), ('Datastore/operation/Postgres/select', 1), - ('Datastore/statement/Postgres/%s/select' % DB_SETTINGS['table_name'], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), ('Datastore/operation/Postgres/insert', 1), - ('Datastore/statement/Postgres/%s/insert' % DB_SETTINGS['table_name'], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), ('Datastore/operation/Postgres/drop', 1), ('Datastore/operation/Postgres/create', 1) ) @@ -75,7 +75,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Postgres/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Postgres/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 4) @@ -102,18 +102,16 @@ def _exercise_db(async_keyword): wait(async_conn) async_cur = async_conn.cursor() - async_cur.execute("""drop table if exists %s""" % DB_SETTINGS['table_name']) + async_cur.execute(f"""drop table if exists {DB_SETTINGS['table_name']}""") wait(async_cur.connection) - async_cur.execute("""create table %s """ % DB_SETTINGS['table_name'] + - """(a integer, b real, c text)""") + async_cur.execute(f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)") 
wait(async_cur.connection) - async_cur.execute("""insert into %s """ % DB_SETTINGS['table_name'] + - """values (%s, %s, %s)""", (1, 1.0, '1.0')) + async_cur.execute(f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)", (1, 1.0, '1.0')) wait(async_cur.connection) - async_cur.execute("""select * from %s""" % DB_SETTINGS['table_name']) + async_cur.execute(f"""select * from {DB_SETTINGS['table_name']}""") wait(async_cur.connection) for row in async_cur: diff --git a/tests/datastore_psycopg2/test_cursor.py b/tests/datastore_psycopg2/test_cursor.py index 8f4dcbd1d9..a534e8e643 100644 --- a/tests/datastore_psycopg2/test_cursor.py +++ b/tests/datastore_psycopg2/test_cursor.py @@ -42,10 +42,10 @@ # Metrics _base_scoped_metrics = ( ('Function/psycopg2:connect', 1), - ('Datastore/statement/Postgres/%s/select' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/insert' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/update' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/delete' % DB_SETTINGS['table_name'], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), ('Datastore/statement/Postgres/now/call', 1), ('Datastore/statement/Postgres/pg_sleep/call', 1), ('Datastore/operation/Postgres/drop', 1), @@ -59,10 +59,10 @@ ('Datastore/allOther', 12), ('Datastore/Postgres/all', 12), ('Datastore/Postgres/allOther', 12), - ('Datastore/statement/Postgres/%s/select' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/insert' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/update' % DB_SETTINGS['table_name'], 1), - ('Datastore/statement/Postgres/%s/delete' % DB_SETTINGS['table_name'], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), ('Datastore/operation/Postgres/select', 1), ('Datastore/operation/Postgres/insert', 1), ('Datastore/operation/Postgres/update', 1), @@ -85,7 +85,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Postgres/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Postgres/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 11) @@ -103,27 +103,27 @@ def _execute(connection, cursor, row_type, wrapper): psycopg2.extensions.register_type(unicode_type, connection) psycopg2.extensions.register_type(unicode_type, cursor) - sql = """drop table if exists %s""" % DB_SETTINGS["table_name"] + sql = f"""drop table if exists {DB_SETTINGS['table_name']}""" cursor.execute(wrapper(sql)) - sql = """create table %s (a integer, b real, c text)""" % DB_SETTINGS["table_name"] + sql = f"""create table {DB_SETTINGS['table_name']} (a integer, b real, c text)""" cursor.execute(wrapper(sql)) - sql = """insert into %s """ % DB_SETTINGS["table_name"] + """values (%s, %s, %s)""" + sql = f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)" params = [(1, 1.0, '1.0'), (2, 2.2, '2.2'), (3, 3.3, '3.3')] cursor.executemany(wrapper(sql), params) - sql = """select * from %s""" % DB_SETTINGS["table_name"] + sql 
= f"""select * from {DB_SETTINGS['table_name']}""" cursor.execute(wrapper(sql)) for row in cursor: assert isinstance(row, row_type) - sql = """update %s""" % DB_SETTINGS["table_name"] + """ set a=%s, b=%s, c=%s where a=%s""" + sql = f"update {DB_SETTINGS['table_name']} set a=%s, b=%s, c=%s where a=%s" params = (4, 4.0, '4.0', 1) cursor.execute(wrapper(sql), params) - sql = """delete from %s where a=2""" % DB_SETTINGS["table_name"] + sql = f"""delete from {DB_SETTINGS['table_name']} where a=2""" cursor.execute(wrapper(sql)) connection.commit() diff --git a/tests/datastore_psycopg2/test_multiple_dbs.py b/tests/datastore_psycopg2/test_multiple_dbs.py index afbdd66f2d..f69288cb85 100644 --- a/tests/datastore_psycopg2/test_multiple_dbs.py +++ b/tests/datastore_psycopg2/test_multiple_dbs.py @@ -71,10 +71,8 @@ _host_2 = instance_hostname(_postgresql_2['host']) _port_2 = _postgresql_2['port'] - _instance_metric_name_1 = 'Datastore/instance/Postgres/%s/%s' % ( - _host_1, _port_1) - _instance_metric_name_2 = 'Datastore/instance/Postgres/%s/%s' % ( - _host_2, _port_2) + _instance_metric_name_1 = f'Datastore/instance/Postgres/{_host_1}/{_port_1}' + _instance_metric_name_2 = f'Datastore/instance/Postgres/{_host_2}/{_port_2}' _enable_rollup_metrics.extend([ (_instance_metric_name_1, 2), @@ -111,9 +109,8 @@ def _exercise_db(): port=postgresql2['port']) try: cursor = connection.cursor() - cursor.execute("""drop table if exists %s""" % postgresql2["table_name"]) - cursor.execute("""create table %s """ % postgresql2["table_name"] + - """(a integer, b real, c text)""") + cursor.execute(f"""drop table if exists {postgresql2['table_name']}""") + cursor.execute(f"create table {postgresql2['table_name']} (a integer, b real, c text)") connection.commit() finally: connection.close() diff --git a/tests/datastore_psycopg2/test_obfuscation.py b/tests/datastore_psycopg2/test_obfuscation.py index 90f15d375b..69d2e00142 100644 --- a/tests/datastore_psycopg2/test_obfuscation.py +++ b/tests/datastore_psycopg2/test_obfuscation.py @@ -37,8 +37,8 @@ def psycopg2_cursor(): try: cursor = connection.cursor() - cursor.execute("drop table if exists %s" % DB_SETTINGS["table_name"]) - cursor.execute("create table %s (b text, c text)" % DB_SETTINGS["table_name"]) + cursor.execute(f"drop table if exists {DB_SETTINGS['table_name']}") + cursor.execute(f"create table {DB_SETTINGS['table_name']} (b text, c text)") yield cursor @@ -48,16 +48,16 @@ def psycopg2_cursor(): _quoting_style_tests = [ ( - "SELECT * FROM %s WHERE b='2'" % DB_SETTINGS["table_name"], - "SELECT * FROM %s WHERE b=?" % DB_SETTINGS["table_name"], + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b='2'", + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=?", ), ( - "SELECT * FROM %s WHERE b=$func$2$func$" % DB_SETTINGS["table_name"], - "SELECT * FROM %s WHERE b=?" % DB_SETTINGS["table_name"], + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=$func$2$func$", + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=?", ), ( - "SELECT * FROM %s WHERE b=U&'2'" % DB_SETTINGS["table_name"], - "SELECT * FROM %s WHERE b=U&?" 
% DB_SETTINGS["table_name"], + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=U&'2'", + f"SELECT * FROM {DB_SETTINGS['table_name']} WHERE b=U&?", ), ] @@ -74,8 +74,8 @@ def test(): _parameter_tests = [ ( - "SELECT * FROM " + DB_SETTINGS["table_name"] + " where b=%s", - "SELECT * FROM " + DB_SETTINGS["table_name"] + " where b=%s", + f"SELECT * FROM {DB_SETTINGS['table_name']} where b=%s", + f"SELECT * FROM {DB_SETTINGS['table_name']} where b=%s", ), ] @@ -107,25 +107,23 @@ def any_length_explain_plan(node): _test_explain_plans = [ ( - "SELECT (b, c) FROM %s ; SELECT (b, c) FROM %s" - % (DB_SETTINGS["table_name"], DB_SETTINGS["table_name"]), + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']} ; SELECT (b, c) FROM {DB_SETTINGS['table_name']}", no_explain_plan, ), ( - "SELECT (b, c) FROM %s ; SELECT (b, c) FROM %s;" - % (DB_SETTINGS["table_name"], DB_SETTINGS["table_name"]), + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']} ; SELECT (b, c) FROM {DB_SETTINGS['table_name']};", no_explain_plan, ), - ("SELECT (b, c) FROM %s WHERE b=';'" % DB_SETTINGS["table_name"], no_explain_plan), - (";SELECT (b, c) FROM %s" % DB_SETTINGS["table_name"], no_explain_plan), - ("SELECT (b, c) FROM %s" % DB_SETTINGS["table_name"], any_length_explain_plan), - ("SELECT (b, c) FROM %s;" % DB_SETTINGS["table_name"], any_length_explain_plan), + (f"SELECT (b, c) FROM {DB_SETTINGS['table_name']} WHERE b=';'", no_explain_plan), + (f";SELECT (b, c) FROM {DB_SETTINGS['table_name']}", no_explain_plan), + (f"SELECT (b, c) FROM {DB_SETTINGS['table_name']}", any_length_explain_plan), + (f"SELECT (b, c) FROM {DB_SETTINGS['table_name']};", any_length_explain_plan), ( - "SELECT (b, c) FROM %s;;;;;;" % DB_SETTINGS["table_name"], + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']};;;;;;", any_length_explain_plan, ), ( - "SELECT (b, c) FROM %s;\n\n" % DB_SETTINGS["table_name"], + f"SELECT (b, c) FROM {DB_SETTINGS['table_name']};\n\n", any_length_explain_plan, ), ] @@ -148,9 +146,9 @@ def test(): try: cursor = connection.cursor() - cursor.execute("drop table if exists %s" % DB_SETTINGS["table_name"]) + cursor.execute(f"drop table if exists {DB_SETTINGS['table_name']}") cursor.execute( - "create table %s (b text, c text)" % DB_SETTINGS["table_name"] + f"create table {DB_SETTINGS['table_name']} (b text, c text)" ) cursor.execute(sql) diff --git a/tests/datastore_psycopg2/test_register.py b/tests/datastore_psycopg2/test_register.py index b5450c3588..61cba82758 100644 --- a/tests/datastore_psycopg2/test_register.py +++ b/tests/datastore_psycopg2/test_register.py @@ -48,24 +48,22 @@ def test_register_range(): password=DB_SETTINGS['password'], host=DB_SETTINGS['host'], port=DB_SETTINGS['port']) as connection: - type_name = "floatrange_" + str(os.getpid()) + type_name = f"floatrange_{str(os.getpid())}" - create_sql = ('CREATE TYPE %s AS RANGE (' % type_name + - 'subtype = float8,' - 'subtype_diff = float8mi)') + create_sql = f"CREATE TYPE {type_name} AS RANGE (subtype = float8,subtype_diff = float8mi)" cursor = connection.cursor() - cursor.execute("DROP TYPE if exists %s" % type_name) + cursor.execute(f"DROP TYPE if exists {type_name}") cursor.execute(create_sql) psycopg2.extras.register_range(type_name, psycopg2.extras.NumericRange, connection) - cursor.execute("DROP TYPE if exists %s" % type_name) + cursor.execute(f"DROP TYPE if exists {type_name}") cursor.execute(create_sql) psycopg2.extras.register_range(type_name, psycopg2.extras.NumericRange, cursor) - cursor.execute("DROP TYPE if exists %s" % type_name) + cursor.execute(f"DROP TYPE 
if exists {type_name}") diff --git a/tests/datastore_psycopg2/test_rollback.py b/tests/datastore_psycopg2/test_rollback.py index 0a23b1005e..248edee43b 100644 --- a/tests/datastore_psycopg2/test_rollback.py +++ b/tests/datastore_psycopg2/test_rollback.py @@ -57,7 +57,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Postgres/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Postgres/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 1) diff --git a/tests/datastore_psycopg2/test_span_event.py b/tests/datastore_psycopg2/test_span_event.py index 0834061c70..019194ce01 100644 --- a/tests/datastore_psycopg2/test_span_event.py +++ b/tests/datastore_psycopg2/test_span_event.py @@ -82,7 +82,7 @@ def test_span_events(instance_enabled, db_instance_enabled): settings = _enable_instance_settings.copy() hostname = instance_hostname(DB_SETTINGS['host']) exact_agents.update({ - 'peer.address': '%s:%s' % (hostname, DB_SETTINGS['port']), + 'peer.address': f"{hostname}:{DB_SETTINGS['port']}", 'peer.hostname': hostname, }) else: diff --git a/tests/datastore_psycopg2cffi/test_database.py b/tests/datastore_psycopg2cffi/test_database.py index 0b3ff87d3d..9b6bb64d29 100644 --- a/tests/datastore_psycopg2cffi/test_database.py +++ b/tests/datastore_psycopg2cffi/test_database.py @@ -40,10 +40,10 @@ ("Function/psycopg2cffi:connect", 1), ("Function/psycopg2cffi._impl.connection:Connection.__enter__", 1), ("Function/psycopg2cffi._impl.connection:Connection.__exit__", 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), ("Datastore/statement/Postgres/now/call", 1), ("Datastore/statement/Postgres/pg_sleep/call", 1), ("Datastore/operation/Postgres/drop", 1), @@ -58,13 +58,13 @@ ("Datastore/Postgres/all", 13), ("Datastore/Postgres/allOther", 13), ("Datastore/operation/Postgres/select", 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), ("Datastore/operation/Postgres/insert", 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), ("Datastore/operation/Postgres/update", 1), - ("Datastore/statement/Postgres/%s/update" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/update", 1), ("Datastore/operation/Postgres/delete", 1), - ("Datastore/statement/Postgres/%s/delete" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/delete", 1), ("Datastore/operation/Postgres/drop", 1), ("Datastore/operation/Postgres/create", 1), ("Datastore/statement/Postgres/now/call", 1), @@ -72,7 +72,7 @@ ("Datastore/operation/Postgres/call", 2), ("Datastore/operation/Postgres/commit", 3), ("Datastore/operation/Postgres/rollback", 1), - ("Datastore/instance/Postgres/%s/%s" % 
(instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), 12), + (f"Datastore/instance/Postgres/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", 12), ] @@ -98,26 +98,26 @@ def test_execute_via_cursor(): psycopg2cffi.extensions.register_type(psycopg2cffi.extensions.UNICODE, connection) psycopg2cffi.extensions.register_type(psycopg2cffi.extensions.UNICODE, cursor) - cursor.execute("""drop table if exists %s""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""drop table if exists {DB_SETTINGS['table_name']}""") - cursor.execute("""create table %s """ % DB_SETTINGS["table_name"] + """(a integer, b real, c text)""") + cursor.execute(f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)") cursor.executemany( - """insert into %s """ % DB_SETTINGS["table_name"] + """values (%s, %s, %s)""", + f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)", [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")], ) - cursor.execute("""select * from %s""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""select * from {DB_SETTINGS['table_name']}""") for row in cursor: pass cursor.execute( - """update %s""" % DB_SETTINGS["table_name"] + """ set a=%s, b=%s, """ """c=%s where a=%s""", + f"update {DB_SETTINGS['table_name']} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1), ) - cursor.execute("""delete from %s where a=2""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""delete from {DB_SETTINGS['table_name']} where a=2""") connection.commit() @@ -168,8 +168,8 @@ def test_rollback_on_exception(): _test_async_mode_scoped_metrics = [ ("Function/psycopg2cffi:connect", 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), ("Datastore/operation/Postgres/drop", 1), ("Datastore/operation/Postgres/create", 1), ] @@ -180,12 +180,12 @@ def test_rollback_on_exception(): ("Datastore/Postgres/all", 5), ("Datastore/Postgres/allOther", 5), ("Datastore/operation/Postgres/select", 1), - ("Datastore/statement/Postgres/%s/select" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/select", 1), ("Datastore/operation/Postgres/insert", 1), - ("Datastore/statement/Postgres/%s/insert" % DB_SETTINGS["table_name"], 1), + (f"Datastore/statement/Postgres/{DB_SETTINGS['table_name']}/insert", 1), ("Datastore/operation/Postgres/drop", 1), ("Datastore/operation/Postgres/create", 1), - ("Datastore/instance/Postgres/%s/%s" % (instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), 4), + (f"Datastore/instance/Postgres/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", 4), ] @@ -222,16 +222,16 @@ def test_async_mode(): wait(async_conn) async_cur = async_conn.cursor() - async_cur.execute("""drop table if exists %s""" % DB_SETTINGS["table_name"]) + async_cur.execute(f"""drop table if exists {DB_SETTINGS['table_name']}""") wait(async_cur.connection) - async_cur.execute("""create table %s """ % DB_SETTINGS["table_name"] + """(a integer, b real, c text)""") + async_cur.execute(f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)") wait(async_cur.connection) - async_cur.execute("""insert into %s """ % DB_SETTINGS["table_name"] + """values (%s, %s, %s)""", (1, 1.0, "1.0")) + async_cur.execute(f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)", (1, 1.0, "1.0")) 
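# --- Illustrative aside (hypothetical follow-up, not part of the patch): a
# few conversions earlier in this patch keep wrappers that f-strings make
# redundant, e.g. cursor.callproc(f"{DB_PROCEDURE}") and
# f"floatrange_{str(os.getpid())}". Both behave correctly as written, but a
# follow-up commit could simplify them. The values below are stand-ins:
import os

DB_PROCEDURE = "hello_db"  # stand-in value for this sketch only
assert f"{DB_PROCEDURE}" == DB_PROCEDURE  # wrapping a str in an f-string is equivalent to passing it directly
assert f"floatrange_{str(os.getpid())}" == f"floatrange_{os.getpid()}"  # str() is implicit in f-string formatting
# ---------------------------------------------------------------------------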
wait(async_cur.connection) - async_cur.execute("""select * from %s""" % DB_SETTINGS["table_name"]) + async_cur.execute(f"""select * from {DB_SETTINGS['table_name']}""") wait(async_cur.connection) for row in async_cur: diff --git a/tests/datastore_pylibmc/test_memcache.py b/tests/datastore_pylibmc/test_memcache.py index 64da33416d..dd4cb9413e 100644 --- a/tests/datastore_pylibmc/test_memcache.py +++ b/tests/datastore_pylibmc/test_memcache.py @@ -27,7 +27,7 @@ MEMCACHED_PORT = DB_SETTINGS["port"] MEMCACHED_NAMESPACE = DB_SETTINGS["namespace"] -MEMCACHED_ADDR = "%s:%s" % (MEMCACHED_HOST, MEMCACHED_PORT) +MEMCACHED_ADDR = f"{MEMCACHED_HOST}:{MEMCACHED_PORT}" _test_bt_set_get_delete_scoped_metrics = [ ("Datastore/operation/Memcached/set", 1), @@ -57,7 +57,7 @@ def test_bt_set_get_delete(): set_background_task(True) client = pylibmc.Client([MEMCACHED_ADDR]) - key = MEMCACHED_NAMESPACE + "key" + key = f"{MEMCACHED_NAMESPACE}key" client.set(key, "value") value = client.get(key) @@ -94,7 +94,7 @@ def test_wt_set_get_delete(): set_background_task(False) client = pylibmc.Client([MEMCACHED_ADDR]) - key = MEMCACHED_NAMESPACE + "key" + key = f"{MEMCACHED_NAMESPACE}key" client.set(key, "value") value = client.get(key) diff --git a/tests/datastore_pymemcache/test_memcache.py b/tests/datastore_pymemcache/test_memcache.py index 3100db5b7f..7fad815b41 100644 --- a/tests/datastore_pymemcache/test_memcache.py +++ b/tests/datastore_pymemcache/test_memcache.py @@ -59,7 +59,7 @@ def test_bt_set_get_delete(): set_background_task(True) client = pymemcache.client.Client(MEMCACHED_ADDR) - key = MEMCACHED_NAMESPACE + "key" + key = f"{MEMCACHED_NAMESPACE}key" client.set(key, b"value") value = client.get(key) @@ -96,7 +96,7 @@ def test_wt_set_get_delete(): set_background_task(False) client = pymemcache.client.Client(MEMCACHED_ADDR) - key = MEMCACHED_NAMESPACE + "key" + key = f"{MEMCACHED_NAMESPACE}key" client.set(key, b"value") value = client.get(key) diff --git a/tests/datastore_pymongo/test_pymongo.py b/tests/datastore_pymongo/test_pymongo.py index a91bf09b56..507b556cb7 100644 --- a/tests/datastore_pymongo/test_pymongo.py +++ b/tests/datastore_pymongo/test_pymongo.py @@ -115,51 +115,51 @@ def _exercise_mongo(db): _test_pymongo_scoped_metrics_v3 = [ ("Function/pymongo.mongo_client:MongoClient.__init__", 1), - ("Datastore/statement/MongoDB/%s/create_index" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find" % MONGODB_COLLECTION, 3), - ("Datastore/statement/MongoDB/%s/find_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/save" % MONGODB_COLLECTION, 3), - ("Datastore/statement/MongoDB/%s" % MONGODB_COLLECTION + "/initialize_unordered_bulk_op", 1), - ("Datastore/statement/MongoDB/%s" % MONGODB_COLLECTION + "/initialize_ordered_bulk_op", 1), - ("Datastore/statement/MongoDB/%s/parallel_scan" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/bulk_write" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/replace_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/update_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/create_indexes" % MONGODB_COLLECTION, 1), - 
("Datastore/statement/MongoDB/%s/list_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_delete" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_replace" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_update" % MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_index", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find", 3), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/save", 3), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/initialize_unordered_bulk_op", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/initialize_ordered_bulk_op", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/parallel_scan", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/bulk_write", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/replace_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/update_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/list_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_delete", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_replace", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_update", 1), ] _test_pymongo_scoped_metrics_v4 = [ ("Function/pymongo.mongo_client:MongoClient.__init__", 1), - ("Datastore/statement/MongoDB/%s/create_index" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find" % MONGODB_COLLECTION, 3), - ("Datastore/statement/MongoDB/%s/find_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_one" % MONGODB_COLLECTION, 4), - ("Datastore/statement/MongoDB/%s/bulk_write" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/replace_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/update_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/create_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/list_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_delete" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_replace" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_update" % 
MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_index", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find", 3), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_one", 4), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/bulk_write", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/replace_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/update_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/list_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_delete", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_replace", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_update", 1), ] _test_pymongo_rollup_metrics_v3 = [ @@ -171,17 +171,17 @@ def _exercise_mongo(db): ("Datastore/operation/MongoDB/create_index", 1), ("Datastore/operation/MongoDB/find", 3), ("Datastore/operation/MongoDB/find_one", 1), - ("Datastore/statement/MongoDB/%s/create_index" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find" % MONGODB_COLLECTION, 3), - ("Datastore/statement/MongoDB/%s/find_one" % MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_index", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find", 3), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one", 1), ("Datastore/operation/MongoDB/save", 3), ("Datastore/operation/MongoDB/initialize_unordered_bulk_op", 1), ("Datastore/operation/MongoDB/initialize_ordered_bulk_op", 1), ("Datastore/operation/MongoDB/parallel_scan", 1), - ("Datastore/statement/MongoDB/%s/save" % MONGODB_COLLECTION, 3), - (("Datastore/statement/MongoDB/%s" % MONGODB_COLLECTION + "/initialize_unordered_bulk_op"), 1), - (("Datastore/statement/MongoDB/%s" % MONGODB_COLLECTION + "/initialize_ordered_bulk_op"), 1), - ("Datastore/statement/MongoDB/%s/parallel_scan" % MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/save", 3), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/initialize_unordered_bulk_op", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/initialize_ordered_bulk_op", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/parallel_scan", 1), ("Datastore/operation/MongoDB/bulk_write", 1), ("Datastore/operation/MongoDB/insert_one", 1), ("Datastore/operation/MongoDB/insert_many", 1), @@ -197,21 +197,21 @@ def _exercise_mongo(db): ("Datastore/operation/MongoDB/find_one_and_delete", 1), ("Datastore/operation/MongoDB/find_one_and_replace", 1), ("Datastore/operation/MongoDB/find_one_and_update", 1), - ("Datastore/statement/MongoDB/%s/bulk_write" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/replace_one" % MONGODB_COLLECTION, 1), - 
("Datastore/statement/MongoDB/%s/update_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/create_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/list_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_delete" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_replace" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_update" % MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/bulk_write", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/replace_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/update_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/list_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_delete", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_replace", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_update", 1), ] _test_pymongo_rollup_metrics_v4 = [ @@ -223,9 +223,9 @@ def _exercise_mongo(db): ("Datastore/operation/MongoDB/create_index", 1), ("Datastore/operation/MongoDB/find", 3), ("Datastore/operation/MongoDB/find_one", 1), - ("Datastore/statement/MongoDB/%s/create_index" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find" % MONGODB_COLLECTION, 3), - ("Datastore/statement/MongoDB/%s/find_one" % MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_index", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find", 3), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one", 1), ("Datastore/operation/MongoDB/bulk_write", 1), ("Datastore/operation/MongoDB/insert_one", 4), ("Datastore/operation/MongoDB/insert_many", 1), @@ -241,21 +241,21 @@ def _exercise_mongo(db): ("Datastore/operation/MongoDB/find_one_and_delete", 1), ("Datastore/operation/MongoDB/find_one_and_replace", 1), ("Datastore/operation/MongoDB/find_one_and_update", 1), - ("Datastore/statement/MongoDB/%s/bulk_write" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/insert_one" % MONGODB_COLLECTION, 4), - ("Datastore/statement/MongoDB/%s/insert_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/replace_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/update_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_one" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/delete_many" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_raw_batches" % MONGODB_COLLECTION, 
1), - ("Datastore/statement/MongoDB/%s/create_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/list_indexes" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/aggregate_raw_batches" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_delete" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_replace" % MONGODB_COLLECTION, 1), - ("Datastore/statement/MongoDB/%s/find_one_and_update" % MONGODB_COLLECTION, 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/bulk_write", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_one", 4), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/insert_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/replace_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/update_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_one", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/delete_many", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/list_indexes", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/aggregate_raw_batches", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_delete", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_replace", 1), + (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one_and_update", 1), ] diff --git a/tests/datastore_pymssql/test_database.py b/tests/datastore_pymssql/test_database.py index bdbf75c15f..ca11863b3b 100644 --- a/tests/datastore_pymssql/test_database.py +++ b/tests/datastore_pymssql/test_database.py @@ -22,35 +22,34 @@ from newrelic.api.background_task import background_task DB_SETTINGS = mssql_settings()[0] -TABLE_NAME = "datastore_pymssql_" + DB_SETTINGS["namespace"] -PROCEDURE_NAME = "hello_" + DB_SETTINGS["namespace"] +TABLE_NAME = f"datastore_pymssql_{DB_SETTINGS['namespace']}" +PROCEDURE_NAME = f"hello_{DB_SETTINGS['namespace']}" def execute_db_calls_with_cursor(cursor): - cursor.execute("""drop table if exists %s""" % TABLE_NAME) + cursor.execute(f"""drop table if exists {TABLE_NAME}""") - cursor.execute("""create table %s """ % TABLE_NAME + """(a integer, b real, c text)""") + cursor.execute(f"create table {TABLE_NAME} (a integer, b real, c text)") cursor.executemany( - """insert into %s """ % TABLE_NAME + """values (%s, %s, %s)""", + f"insert into {TABLE_NAME} values (%s, %s, %s)", [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")], ) - cursor.execute("""select * from %s""" % TABLE_NAME) + cursor.execute(f"""select * from {TABLE_NAME}""") for row in cursor: pass - cursor.execute("""update %s""" % TABLE_NAME + """ set a=%s, b=%s, """ """c=%s where a=%s""", (4, 4.0, "4.0", 1)) + cursor.execute(f"update {TABLE_NAME} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1)) - cursor.execute("""delete from %s where a=2""" % TABLE_NAME) - cursor.execute("""drop procedure if exists %s""" % PROCEDURE_NAME) + cursor.execute(f"""delete from {TABLE_NAME} where a=2""") + cursor.execute(f"""drop procedure if exists {PROCEDURE_NAME}""") cursor.execute( - """CREATE PROCEDURE %s AS + f"""CREATE PROCEDURE {PROCEDURE_NAME} AS BEGIN SELECT 'Hello World!'; END""" - % PROCEDURE_NAME ) cursor.callproc(PROCEDURE_NAME) @@ -58,13 
+57,13 @@ def execute_db_calls_with_cursor(cursor): _test_scoped_metrics = [ ("Function/pymssql._pymssql:connect", 1), - ("Datastore/statement/MSSQL/%s/select" % TABLE_NAME, 1), - ("Datastore/statement/MSSQL/%s/insert" % TABLE_NAME, 1), - ("Datastore/statement/MSSQL/%s/update" % TABLE_NAME, 1), - ("Datastore/statement/MSSQL/%s/delete" % TABLE_NAME, 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/select", 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/insert", 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/update", 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/delete", 1), ("Datastore/operation/MSSQL/drop", 2), ("Datastore/operation/MSSQL/create", 2), - ("Datastore/statement/MSSQL/%s/call" % PROCEDURE_NAME, 1), + (f"Datastore/statement/MSSQL/{PROCEDURE_NAME}/call", 1), ("Datastore/operation/MSSQL/commit", 2), ("Datastore/operation/MSSQL/rollback", 1), ] @@ -74,15 +73,15 @@ def execute_db_calls_with_cursor(cursor): ("Datastore/allOther", 13), ("Datastore/MSSQL/all", 13), ("Datastore/MSSQL/allOther", 13), - ("Datastore/statement/MSSQL/%s/select" % TABLE_NAME, 1), - ("Datastore/statement/MSSQL/%s/insert" % TABLE_NAME, 1), - ("Datastore/statement/MSSQL/%s/update" % TABLE_NAME, 1), - ("Datastore/statement/MSSQL/%s/delete" % TABLE_NAME, 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/select", 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/insert", 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/update", 1), + (f"Datastore/statement/MSSQL/{TABLE_NAME}/delete", 1), ("Datastore/operation/MSSQL/select", 1), ("Datastore/operation/MSSQL/insert", 1), ("Datastore/operation/MSSQL/update", 1), ("Datastore/operation/MSSQL/delete", 1), - ("Datastore/statement/MSSQL/%s/call" % PROCEDURE_NAME, 1), + (f"Datastore/statement/MSSQL/{PROCEDURE_NAME}/call", 1), ("Datastore/operation/MSSQL/call", 1), ("Datastore/operation/MSSQL/drop", 2), ("Datastore/operation/MSSQL/create", 2), diff --git a/tests/datastore_pymysql/test_database.py b/tests/datastore_pymysql/test_database.py index ad4db1d9c1..ef55592f0c 100644 --- a/tests/datastore_pymysql/test_database.py +++ b/tests/datastore_pymysql/test_database.py @@ -25,38 +25,37 @@ from newrelic.api.background_task import background_task DB_SETTINGS = mysql_settings()[0] -TABLE_NAME = "datastore_pymysql_" + DB_SETTINGS["namespace"] -PROCEDURE_NAME = "hello_" + DB_SETTINGS["namespace"] +TABLE_NAME = f"datastore_pymysql_{DB_SETTINGS['namespace']}" +PROCEDURE_NAME = f"hello_{DB_SETTINGS['namespace']}" HOST = instance_hostname(DB_SETTINGS["host"]) PORT = DB_SETTINGS["port"] def execute_db_calls_with_cursor(cursor): - cursor.execute("""drop table if exists %s""" % TABLE_NAME) + cursor.execute(f"""drop table if exists {TABLE_NAME}""") - cursor.execute("""create table %s """ % TABLE_NAME + """(a integer, b real, c text)""") + cursor.execute(f"create table {TABLE_NAME} (a integer, b real, c text)") cursor.executemany( - """insert into %s """ % TABLE_NAME + """values (%s, %s, %s)""", + f"insert into {TABLE_NAME} values (%s, %s, %s)", [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")], ) - cursor.execute("""select * from %s""" % TABLE_NAME) + cursor.execute(f"""select * from {TABLE_NAME}""") for row in cursor: pass - cursor.execute("""update %s""" % TABLE_NAME + """ set a=%s, b=%s, """ """c=%s where a=%s""", (4, 4.0, "4.0", 1)) + cursor.execute(f"update {TABLE_NAME} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1)) - cursor.execute("""delete from %s where a=2""" % TABLE_NAME) - cursor.execute("""drop procedure if exists %s""" % PROCEDURE_NAME) + cursor.execute(f"""delete from 
{TABLE_NAME} where a=2""") + cursor.execute(f"""drop procedure if exists {PROCEDURE_NAME}""") cursor.execute( - """CREATE PROCEDURE %s() + f"""CREATE PROCEDURE {PROCEDURE_NAME}() BEGIN SELECT 'Hello World!'; END""" - % PROCEDURE_NAME ) cursor.callproc(PROCEDURE_NAME) @@ -64,13 +63,13 @@ def execute_db_calls_with_cursor(cursor): _test_execute_via_cursor_scoped_metrics = [ ("Function/pymysql:Connect", 1), - ("Datastore/statement/MySQL/%s/select" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/insert" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/update" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/delete" % TABLE_NAME, 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/select", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/insert", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/update", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/delete", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), - ("Datastore/statement/MySQL/%s/call" % PROCEDURE_NAME, 1), + (f"Datastore/statement/MySQL/{PROCEDURE_NAME}/call", 1), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), ] @@ -80,21 +79,21 @@ def execute_db_calls_with_cursor(cursor): ("Datastore/allOther", 13), ("Datastore/MySQL/all", 13), ("Datastore/MySQL/allOther", 13), - ("Datastore/statement/MySQL/%s/select" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/insert" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/update" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/delete" % TABLE_NAME, 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/select", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/insert", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/update", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/delete", 1), ("Datastore/operation/MySQL/select", 1), ("Datastore/operation/MySQL/insert", 1), ("Datastore/operation/MySQL/update", 1), ("Datastore/operation/MySQL/delete", 1), - ("Datastore/statement/MySQL/%s/call" % PROCEDURE_NAME, 1), + (f"Datastore/statement/MySQL/{PROCEDURE_NAME}/call", 1), ("Datastore/operation/MySQL/call", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), - ("Datastore/instance/MySQL/%s/%s" % (HOST, PORT), 12), + (f"Datastore/instance/MySQL/{HOST}/{PORT}", 12), ] @@ -125,13 +124,13 @@ def test_execute_via_cursor(): _test_execute_via_cursor_context_mangaer_scoped_metrics = [ ("Function/pymysql:Connect", 1), - ("Datastore/statement/MySQL/%s/select" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/insert" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/update" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/delete" % TABLE_NAME, 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/select", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/insert", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/update", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/delete", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), - ("Datastore/statement/MySQL/%s/call" % PROCEDURE_NAME, 1), + (f"Datastore/statement/MySQL/{PROCEDURE_NAME}/call", 1), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), ] @@ -141,21 +140,21 @@ def test_execute_via_cursor(): ("Datastore/allOther", 13), ("Datastore/MySQL/all", 13), ("Datastore/MySQL/allOther", 13), - ("Datastore/statement/MySQL/%s/select" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/insert" % TABLE_NAME, 1), - 
("Datastore/statement/MySQL/%s/update" % TABLE_NAME, 1), - ("Datastore/statement/MySQL/%s/delete" % TABLE_NAME, 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/select", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/insert", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/update", 1), + (f"Datastore/statement/MySQL/{TABLE_NAME}/delete", 1), ("Datastore/operation/MySQL/select", 1), ("Datastore/operation/MySQL/insert", 1), ("Datastore/operation/MySQL/update", 1), ("Datastore/operation/MySQL/delete", 1), - ("Datastore/statement/MySQL/%s/call" % PROCEDURE_NAME, 1), + (f"Datastore/statement/MySQL/{PROCEDURE_NAME}/call", 1), ("Datastore/operation/MySQL/call", 1), ("Datastore/operation/MySQL/drop", 2), ("Datastore/operation/MySQL/create", 2), ("Datastore/operation/MySQL/commit", 2), ("Datastore/operation/MySQL/rollback", 1), - ("Datastore/instance/MySQL/%s/%s" % (HOST, PORT), 12), + (f"Datastore/instance/MySQL/{HOST}/{PORT}", 12), ] diff --git a/tests/datastore_pyodbc/test_pyodbc.py b/tests/datastore_pyodbc/test_pyodbc.py index 5a810be5f4..96f9b12416 100644 --- a/tests/datastore_pyodbc/test_pyodbc.py +++ b/tests/datastore_pyodbc/test_pyodbc.py @@ -56,20 +56,20 @@ def test_execute_via_cursor(pyodbc_driver): ) ) as connection: cursor = connection.cursor() - cursor.execute("""drop table if exists %s""" % DB_SETTINGS["table_name"]) - cursor.execute("""create table %s """ % DB_SETTINGS["table_name"] + """(a integer, b real, c text)""") + cursor.execute(f"""drop table if exists {DB_SETTINGS['table_name']}""") + cursor.execute(f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)") cursor.executemany( - """insert into %s """ % DB_SETTINGS["table_name"] + """values (?, ?, ?)""", + f"insert into {DB_SETTINGS['table_name']} values (?, ?, ?)", [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")], ) - cursor.execute("""select * from %s""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""select * from {DB_SETTINGS['table_name']}""") for row in cursor: pass cursor.execute( - """update %s """ % DB_SETTINGS["table_name"] + """set a=?, b=?, c=? where a=?""", + f"update {DB_SETTINGS['table_name']} set a=?, b=?, c=? where a=?", (4, 4.0, "4.0", 1), ) - cursor.execute("""delete from %s where a=2""" % DB_SETTINGS["table_name"]) + cursor.execute(f"""delete from {DB_SETTINGS['table_name']} where a=2""") connection.commit() cursor.execute("SELECT now()") diff --git a/tests/datastore_pysolr/test_solr.py b/tests/datastore_pysolr/test_solr.py index e17117117e..b47fa4e11b 100644 --- a/tests/datastore_pysolr/test_solr.py +++ b/tests/datastore_pysolr/test_solr.py @@ -24,21 +24,21 @@ DB_SETTINGS = solr_settings()[0] SOLR_HOST = DB_SETTINGS["host"] SOLR_PORT = DB_SETTINGS["port"] -SOLR_URL = "http://%s:%s/solr/collection" % (DB_SETTINGS["host"], DB_SETTINGS["port"]) +SOLR_URL = f"http://{DB_SETTINGS['host']}:{DB_SETTINGS['port']}/solr/collection" def _exercise_solr(solr): # Construct document names within namespace documents = ["pysolr_doc_1", "pysolr_doc_2"] - documents = [x + "_" + DB_SETTINGS["namespace"] for x in documents] + documents = [f"{x}_{DB_SETTINGS['namespace']}" for x in documents] solr.add([{"id": x} for x in documents]) - solr.search("id:%s" % documents[0]) + solr.search(f"id:{documents[0]}") solr.delete(id=documents[0]) # Delete all documents. 
- solr.delete(q="id:*_%s" % DB_SETTINGS["namespace"]) + solr.delete(q=f"id:*_{DB_SETTINGS['namespace']}") _test_solr_search_scoped_metrics = [ diff --git a/tests/datastore_redis/test_asyncio.py b/tests/datastore_redis/test_asyncio.py index f46e8515e5..47eec2d1d9 100644 --- a/tests/datastore_redis/test_asyncio.py +++ b/tests/datastore_redis/test_asyncio.py @@ -48,7 +48,7 @@ ("Datastore/Redis/allOther", datastore_all_metric_count), ("Datastore/operation/Redis/publish", 3), ( - "Datastore/instance/Redis/%s/%s" % (instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), + f"Datastore/instance/Redis/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", datastore_all_metric_count, ), ] @@ -74,7 +74,7 @@ ("Datastore/operation/Redis/get", 1), ("Datastore/operation/Redis/set", 1), ("Datastore/operation/Redis/client_list", 1), - ("Datastore/instance/Redis/%s/%s" % (instance_hostname(DB_SETTINGS["host"]), DB_SETTINGS["port"]), 3), + (f"Datastore/instance/Redis/{instance_hostname(DB_SETTINGS['host'])}/{DB_SETTINGS['port']}", 3), ] diff --git a/tests/datastore_redis/test_custom_conn_pool.py b/tests/datastore_redis/test_custom_conn_pool.py index 42e5e08921..70954f2ce3 100644 --- a/tests/datastore_redis/test_custom_conn_pool.py +++ b/tests/datastore_redis/test_custom_conn_pool.py @@ -92,7 +92,7 @@ def disconnect(self): _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Redis/{_host}/{_port}" instance_metric_count = 5 if REDIS_PY_VERSION >= (5, 0) else 3 diff --git a/tests/datastore_redis/test_execute_command.py b/tests/datastore_redis/test_execute_command.py index 741bc50345..ebc52d32e0 100644 --- a/tests/datastore_redis/test_execute_command.py +++ b/tests/datastore_redis/test_execute_command.py @@ -57,7 +57,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Redis/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Redis/{_host}/{_port}' instance_metric_count = 3 if REDIS_PY_VERSION >= (5, 0) else 1 diff --git a/tests/datastore_redis/test_generators.py b/tests/datastore_redis/test_generators.py index f747838e19..13593c1576 100644 --- a/tests/datastore_redis/test_generators.py +++ b/tests/datastore_redis/test_generators.py @@ -72,7 +72,7 @@ _host = instance_hostname(DB_SETTINGS["host"]) _port = DB_SETTINGS["port"] -_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) +_instance_metric_name = f"Datastore/instance/Redis/{_host}/{_port}" _enable_rollup_metrics.append((_instance_metric_name, 8)) diff --git a/tests/datastore_redis/test_get_and_set.py b/tests/datastore_redis/test_get_and_set.py index 720433ae3f..3a38ef37e1 100644 --- a/tests/datastore_redis/test_get_and_set.py +++ b/tests/datastore_redis/test_get_and_set.py @@ -54,7 +54,7 @@ _host = instance_hostname(DB_SETTINGS['host']) _port = DB_SETTINGS['port'] -_instance_metric_name = 'Datastore/instance/Redis/%s/%s' % (_host, _port) +_instance_metric_name = f'Datastore/instance/Redis/{_host}/{_port}' _enable_rollup_metrics.append( (_instance_metric_name, 2) diff --git a/tests/datastore_redis/test_multiple_dbs.py b/tests/datastore_redis/test_multiple_dbs.py index 9a5e299f04..f183e5cc17 100644 --- a/tests/datastore_redis/test_multiple_dbs.py +++ b/tests/datastore_redis/test_multiple_dbs.py @@ -74,8 +74,8 @@ host_2 = instance_hostname(redis_2['host']) port_2 = redis_2['port'] - 
instance_metric_name_1 = 'Datastore/instance/Redis/%s/%s' % (host_1, port_1) - instance_metric_name_2 = 'Datastore/instance/Redis/%s/%s' % (host_2, port_2) + instance_metric_name_1 = f'Datastore/instance/Redis/{host_1}/{port_1}' + instance_metric_name_2 = f'Datastore/instance/Redis/{host_2}/{port_2}' instance_metric_name_1_count = 2 if REDIS_PY_VERSION >= (5, 0) else 2 instance_metric_name_2_count = 3 if REDIS_PY_VERSION >= (5, 0) else 1 diff --git a/tests/datastore_redis/test_span_event.py b/tests/datastore_redis/test_span_event.py index 27103e971f..36293a6f8b 100644 --- a/tests/datastore_redis/test_span_event.py +++ b/tests/datastore_redis/test_span_event.py @@ -78,7 +78,7 @@ def test_span_events(instance_enabled, db_instance_enabled): settings = _enable_instance_settings.copy() hostname = instance_hostname(DB_SETTINGS['host']) exact_agents.update({ - 'peer.address': '%s:%s' % (hostname, DB_SETTINGS['port']), + 'peer.address': f"{hostname}:{DB_SETTINGS['port']}", 'peer.hostname': hostname, }) else: diff --git a/tests/datastore_redis/test_uninstrumented_methods.py b/tests/datastore_redis/test_uninstrumented_methods.py index c0be684b2f..57dd749ec3 100644 --- a/tests/datastore_redis/test_uninstrumented_methods.py +++ b/tests/datastore_redis/test_uninstrumented_methods.py @@ -117,4 +117,4 @@ def test_uninstrumented_methods(client): is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") uninstrumented |= {m for m in module_methods - IGNORED_METHODS if not is_wrapped(m)} - assert not uninstrumented, "Uninstrumented methods: %s" % sorted(uninstrumented) + assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py b/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py index ae211aa318..d4c02c690c 100644 --- a/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py +++ b/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py @@ -165,4 +165,4 @@ def test_uninstrumented_methods(): is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") uninstrumented |= {m for m in module_methods - IGNORED_METHODS if not is_wrapped(m)} - assert not uninstrumented, "Uninstrumented methods: %s" % sorted(uninstrumented) + assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/datastore_solrpy/test_solr.py b/tests/datastore_solrpy/test_solr.py index 56dcce62bf..3e02f0e401 100644 --- a/tests/datastore_solrpy/test_solr.py +++ b/tests/datastore_solrpy/test_solr.py @@ -24,18 +24,18 @@ DB_SETTINGS = solr_settings()[0] SOLR_HOST = DB_SETTINGS["host"] SOLR_PORT = DB_SETTINGS["port"] -SOLR_URL = "http://%s:%s/solr/collection" % (DB_SETTINGS["host"], DB_SETTINGS["port"]) +SOLR_URL = f"http://{DB_SETTINGS['host']}:{DB_SETTINGS['port']}/solr/collection" def _exercise_solr(solr): # Construct document names within namespace documents = ["pysolr_doc_1", "pysolr_doc_2"] - documents = [x + "_" + DB_SETTINGS["namespace"] for x in documents] + documents = [f"{x}_{DB_SETTINGS['namespace']}" for x in documents] solr.add_many([{"id": x} for x in documents]) solr.commit() - solr.query("id:%s" % documents[0]).results - solr.delete("id:*_%s" % DB_SETTINGS["namespace"]) + solr.query(f"id:{documents[0]}").results + solr.delete(f"id:*_{DB_SETTINGS['namespace']}") solr.commit() @@ -51,7 +51,7 @@ def _exercise_solr(solr): ("Datastore/allOther", 5), ("Datastore/Solr/all", 5), ("Datastore/Solr/allOther", 5), - 
("Datastore/instance/Solr/%s/%s" % (instance_hostname(SOLR_HOST), SOLR_PORT), 3), + (f"Datastore/instance/Solr/{instance_hostname(SOLR_HOST)}/{SOLR_PORT}", 3), ("Datastore/operation/Solr/add_many", 1), ("Datastore/operation/Solr/query", 1), ("Datastore/operation/Solr/commit", 2), diff --git a/tests/external_aiobotocore/test_aiobotocore_dynamodb.py b/tests/external_aiobotocore/test_aiobotocore_dynamodb.py index a38cb384a9..e4b59513e7 100644 --- a/tests/external_aiobotocore/test_aiobotocore_dynamodb.py +++ b/tests/external_aiobotocore/test_aiobotocore_dynamodb.py @@ -30,14 +30,14 @@ TEST_TABLE = "python-agent-test" _dynamodb_scoped_metrics = [ - ("Datastore/statement/DynamoDB/%s/create_table" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/put_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/get_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/update_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/query" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/scan" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/delete_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/delete_table" % TEST_TABLE, 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/create_table", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/put_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/get_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/update_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/query", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/scan", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/delete_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/delete_table", 1), ] _dynamodb_rollup_metrics = [ @@ -75,7 +75,7 @@ async def _test(): async with session.create_client( "dynamodb", region_name="us-east-1", - endpoint_url="http://localhost:%d" % PORT, + endpoint_url=f"http://localhost:{PORT}", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, ) as client: diff --git a/tests/external_aiobotocore/test_aiobotocore_s3.py b/tests/external_aiobotocore/test_aiobotocore_s3.py index 7db5379c4d..8c8e7eeab8 100644 --- a/tests/external_aiobotocore/test_aiobotocore_s3.py +++ b/tests/external_aiobotocore/test_aiobotocore_s3.py @@ -30,25 +30,25 @@ TEST_BUCKET = "python-agent-test" FILENAME = "dummy.bin" FOLDER = "aiobotocore" -ENDPOINT = "localhost:%s" % PORT -KEY = "{}/{}".format(FOLDER, FILENAME) -EXPECTED_BUCKET_URL = "http://%s/%s" % (ENDPOINT, TEST_BUCKET) -EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "/" + KEY +ENDPOINT = f"localhost:{PORT}" +KEY = f"{FOLDER}/{FILENAME}" +EXPECTED_BUCKET_URL = f"http://{ENDPOINT}/{TEST_BUCKET}" +EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}/{KEY}" _s3_scoped_metrics = [ - ("External/%s/aiobotocore/GET" % ENDPOINT, 5), - ("External/%s/aiobotocore/PUT" % ENDPOINT, 2), - ("External/%s/aiobotocore/DELETE" % ENDPOINT, 2), + (f"External/{ENDPOINT}/aiobotocore/GET", 5), + (f"External/{ENDPOINT}/aiobotocore/PUT", 2), + (f"External/{ENDPOINT}/aiobotocore/DELETE", 2), ] _s3_rollup_metrics = [ ("External/all", 9), ("External/allOther", 9), - ("External/%s/all" % ENDPOINT, 9), - ("External/%s/aiobotocore/GET" % ENDPOINT, 5), - ("External/%s/aiobotocore/PUT" % ENDPOINT, 2), - ("External/%s/aiobotocore/DELETE" % ENDPOINT, 2), + (f"External/{ENDPOINT}/all", 9), + (f"External/{ENDPOINT}/aiobotocore/GET", 5), + (f"External/{ENDPOINT}/aiobotocore/PUT", 2), + (f"External/{ENDPOINT}/aiobotocore/DELETE", 2), ] @@ -78,7 +78,7 @@ async def _test(): async with session.create_client( # nosec 
"s3", region_name="us-east-1", - endpoint_url="http://localhost:%d" % PORT, + endpoint_url=f"http://localhost:{PORT}", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, ) as client: diff --git a/tests/external_aiobotocore/test_aiobotocore_sns.py b/tests/external_aiobotocore/test_aiobotocore_sns.py index 29ae3a87b3..31e0db92f7 100644 --- a/tests/external_aiobotocore/test_aiobotocore_sns.py +++ b/tests/external_aiobotocore/test_aiobotocore_sns.py @@ -29,7 +29,7 @@ TOPIC = "arn:aws:sns:us-east-1:123456789012:some-topic" sns_metrics = [ - ("MessageBroker/SNS/Topic/Produce/Named/%s" % TOPIC, 1), + (f"MessageBroker/SNS/Topic/Produce/Named/{TOPIC}", 1), ("MessageBroker/SNS/Topic/Produce/Named/PhoneNumber", 1), ] @@ -53,7 +53,7 @@ async def _test(): async with session.create_client( "sns", region_name="us-east-1", - endpoint_url="http://localhost:%d" % PORT, + endpoint_url=f"http://localhost:{PORT}", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, ) as client: diff --git a/tests/external_aiobotocore/test_aiobotocore_sqs.py b/tests/external_aiobotocore/test_aiobotocore_sqs.py index 6d3acba65d..bb76a0d12e 100644 --- a/tests/external_aiobotocore/test_aiobotocore_sqs.py +++ b/tests/external_aiobotocore/test_aiobotocore_sqs.py @@ -27,21 +27,21 @@ from newrelic.api.background_task import background_task -URL = "localhost:%s" % PORT +URL = f"localhost:{PORT}" TEST_QUEUE = "python-agent-test" _sqs_scoped_metrics = [ - ("MessageBroker/SQS/Queue/Produce/Named/%s" % TEST_QUEUE, 2), - ("External/%s/aiobotocore/POST" % URL, 7), + (f"MessageBroker/SQS/Queue/Produce/Named/{TEST_QUEUE}", 2), + (f"External/{URL}/aiobotocore/POST", 7), ] _sqs_rollup_metrics = [ - ("MessageBroker/SQS/Queue/Produce/Named/%s" % TEST_QUEUE, 2), - ("MessageBroker/SQS/Queue/Consume/Named/%s" % TEST_QUEUE, 1), + (f"MessageBroker/SQS/Queue/Produce/Named/{TEST_QUEUE}", 2), + (f"MessageBroker/SQS/Queue/Consume/Named/{TEST_QUEUE}", 1), ("External/all", 7), ("External/allOther", 7), - ("External/%s/all" % URL, 7), - ("External/%s/aiobotocore/POST" % URL, 7), + (f"External/{URL}/all", 7), + (f"External/{URL}/aiobotocore/POST", 7), ] @@ -67,7 +67,7 @@ async def _test(): async with session.create_client( "sqs", region_name="us-east-1", - endpoint_url="http://localhost:%d" % PORT, + endpoint_url=f"http://localhost:{PORT}", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, ) as client: diff --git a/tests/external_botocore/_mock_bedrock_encoding_utils.py b/tests/external_botocore/_mock_bedrock_encoding_utils.py index 36bdbebe84..6144ebb6a8 100644 --- a/tests/external_botocore/_mock_bedrock_encoding_utils.py +++ b/tests/external_botocore/_mock_bedrock_encoding_utils.py @@ -24,7 +24,7 @@ def crc(b): def int_to_escaped_bytes(i, num_bytes=1): """Convert an integer into an arbitrary number of bytes.""" - return bytes.fromhex(("{:0" + str(num_bytes * 2) + "x}").format(i)) + return bytes.fromhex(f"{{:0{str(num_bytes * 2)}x}}".format(i)) def encode_headers(headers): @@ -32,7 +32,7 @@ def encode_headers(headers): new_headers = [] for h, v in headers.items(): if not h.startswith(":"): - h = ":%s" % h + h = f":{h}" h = h.encode("utf-8") v = v.encode("utf-8") new_headers.append(b"".join((int_to_escaped_bytes(len(h)), h, b"\x07\x00", int_to_escaped_bytes(len(v)), v))) diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index e140d80c8e..5974cdb406 100644 --- 
a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -6689,7 +6689,7 @@ def simple_get(self): # If no matches found self.send_response(500) self.end_headers() - self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8")) + self.wfile.write(f"Unknown Prompt:\n{prompt}".encode("utf-8")) return if stream: @@ -6733,7 +6733,7 @@ def extract_shortened_prompt(content, model): prompt = content.get("inputText", "") or content.get("prompt", "") or content.get("texts", [""])[0] # Sometimes there are leading whitespaces in the prompt. prompt = prompt.strip() - prompt = "::".join((model, prompt)) # Prepend model name to prompt key to keep separate copies + prompt = f"{model}::{prompt}" # Prepend model name to prompt key to keep separate copies return prompt.lstrip().split("\n")[0] @@ -6748,9 +6748,9 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs): if __name__ == "__main__": # Use this to sort dict for easier future incremental updates - print("RESPONSES = %s" % dict(sorted(RESPONSES.items(), key=lambda i: (i[1][1], i[0])))) + print(f"RESPONSES = {dict(sorted(RESPONSES.items(), key=lambda i: (i[1][1], i[0])))}") with MockExternalBedrockServer() as server: - print("MockExternalBedrockServer serving on port %s" % str(server.port)) + print(f"MockExternalBedrockServer serving on port {server.port}") while True: pass # Serve forever diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index 026ece7689..08ed863818 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -81,7 +81,7 @@ def bedrock_server(): client = boto3.client( # nosec "bedrock-runtime", "us-east-1", - endpoint_url="http://localhost:%d" % server.port, + endpoint_url=f"http://localhost:{server.port}", aws_access_key_id="NOT-A-REAL-SECRET", aws_secret_access_key="NOT-A-REAL-SECRET", ) diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 7cab446348..460b26b7d6 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -169,7 +169,7 @@ def test_bedrock_chat_completion_in_txn_with_llm_metadata( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -196,7 +196,7 @@ def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model, expe scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -223,7 +223,7 @@ def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -249,7 +249,7 @@ def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model, scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ],
background_task=True, ) @@ -290,7 +290,7 @@ def test_bedrock_chat_completion_streaming_disabled( scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -338,7 +338,7 @@ def test_bedrock_chat_completion_error_invalid_model( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -403,7 +403,7 @@ def test_bedrock_chat_completion_error_incorrect_access_key( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -456,7 +456,7 @@ def test_bedrock_chat_completion_error_incorrect_access_key_no_content( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -503,7 +503,7 @@ def test_bedrock_chat_completion_error_incorrect_access_key_with_token( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -556,7 +556,7 @@ def test_bedrock_chat_completion_error_malformed_request_body( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -607,7 +607,7 @@ def test_bedrock_chat_completion_error_malformed_response_body( scoped_metrics=[("Llm/completion/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -651,7 +651,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_body( scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -715,7 +715,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk( scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -783,7 +783,7 @@ def test_bedrock_chat_completion_error_streaming_exception( scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], custom_metrics=[ - 
("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -844,7 +844,7 @@ def test_bedrock_chat_completion_error_streaming_exception_no_content( scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -905,7 +905,7 @@ def test_bedrock_chat_completion_error_streaming_exception_with_token_count( scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -955,4 +955,4 @@ def test_chat_models_instrumented(): if not is_supported: not_supported.append(model) - assert not not_supported, "The following unsupported models were found: %s" % not_supported + assert not not_supported, f"The following unsupported models were found: {not_supported}" diff --git a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py index 00be00e17a..5f9b87b82f 100644 --- a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py +++ b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py @@ -113,7 +113,7 @@ def test_bedrock_chat_completion_in_txn_with_llm_metadata( scoped_metrics=expected_metrics, rollup_metrics=expected_metrics, custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) diff --git a/tests/external_botocore/test_bedrock_embeddings.py b/tests/external_botocore/test_bedrock_embeddings.py index 0c9e45244a..8ed17fa4f7 100644 --- a/tests/external_botocore/test_bedrock_embeddings.py +++ b/tests/external_botocore/test_bedrock_embeddings.py @@ -117,7 +117,7 @@ def test_bedrock_embedding_with_llm_metadata(set_trace_info, exercise_model, exp scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -143,7 +143,7 @@ def test_bedrock_embedding_no_content(set_trace_info, exercise_model, model_id): scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -168,7 +168,7 @@ def test_bedrock_embedding_no_llm_metadata(set_trace_info, exercise_model, expec scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -190,7 +190,7 @@ def test_bedrock_embedding_with_token_count(set_trace_info, exercise_model, expe 
scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -258,7 +258,7 @@ def test_bedrock_embedding_error_incorrect_access_key( scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -395,7 +395,7 @@ def test_bedrock_embedding_error_malformed_request_body( scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -438,7 +438,7 @@ def test_bedrock_embedding_error_malformed_response_body( scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], custom_metrics=[ - ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + (f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1), ], background_task=True, ) @@ -482,4 +482,4 @@ def test_embedding_models_instrumented(): if not is_supported: not_supported.append(model) - assert not not_supported, "The following unsupported models were found: %s" % not_supported + assert not not_supported, f"The following unsupported models were found: {not_supported}" diff --git a/tests/external_botocore/test_boto3_iam.py b/tests/external_botocore/test_boto3_iam.py index 7fc927d4f6..ae1f5e466f 100644 --- a/tests/external_botocore/test_boto3_iam.py +++ b/tests/external_botocore/test_boto3_iam.py @@ -33,7 +33,7 @@ AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) -TEST_USER = "python-agent-test-%s" % uuid.uuid4() +TEST_USER = f"python-agent-test-{uuid.uuid4()}" _iam_scoped_metrics = [ ("External/iam.amazonaws.com/botocore/POST", 3), diff --git a/tests/external_botocore/test_boto3_s3.py b/tests/external_botocore/test_boto3_s3.py index b69f592ae1..e2c57b2eb2 100644 --- a/tests/external_botocore/test_boto3_s3.py +++ b/tests/external_botocore/test_boto3_s3.py @@ -33,35 +33,35 @@ AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec AWS_REGION_NAME = "us-west-2" -TEST_BUCKET = "python-agent-test-%s" % uuid.uuid4() +TEST_BUCKET = f"python-agent-test-{uuid.uuid4()}" if BOTOCORE_VERSION < (1, 7, 41): S3_URL = "s3-us-west-2.amazonaws.com" - EXPECTED_BUCKET_URL = "https://%s/%s" % (S3_URL, TEST_BUCKET) - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "/hello_world" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/{TEST_BUCKET}" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}/hello_world" elif BOTOCORE_VERSION < (1, 28): S3_URL = "s3.us-west-2.amazonaws.com" - EXPECTED_BUCKET_URL = "https://%s/%s" % (S3_URL, TEST_BUCKET) - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "/hello_world" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/{TEST_BUCKET}" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}/hello_world" else: - S3_URL = "%s.s3.us-west-2.amazonaws.com" % TEST_BUCKET - EXPECTED_BUCKET_URL = "https://%s/" % S3_URL - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "hello_world" + S3_URL = f"{TEST_BUCKET}.s3.us-west-2.amazonaws.com" + EXPECTED_BUCKET_URL = 
f"https://{S3_URL}/" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}hello_world" _s3_scoped_metrics = [ - ("External/%s/botocore/GET" % S3_URL, 2), - ("External/%s/botocore/PUT" % S3_URL, 2), - ("External/%s/botocore/DELETE" % S3_URL, 2), + (f"External/{S3_URL}/botocore/GET", 2), + (f"External/{S3_URL}/botocore/PUT", 2), + (f"External/{S3_URL}/botocore/DELETE", 2), ] _s3_rollup_metrics = [ ("External/all", 6), ("External/allOther", 6), - ("External/%s/all" % S3_URL, 6), - ("External/%s/botocore/GET" % S3_URL, 2), - ("External/%s/botocore/PUT" % S3_URL, 2), - ("External/%s/botocore/DELETE" % S3_URL, 2), + (f"External/{S3_URL}/all", 6), + (f"External/{S3_URL}/botocore/GET", 2), + (f"External/{S3_URL}/botocore/PUT", 2), + (f"External/{S3_URL}/botocore/DELETE", 2), ] diff --git a/tests/external_botocore/test_boto3_sns.py b/tests/external_botocore/test_boto3_sns.py index 4e9cdf4d6a..141d675670 100644 --- a/tests/external_botocore/test_boto3_sns.py +++ b/tests/external_botocore/test_boto3_sns.py @@ -33,7 +33,7 @@ AWS_REGION_NAME = "us-east-1" SNS_URL = "sns-us-east-1.amazonaws.com" TOPIC = "arn:aws:sns:us-east-1:123456789012:some-topic" -sns_metrics = [("MessageBroker/SNS/Topic" "/Produce/Named/%s" % TOPIC, 1)] +sns_metrics = [(f"MessageBroker/SNS/Topic/Produce/Named/{TOPIC}", 1)] sns_metrics_phone = [("MessageBroker/SNS/Topic" "/Produce/Named/PhoneNumber", 1)] diff --git a/tests/external_botocore/test_botocore_dynamodb.py b/tests/external_botocore/test_botocore_dynamodb.py index db4aeabe17..ab4ea5d12c 100644 --- a/tests/external_botocore/test_botocore_dynamodb.py +++ b/tests/external_botocore/test_botocore_dynamodb.py @@ -33,18 +33,18 @@ AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) AWS_REGION = "us-east-1" -TEST_TABLE = "python-agent-test-%s" % uuid.uuid4() +TEST_TABLE = f"python-agent-test-{uuid.uuid4()}" _dynamodb_scoped_metrics = [ - ("Datastore/statement/DynamoDB/%s/create_table" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/put_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/get_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/update_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/query" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/scan" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/delete_item" % TEST_TABLE, 1), - ("Datastore/statement/DynamoDB/%s/delete_table" % TEST_TABLE, 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/create_table", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/put_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/get_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/update_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/query", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/scan", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/delete_item", 1), + (f"Datastore/statement/DynamoDB/{TEST_TABLE}/delete_table", 1), ] _dynamodb_rollup_metrics = [ diff --git a/tests/external_botocore/test_botocore_ec2.py b/tests/external_botocore/test_botocore_ec2.py index 6d965e27f5..4154f61a26 100644 --- a/tests/external_botocore/test_botocore_ec2.py +++ b/tests/external_botocore/test_botocore_ec2.py @@ -34,7 +34,7 @@ AWS_REGION = "us-east-1" UBUNTU_14_04_PARAVIRTUAL_AMI = "ami-c65be9ae" -TEST_INSTANCE = "python-agent-test-%s" % uuid.uuid4() +TEST_INSTANCE = f"python-agent-test-{uuid.uuid4()}" _ec2_scoped_metrics = [ ("External/ec2.us-east-1.amazonaws.com/botocore/POST", 3), diff --git a/tests/external_botocore/test_botocore_s3.py 
b/tests/external_botocore/test_botocore_s3.py index a414646f3b..2805c343bd 100644 --- a/tests/external_botocore/test_botocore_s3.py +++ b/tests/external_botocore/test_botocore_s3.py @@ -33,30 +33,30 @@ AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec AWS_REGION = "us-east-1" -TEST_BUCKET = "python-agent-test-%s" % uuid.uuid4() +TEST_BUCKET = f"python-agent-test-{uuid.uuid4()}" if BOTOCORE_VERSION >= (1, 28): - S3_URL = "%s.s3.amazonaws.com" % TEST_BUCKET - EXPECTED_BUCKET_URL = "https://%s/" % S3_URL - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "hello_world" + S3_URL = f"{TEST_BUCKET}.s3.amazonaws.com" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}hello_world" else: S3_URL = "s3.amazonaws.com" - EXPECTED_BUCKET_URL = "https://%s/%s" % (S3_URL, TEST_BUCKET) - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "/hello_world" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/{TEST_BUCKET}" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}/hello_world" _s3_scoped_metrics = [ - ("External/%s/botocore/GET" % S3_URL, 2), - ("External/%s/botocore/PUT" % S3_URL, 2), - ("External/%s/botocore/DELETE" % S3_URL, 2), + (f"External/{S3_URL}/botocore/GET", 2), + (f"External/{S3_URL}/botocore/PUT", 2), + (f"External/{S3_URL}/botocore/DELETE", 2), ] _s3_rollup_metrics = [ ("External/all", 6), ("External/allOther", 6), - ("External/%s/all" % S3_URL, 6), - ("External/%s/botocore/GET" % S3_URL, 2), - ("External/%s/botocore/PUT" % S3_URL, 2), - ("External/%s/botocore/DELETE" % S3_URL, 2), + (f"External/{S3_URL}/all", 6), + (f"External/{S3_URL}/botocore/GET", 2), + (f"External/{S3_URL}/botocore/PUT", 2), + (f"External/{S3_URL}/botocore/DELETE", 2), ] diff --git a/tests/external_botocore/test_botocore_sqs.py b/tests/external_botocore/test_botocore_sqs.py index f5ab9d6b64..b3a6e17578 100644 --- a/tests/external_botocore/test_botocore_sqs.py +++ b/tests/external_botocore/test_botocore_sqs.py @@ -80,21 +80,21 @@ AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec AWS_REGION = "us-east-1" -TEST_QUEUE = "python-agent-test-%s" % uuid.uuid4() +TEST_QUEUE = f"python-agent-test-{uuid.uuid4()}" _sqs_scoped_metrics = [ - ("MessageBroker/SQS/Queue/Produce/Named/%s" % TEST_QUEUE, 2), - ("External/%s/botocore/POST" % url, 3), + (f"MessageBroker/SQS/Queue/Produce/Named/{TEST_QUEUE}", 2), + (f"External/{url}/botocore/POST", 3), ] _sqs_rollup_metrics = [ - ("MessageBroker/SQS/Queue/Produce/Named/%s" % TEST_QUEUE, 2), - ("MessageBroker/SQS/Queue/Consume/Named/%s" % TEST_QUEUE, 1), + (f"MessageBroker/SQS/Queue/Produce/Named/{TEST_QUEUE}", 2), + (f"MessageBroker/SQS/Queue/Consume/Named/{TEST_QUEUE}", 1), ("External/all", 3), ("External/allOther", 3), - ("External/%s/all" % url, 3), - ("External/%s/botocore/POST" % url, 3), + (f"External/{url}/all", 3), + (f"External/{url}/botocore/POST", 3), ] _sqs_scoped_metrics_malformed = [ diff --git a/tests/external_botocore/test_s3transfer.py b/tests/external_botocore/test_s3transfer.py index 2aca9af1f3..2503d0ba1c 100644 --- a/tests/external_botocore/test_s3transfer.py +++ b/tests/external_botocore/test_s3transfer.py @@ -33,20 +33,20 @@ AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec AWS_REGION_NAME = "us-west-2" -TEST_BUCKET = "python-agent-test-%s" % uuid.uuid4() +TEST_BUCKET = f"python-agent-test-{uuid.uuid4()}" if BOTOCORE_VERSION < (1, 7, 41): S3_URL = "s3-us-west-2.amazonaws.com" - EXPECTED_BUCKET_URL = "https://%s/%s" % (S3_URL, TEST_BUCKET) - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "/hello_world" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/{TEST_BUCKET}" + 
EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}/hello_world" elif BOTOCORE_VERSION < (1, 28): S3_URL = "s3.us-west-2.amazonaws.com" - EXPECTED_BUCKET_URL = "https://%s/%s" % (S3_URL, TEST_BUCKET) - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "/hello_world" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/{TEST_BUCKET}" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}/hello_world" else: - S3_URL = "%s.s3.us-west-2.amazonaws.com" % TEST_BUCKET - EXPECTED_BUCKET_URL = "https://%s/" % S3_URL - EXPECTED_KEY_URL = EXPECTED_BUCKET_URL + "hello_world" + S3_URL = f"{TEST_BUCKET}.s3.us-west-2.amazonaws.com" + EXPECTED_BUCKET_URL = f"https://{S3_URL}/" + EXPECTED_KEY_URL = f"{EXPECTED_BUCKET_URL}hello_world" @dt_enabled @@ -55,13 +55,13 @@ @validate_transaction_metrics( "test_s3transfer:test_s3_context_propagation", scoped_metrics=[ - ("External/%s/botocore/PUT" % S3_URL, 2), + (f"External/{S3_URL}/botocore/PUT", 2), ], rollup_metrics=[ ("External/all", 2), ("External/allOther", 2), - ("External/%s/all" % S3_URL, 2), - ("External/%s/botocore/PUT" % S3_URL, 2), + (f"External/{S3_URL}/all", 2), + (f"External/{S3_URL}/botocore/PUT", 2), ], background_task=True, ) diff --git a/tests/external_feedparser/test_feedparser.py b/tests/external_feedparser/test_feedparser.py index 2dda3a30d2..36833dff41 100644 --- a/tests/external_feedparser/test_feedparser.py +++ b/tests/external_feedparser/test_feedparser.py @@ -29,12 +29,12 @@ def feedparser(): "feed://localhost", )) def test_feedparser_external(feedparser, server, url): - url = url + ':' + str(server.port) + url = f"{url}:{str(server.port)}" @validate_transaction_metrics( "test_feedparser_external", background_task=True, - scoped_metrics=(("External/localhost:%d/feedparser/GET" % server.port, 1),), + scoped_metrics=((f"External/localhost:{server.port}/feedparser/GET", 1),), ) @background_task(name="test_feedparser_external") def _test(): @@ -50,7 +50,7 @@ def test_feedparser_file(feedparser, stream, server): @validate_transaction_metrics( "test_feedparser_file", background_task=True, - scoped_metrics=(("External/localhost:%d/feedparser/GET" % server.port, None),), + scoped_metrics=((f"External/localhost:{server.port}/feedparser/GET", None),), ) @background_task(name="test_feedparser_file") def _test(): @@ -70,6 +70,6 @@ def _test(): )) def test_feedparser_no_transaction(feedparser, server, url): if url.startswith('http://'): - url = url + ':' + str(server.port) + url = f"{url}:{str(server.port)}" feed = feedparser.parse(url) assert feed["feed"]["link"] == "https://pypi.org/" diff --git a/tests/external_http/test_http.py b/tests/external_http/test_http.py index fd607ca39f..d3ff94c1ea 100644 --- a/tests/external_http/test_http.py +++ b/tests/external_http/test_http.py @@ -35,14 +35,14 @@ @pytest.fixture(scope="session") def metrics(server): - _external_metric = "External/localhost:%s/http/" % server.port + _external_metric = f"External/localhost:{server.port}/http/" scoped = [(_external_metric, 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%s/all" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), (_external_metric, 1), ] @@ -112,14 +112,14 @@ def _test(): @cat_enabled def test_http_cross_process_response(server): - _test_http_cross_process_response_scoped_metrics = [("ExternalTransaction/localhost:%s/1#2/test" % server.port, 1)] + _test_http_cross_process_response_scoped_metrics = [(f"ExternalTransaction/localhost:{server.port}/1#2/test", 1)] _test_http_cross_process_response_rollup_metrics = [ ("External/all", 1), 
("External/allOther", 1), - ("External/localhost:%s/all" % server.port, 1), - ("ExternalApp/localhost:%s/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%s/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_http_cross_process_response_external_node_params = [ diff --git a/tests/external_httplib/test_httplib.py b/tests/external_httplib/test_httplib.py index 0c2c2b94d7..634bb61731 100644 --- a/tests/external_httplib/test_httplib.py +++ b/tests/external_httplib/test_httplib.py @@ -41,14 +41,14 @@ def test_httplib_http_request(server): scoped = [ - ("External/localhost:%d/http/" % server.port, 1), + (f"External/localhost:{server.port}/http/", 1), ] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/http/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/http/", 1), ] @validate_transaction_metrics( @@ -67,14 +67,14 @@ def _test(): def test_httplib_https_request(server): _test_httplib_https_request_scoped_metrics = [ - ("External/localhost:%d/http/" % server.port, 1), + (f"External/localhost:{server.port}/http/", 1), ] _test_httplib_https_request_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/http/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/http/", 1), ] @validate_transaction_metrics( @@ -101,14 +101,14 @@ def _test(): def test_httplib_http_with_port_request(server): scoped = [ - ("External/localhost:%d/http/" % server.port, 1), + (f"External/localhost:{server.port}/http/", 1), ] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/http/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/http/", 1), ] @validate_transaction_metrics( @@ -168,14 +168,14 @@ def _test(): @cat_enabled @insert_incoming_headers def test_httplib_cross_process_response(server): - scoped = [("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1)] + scoped = [(f"ExternalTransaction/localhost:{server.port}/1#2/test", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] @validate_transaction_metrics( @@ -200,14 +200,14 @@ def _test(): def test_httplib_multiple_requests_cross_process_response(server): connection = httplib.HTTPConnection("localhost", server.port) - scoped = [("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1)] + scoped = [(f"ExternalTransaction/localhost:{server.port}/1#2/test", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + 
(f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] @validate_transaction_metrics( @@ -300,10 +300,10 @@ def test_span_events(server): "span_events.enabled": True, } - uri = "http://localhost:%d" % server.port + uri = f"http://localhost:{server.port}" exact_intrinsics = { - "name": "External/localhost:%d/http/" % server.port, + "name": f"External/localhost:{server.port}/http/", "type": "Span", "sampled": True, "category": "http", diff --git a/tests/external_httplib/test_urllib.py b/tests/external_httplib/test_urllib.py index cea88a8dde..90ee87d079 100644 --- a/tests/external_httplib/test_urllib.py +++ b/tests/external_httplib/test_urllib.py @@ -39,13 +39,13 @@ @pytest.fixture(scope="session") def metrics(server): - scoped = [("External/localhost:%d/urllib/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/urllib/", 1), ] return scoped, rollup @@ -61,7 +61,7 @@ def test_urlopener_http_request(server, metrics): @background_task(name="test_urllib:test_urlopener_http_request") def _test(): opener = urllib.URLopener() - opener.open("http://localhost:%d/" % server.port) + opener.open(f"http://localhost:{server.port}/") _test() @@ -77,7 +77,7 @@ def test_urlopener_https_request(server, metrics): def _test(): opener = urllib.URLopener() try: - opener.open("https://localhost:%d/" % server.port) + opener.open(f"https://localhost:{server.port}/") except Exception: pass @@ -85,13 +85,13 @@ def _test(): def test_urlopener_http_request_with_port(server): - scoped = [("External/localhost:%d/urllib/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/urllib/", 1), ] @validate_transaction_metrics( @@ -103,7 +103,7 @@ def test_urlopener_http_request_with_port(server): @background_task(name="test_urllib:test_urlopener_http_request_with_port") def _test(): opener = urllib.URLopener() - opener.open("http://localhost:%d/" % server.port) + opener.open(f"http://localhost:{server.port}/") _test() @@ -135,21 +135,21 @@ def test_urlopener_file_request(): @validate_cross_process_headers def test_urlopener_cross_process_request(server): opener = urllib.URLopener() - opener.open("http://localhost:%d/" % server.port) + opener.open(f"http://localhost:{server.port}/") @cat_enabled def test_urlopener_cross_process_response(server): _test_urlopener_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1) + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1) ] _test_urlopener_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_urlopener_cross_process_response_external_node_params = [ @@ -169,7 +169,7 @@ def 
test_urlopener_cross_process_response(server): @background_task(name="test_urllib:test_urlopener_cross_process_response") def _test(): opener = urllib.URLopener() - opener.open("http://localhost:%d/" % server.port) + opener.open(f"http://localhost:{server.port}/") _test() @@ -183,7 +183,7 @@ def test_urlretrieve_http_request(server, metrics): ) @background_task(name="test_urllib:test_urlretrieve_http_request") def _test(): - urllib.urlretrieve("http://localhost:%d/" % server.port) + urllib.urlretrieve(f"http://localhost:{server.port}/") _test() @@ -198,7 +198,7 @@ def test_urlretrieve_https_request(server, metrics): @background_task(name="test_urllib:test_urlretrieve_https_request") def _test(): try: - urllib.urlretrieve("https://localhost:%d/" % server.port) + urllib.urlretrieve(f"https://localhost:{server.port}/") except Exception: pass @@ -209,21 +209,21 @@ def _test(): @cache_outgoing_headers @validate_cross_process_headers def test_urlretrieve_cross_process_request(server): - urllib.urlretrieve("http://localhost:%d/" % server.port) + urllib.urlretrieve(f"http://localhost:{server.port}/") @cat_enabled def test_urlretrieve_cross_process_response(server): _test_urlretrieve_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1) + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1) ] _test_urlretrieve_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_urlretrieve_cross_process_response_external_node_params = [ @@ -242,6 +242,6 @@ def test_urlretrieve_cross_process_response(server): @validate_external_node_params(params=_test_urlretrieve_cross_process_response_external_node_params) @background_task(name="test_urllib:test_urlretrieve_cross_process_response") def _test(): - urllib.urlretrieve("http://localhost:%d/" % server.port) + urllib.urlretrieve(f"http://localhost:{server.port}/") _test() diff --git a/tests/external_httplib/test_urllib2.py b/tests/external_httplib/test_urllib2.py index 44e0f41d8d..c236d77071 100644 --- a/tests/external_httplib/test_urllib2.py +++ b/tests/external_httplib/test_urllib2.py @@ -36,13 +36,13 @@ @pytest.fixture(scope="session") def metrics(server): - scoped = [("External/localhost:%d/urllib2/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib2/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib2/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/urllib2/", 1), ] return scoped, rollup @@ -57,7 +57,7 @@ def test_urlopen_http_request(server, metrics): ) @background_task(name="test_urllib2:test_urlopen_http_request") def _test(): - urllib2.urlopen("http://localhost:%d/" % server.port) + urllib2.urlopen(f"http://localhost:{server.port}/") _test() @@ -72,7 +72,7 @@ def test_urlopen_https_request(server, metrics): @background_task(name="test_urllib2:test_urlopen_https_request") def _test(): try: - urllib2.urlopen("https://localhost:%d/" % server.port) + urllib2.urlopen(f"https://localhost:{server.port}/") except Exception: pass @@ -80,13 +80,13 @@ def 
_test(): def test_urlopen_http_request_with_port(server): - scoped = [("External/localhost:%d/urllib2/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib2/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib2/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/urllib2/", 1), ] @validate_transaction_metrics( @@ -97,7 +97,7 @@ def test_urlopen_http_request_with_port(server): ) @background_task(name="test_urllib2:test_urlopen_http_request_with_port") def _test(): - urllib2.urlopen("http://localhost:%d/" % server.port) + urllib2.urlopen(f"http://localhost:{server.port}/") _test() @@ -120,7 +120,7 @@ def _test(): @background_task() def test_urlopen_file_request(): path = os.path.abspath(__file__) - file_uri = "file://%s" % path + file_uri = f"file://{path}" urllib2.urlopen(file_uri) @@ -128,21 +128,21 @@ def test_urlopen_file_request(): @cache_outgoing_headers @validate_cross_process_headers def test_urlopen_cross_process_request(server): - urllib2.urlopen("http://localhost:%d/" % server.port) + urllib2.urlopen(f"http://localhost:{server.port}/") @cat_enabled def test_urlopen_cross_process_response(server): _test_urlopen_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1) + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1) ] _test_urlopen_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_urlopen_cross_process_response_external_node_params = [ @@ -161,6 +161,6 @@ def test_urlopen_cross_process_response(server): @validate_external_node_params(params=_test_urlopen_cross_process_response_external_node_params) @background_task(name="test_urllib2:test_urlopen_cross_process_response") def _test(): - urllib2.urlopen("http://localhost:%d/" % server.port) + urllib2.urlopen(f"http://localhost:{server.port}/") _test() diff --git a/tests/external_httplib2/test_httplib2.py b/tests/external_httplib2/test_httplib2.py index 288aa84ee9..4fc8e2b0d9 100644 --- a/tests/external_httplib2/test_httplib2.py +++ b/tests/external_httplib2/test_httplib2.py @@ -35,13 +35,13 @@ @pytest.fixture(scope="session") def metrics(server): - scoped = [("External/localhost:%d/httplib2/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/httplib2/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/httplib2/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/httplib2/", 1), ] return scoped, rollup @@ -94,7 +94,7 @@ def test_httplib2_http_request(server, metrics): @background_task(name="test_httplib2:test_httplib2_http_request") def _test(): connection = httplib2.Http() - response, content = connection.request("http://localhost:%d" % server.port, "GET") + response, content = connection.request(f"http://localhost:{server.port}", "GET") _test() @@ -132,15 +132,15 @@ def _test(): @cat_enabled def test_httplib2_cross_process_response(server): 
_test_httplib2_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1) + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1) ] _test_httplib2_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_httplib2_cross_process_response_external_node_params = [ diff --git a/tests/external_httpx/test_client.py b/tests/external_httpx/test_client.py index b4760a38f0..756f7d9773 100644 --- a/tests/external_httpx/test_client.py +++ b/tests/external_httpx/test_client.py @@ -82,14 +82,14 @@ def server(): def populate_metrics(server, request): SCOPED_METRICS[:] = [] method = request.getfixturevalue("method").upper() - SCOPED_METRICS.append(("External/localhost:%d/httpx/%s" % (server.port, method), 2)) + SCOPED_METRICS.append((f"External/localhost:{server.port}/httpx/{method}", 2)) def exercise_sync_client(server, client, method): with client as client: resolved_method = getattr(client, method) - resolved_method("http://localhost:%s" % server.port) - response = resolved_method("http://localhost:%s" % server.port) + resolved_method(f"http://localhost:{server.port}") + response = resolved_method(f"http://localhost:{server.port}") return response @@ -124,8 +124,8 @@ async def exercise_async_client(server, client, method): async with client as client: resolved_method = getattr(client, method) responses = await asyncio.gather( - resolved_method("http://localhost:%s" % server.port), - resolved_method("http://localhost:%s" % server.port), + resolved_method(f"http://localhost:{server.port}"), + resolved_method(f"http://localhost:{server.port}"), ) return responses @@ -184,7 +184,7 @@ def _test(): transaction = current_transaction() with httpx.Client() as client: - response = client.get("http://localhost:%s" % server.port) + response = client.get(f"http://localhost:{server.port}") transaction._test_request_headers = response.request.headers @@ -216,7 +216,7 @@ def test_async_cross_process_request(httpx, server, loop, distributed_tracing, s ) async def _test(): async with httpx.AsyncClient() as client: - response = await client.get("http://localhost:%s" % server.port) + response = await client.get(f"http://localhost:{server.port}") return response @@ -243,7 +243,7 @@ def test_sync_cross_process_override_headers(httpx, server, loop): transaction = current_transaction() with httpx.Client() as client: - response = client.get("http://localhost:%s" % server.port, headers={"newrelic": "1234"}) + response = client.get(f"http://localhost:{server.port}", headers={"newrelic": "1234"}) transaction._test_request_headers = response.request.headers @@ -265,7 +265,7 @@ def test_async_cross_process_override_headers(httpx, server, loop): async def _test(): async with httpx.AsyncClient() as client: - response = await client.get("http://localhost:%s" % server.port, headers={"newrelic": "1234"}) + response = await client.get(f"http://localhost:{server.port}", headers={"newrelic": "1234"}) return response @@ -292,7 +292,7 @@ def test_sync_client_cat_response_processing(cat_enabled, response_code, server, expected_metrics = [ ( - "ExternalTransaction/localhost:%s/1#1/WebTransaction/Function/app:beep" % 
server.port, + f"ExternalTransaction/localhost:{server.port}/1#1/WebTransaction/Function/app:beep", 1 if cat_enabled else None, ), ] @@ -308,7 +308,7 @@ def test_sync_client_cat_response_processing(cat_enabled, response_code, server, @background_task(name="test_sync_client_cat_response_processing") def _test(): with httpx.Client() as client: - response = client.get("http://localhost:%s" % server.port) + response = client.get(f"http://localhost:{server.port}") _test() @@ -330,7 +330,7 @@ def test_async_client_cat_response_processing(cat_enabled, response_code, httpx, expected_metrics = [ ( - "ExternalTransaction/localhost:%s/1#1/WebTransaction/Function/app:beep" % server.port, + f"ExternalTransaction/localhost:{server.port}/1#1/WebTransaction/Function/app:beep", 1 if cat_enabled else None, ), ] @@ -347,7 +347,7 @@ def test_async_client_cat_response_processing(cat_enabled, response_code, httpx, def _test(): async def coro(): async with httpx.AsyncClient() as client: - response = await client.get("http://localhost:%s" % server.port) + response = await client.get(f"http://localhost:{server.port}") return response @@ -370,16 +370,16 @@ def empty_hook(response): @validate_span_events( count=1, - exact_intrinsics={"name": "External/localhost:%d/httpx/GET" % server.port}, + exact_intrinsics={"name": f"External/localhost:{server.port}/httpx/GET"}, exact_agents={"http.statusCode": CAT_RESPONSE_CODE}, ) @background_task(name="test_sync_client_event_hook_exception") def make_request(client, exc_expected=True): if exc_expected: with pytest.raises(RuntimeError): - client.get("http://localhost:%s" % server.port) + client.get(f"http://localhost:{server.port}") else: - client.get("http://localhost:%s" % server.port) + client.get(f"http://localhost:{server.port}") with httpx.Client(event_hooks={"response": [exception_event_hook]}) as client: # Test client init @@ -416,7 +416,7 @@ def empty_hook(response): @validate_span_events( count=1, - exact_intrinsics={"name": "External/localhost:%d/httpx/GET" % server.port}, + exact_intrinsics={"name": f"External/localhost:{server.port}/httpx/GET"}, exact_agents={"http.statusCode": CAT_RESPONSE_CODE}, ) @background_task(name="test_sync_client_event_hook_exception") @@ -424,9 +424,9 @@ def make_request(client, exc_expected=True): async def coro(): if exc_expected: with pytest.raises(RuntimeError): - await client.get("http://localhost:%s" % server.port) + await client.get(f"http://localhost:{server.port}") else: - await client.get("http://localhost:%s" % server.port) + await client.get(f"http://localhost:{server.port}") loop.run_until_complete(coro()) @@ -464,7 +464,7 @@ def test_sync_nr_disabled(httpx, server): with httpx.Client() as client: trace = current_trace() - response = client.get("http://localhost:%s" % server.port) + response = client.get(f"http://localhost:{server.port}") assert response.status_code == 200 assert trace is None @@ -482,7 +482,7 @@ def test_async_nr_disabled(httpx, server, loop): async def _test(): async with httpx.AsyncClient() as client: - response = await client.get("http://localhost:%s" % server.port) + response = await client.get(f"http://localhost:{server.port}") return response diff --git a/tests/external_requests/test_requests.py b/tests/external_requests/test_requests.py index d25d203c08..228429e3f4 100644 --- a/tests/external_requests/test_requests.py +++ b/tests/external_requests/test_requests.py @@ -47,13 +47,13 @@ def get_requests_version(): @pytest.fixture(scope="session") def metrics(server): - scoped = 
[("External/localhost:%d/requests/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/requests/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/requests/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/requests/", 1), ] return scoped, rollup @@ -78,7 +78,7 @@ def test_http_request_get(server, metrics): ) @background_task(name="test_requests:test_http_request_get") def _test(): - requests.get("http://localhost:%d/" % server.port) + requests.get(f"http://localhost:{server.port}/") _test() @@ -95,7 +95,7 @@ def test_https_request_get(server, metrics): @background_task(name="test_requests:test_https_request_get") def _test(): try: - requests.get("https://localhost:%d/" % server.port, verify=False) # nosec + requests.get(f"https://localhost:{server.port}/", verify=False) # nosec except Exception: pass @@ -114,7 +114,7 @@ def test_http_session_send(server, metrics): @background_task(name="test_requests:test_http_session_send") def _test(): session = requests.Session() - req = requests.Request("GET", "http://localhost:%d/" % server.port) + req = requests.Request("GET", f"http://localhost:{server.port}/") prep_req = req.prepare() session.send(prep_req) @@ -189,7 +189,7 @@ def test_requests_cross_process_request(distributed_tracing, span_events, server @cache_outgoing_headers @validate_cross_process_headers def _test(): - requests.get("http://localhost:%d/" % server.port) + requests.get(f"http://localhost:{server.port}/") _test = override_application_settings( { @@ -205,15 +205,15 @@ def _test(): @cat_enabled def test_requests_cross_process_response(server): _test_requests_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1) + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1) ] _test_requests_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_requests_cross_process_response_external_node_params = [ @@ -233,6 +233,6 @@ def test_requests_cross_process_response(server): @validate_external_node_params(params=_test_requests_cross_process_response_external_node_params) @background_task(name="test_requests:test_requests_cross_process_response") def _test(): - requests.get("http://localhost:%d/" % server.port) + requests.get(f"http://localhost:{server.port}/") _test() diff --git a/tests/external_requests/test_span_event.py b/tests/external_requests/test_span_event.py index 575b2d52b7..0eedb99a55 100644 --- a/tests/external_requests/test_span_event.py +++ b/tests/external_requests/test_span_event.py @@ -29,14 +29,14 @@ def server(): yield _server -@pytest.mark.parametrize('path', ('', '/foo', '/' + 'a' * 256)) +@pytest.mark.parametrize('path', ('', '/foo', f"/{'a' * 256}")) def test_span_events(server, path): _settings = { 'distributed_tracing.enabled': True, 'span_events.enabled': True, } - uri = 'http://localhost:%d' % server.port + uri = f'http://localhost:{server.port}' if path: uri += path @@ -44,7 +44,7 @@ def test_span_events(server, path): expected_uri = uri[:255] exact_intrinsics = { - 
'name': 'External/localhost:%d/requests/' % server.port, + 'name': f'External/localhost:{server.port}/requests/', 'type': 'Span', 'sampled': True, 'priority': 0.5, diff --git a/tests/external_urllib3/test_urllib3.py b/tests/external_urllib3/test_urllib3.py index a3b9d59265..afb1e4a105 100644 --- a/tests/external_urllib3/test_urllib3.py +++ b/tests/external_urllib3/test_urllib3.py @@ -45,13 +45,13 @@ @pytest.fixture(scope="session") def metrics(server): - scoped = [("External/localhost:%d/urllib3/GET" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib3/GET", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib3/GET" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/urllib3/GET", 1), ] return scoped, rollup @@ -67,7 +67,7 @@ def test_http_request_connection_pool_urlopen(server, metrics): ) @background_task(name="test_urllib3:test_http_request_connection_pool_urlopen") def _test(): - pool = urllib3.HTTPConnectionPool("localhost:%d" % server.port) + pool = urllib3.HTTPConnectionPool(f"localhost:{server.port}") pool.urlopen("GET", "/") _test() @@ -83,7 +83,7 @@ def test_http_request_connection_pool_request(server, metrics): ) @background_task(name="test_urllib3:test_http_request_connection_pool_request") def _test(): - pool = urllib3.HTTPConnectionPool("localhost:%d" % server.port) + pool = urllib3.HTTPConnectionPool(f"localhost:{server.port}") pool.request("GET", "/") _test() @@ -99,7 +99,7 @@ def test_http_request_connection_from_url_request(server, metrics): ) @background_task(name="test_urllib3:test_http_request_connection_from_url_request") def _test(): - conn = urllib3.connection_from_url("http://localhost:%d" % server.port) + conn = urllib3.connection_from_url(f"http://localhost:{server.port}") conn.request("GET", "/") _test() @@ -116,7 +116,7 @@ def test_http_request_pool_manager_urlopen(server, metrics): @background_task(name="test_urllib3:test_http_request_pool_manager_urlopen") def _test(): pool = urllib3.PoolManager(5) - pool.urlopen("GET", "http://localhost:%d/" % server.port) + pool.urlopen("GET", f"http://localhost:{server.port}/") _test() @@ -132,7 +132,7 @@ def test_https_request_connection_pool_urlopen(server, metrics): @background_task(name="test_urllib3:test_https_request_connection_pool_urlopen") def _test(): # Setting retries to 0 so that metrics are recorded only once - pool = urllib3.HTTPSConnectionPool("localhost:%d" % server.port, retries=0) + pool = urllib3.HTTPSConnectionPool(f"localhost:{server.port}", retries=0) try: pool.urlopen("GET", "/") except Exception: @@ -152,7 +152,7 @@ def test_https_request_connection_pool_request(server, metrics): @background_task(name="test_urllib3:test_https_request_connection_pool_request") def _test(): # Setting retries to 0 so that metrics are recorded only once - pool = urllib3.HTTPSConnectionPool("localhost:%d" % server.port, retries=0) + pool = urllib3.HTTPSConnectionPool(f"localhost:{server.port}", retries=0) try: pool.request("GET", "/") except Exception: @@ -162,13 +162,13 @@ def _test(): def test_port_included(server): - scoped = [("External/localhost:%d/urllib3/GET" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib3/GET", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib3/GET" % server.port, 1), + (f"External/localhost:{server.port}/all", 
1), + (f"External/localhost:{server.port}/urllib3/GET", 1), ] @validate_transaction_errors(errors=[]) @@ -177,7 +177,7 @@ def test_port_included(server): ) @background_task(name="test_urllib3:test_port_included") def _test(): - conn = urllib3.connection_from_url("http://localhost:%d" % server.port) + conn = urllib3.connection_from_url(f"http://localhost:{server.port}") conn.request("GET", "/") _test() @@ -190,13 +190,13 @@ def _test(): get_package_version_tuple("urllib3") < (1, 8), reason="urllib3.connection.HTTPConnection added in 1.8" ) def test_HTTPConnection_port_included(server): - scoped = [("External/localhost:%d/urllib3/" % server.port, 1)] + scoped = [(f"External/localhost:{server.port}/urllib3/", 1)] rollup = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("External/localhost:%d/urllib3/" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"External/localhost:{server.port}/urllib3/", 1), ] @validate_transaction_errors(errors=[]) @@ -208,7 +208,7 @@ def test_HTTPConnection_port_included(server): ) @background_task(name="test_urllib3:test_HTTPConnection_port_included") def _test(): - conn = urllib3.connection.HTTPConnection("localhost:%d" % server.port) + conn = urllib3.connection.HTTPConnection(f"localhost:{server.port}") conn.request("GET", "/") _test() @@ -228,7 +228,7 @@ def test_urlopen_cross_process_request(distributed_tracing, span_events, server) @cache_outgoing_headers @validate_cross_process_headers def _test(): - pool = urllib3.HTTPConnectionPool("localhost:%d" % server.port) + pool = urllib3.HTTPConnectionPool(f"localhost:{server.port}") pool.urlopen("GET", "/") _test = override_application_settings( @@ -245,15 +245,15 @@ def _test(): @cat_enabled def test_urlopen_cross_process_response(server): _test_urlopen_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1) + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1) ] _test_urlopen_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/localhost:%d/all" % server.port, 1), - ("ExternalApp/localhost:%d/1#2/all" % server.port, 1), - ("ExternalTransaction/localhost:%d/1#2/test" % server.port, 1), + (f"External/localhost:{server.port}/all", 1), + (f"ExternalApp/localhost:{server.port}/1#2/all", 1), + (f"ExternalTransaction/localhost:{server.port}/1#2/test", 1), ] _test_urlopen_cross_process_response_external_node_params = [ @@ -273,7 +273,7 @@ def test_urlopen_cross_process_response(server): @validate_external_node_params(params=_test_urlopen_cross_process_response_external_node_params) @background_task(name="test_urllib3:test_urlopen_cross_process_response") def _test(): - pool = urllib3.HTTPConnectionPool("localhost:%d" % server.port) + pool = urllib3.HTTPConnectionPool(f"localhost:{server.port}") pool.urlopen("GET", "/") _test() diff --git a/tests/framework_aiohttp/_target_application.py b/tests/framework_aiohttp/_target_application.py index f15e7fd65b..7dde99ce0b 100644 --- a/tests/framework_aiohttp/_target_application.py +++ b/tests/framework_aiohttp/_target_application.py @@ -113,7 +113,7 @@ async def websocket_handler(request): while not ws.closed: msg = await ws.receive() if msg.type == WSMsgType.TEXT: - result = ws.send_str("/" + msg.data) + result = ws.send_str(f"/{msg.data}") if hasattr(result, "__await__"): await result diff --git a/tests/framework_aiohttp/conftest.py b/tests/framework_aiohttp/conftest.py index 317383f0c2..ed4c0963b4 
100644 --- a/tests/framework_aiohttp/conftest.py +++ b/tests/framework_aiohttp/conftest.py @@ -137,7 +137,7 @@ def respond_with_cat_header(self): @pytest.fixture(scope="session") def local_server_info(mock_header_server): - host_port = "127.0.0.1:%d" % mock_header_server.port - metric = "External/%s/aiohttp/" % host_port - url = "http://" + host_port + host_port = f"127.0.0.1:{mock_header_server.port}" + metric = f"External/{host_port}/aiohttp/" + url = f"http://{host_port}" return ServerInfo(metric, url) diff --git a/tests/framework_aiohttp/test_client.py b/tests/framework_aiohttp/test_client.py index 96bbb46f01..932c46dfb4 100644 --- a/tests/framework_aiohttp/test_client.py +++ b/tests/framework_aiohttp/test_client.py @@ -198,10 +198,10 @@ def test_ws_connect_yield_from(event_loop, local_server_info, method, exc_expect "fetch_multiple", background_task=True, scoped_metrics=[ - (local_server_info.base_metric + "GET", 2), + (f"{local_server_info.base_metric}GET", 2), ], rollup_metrics=[ - (local_server_info.base_metric + "GET", 2), + (f"{local_server_info.base_metric}GET", 2), ], ) def task_test(): diff --git a/tests/framework_aiohttp/test_client_async_await.py b/tests/framework_aiohttp/test_client_async_await.py index 21278a92b0..87337648b7 100644 --- a/tests/framework_aiohttp/test_client_async_await.py +++ b/tests/framework_aiohttp/test_client_async_await.py @@ -165,10 +165,10 @@ def test_ws_connect_async_await(event_loop, local_server_info, method, exc_expec "fetch_multiple", background_task=True, scoped_metrics=[ - (local_server_info.base_metric + "GET", 2), + (f"{local_server_info.base_metric}GET", 2), ], rollup_metrics=[ - (local_server_info.base_metric + "GET", 2), + (f"{local_server_info.base_metric}GET", 2), ], ) def task_test(): diff --git a/tests/framework_aiohttp/test_client_cat.py b/tests/framework_aiohttp/test_client_cat.py index 5a3172d0c8..edd14498db 100644 --- a/tests/framework_aiohttp/test_client_cat.py +++ b/tests/framework_aiohttp/test_client_cat.py @@ -76,7 +76,7 @@ async def fetch(url, headers=None, raise_for_status=False, connector=None): def test_outbound_cross_process_headers(event_loop, cat_enabled, distributed_tracing, span_events, mock_header_server): @background_task(name="test_outbound_cross_process_headers") async def _test(): - headers = await fetch("http://127.0.0.1:%d" % mock_header_server.port) + headers = await fetch(f"http://127.0.0.1:{mock_header_server.port}") transaction = current_transaction() transaction._test_request_headers = headers @@ -122,7 +122,7 @@ def test(): @pytest.mark.parametrize("customer_headers", _customer_headers_tests) def test_outbound_cross_process_headers_custom_headers(event_loop, customer_headers, mock_header_server): headers = event_loop.run_until_complete( - background_task()(fetch)("http://127.0.0.1:%d" % mock_header_server.port, customer_headers.copy()) + background_task()(fetch)(f"http://127.0.0.1:{mock_header_server.port}", customer_headers.copy()) ) # always honor customer headers @@ -131,7 +131,7 @@ def test_outbound_cross_process_headers_custom_headers(event_loop, customer_head def test_outbound_cross_process_headers_no_txn(event_loop, mock_header_server): - headers = event_loop.run_until_complete(fetch("http://127.0.0.1:%d" % mock_header_server.port)) + headers = event_loop.run_until_complete(fetch(f"http://127.0.0.1:{mock_header_server.port}")) assert not headers.get(ExternalTrace.cat_id_key) assert not headers.get(ExternalTrace.cat_transaction_key) @@ -146,7 +146,7 @@ async def test(): delattr(transaction, "guid") 
try: - headers = await fetch("http://127.0.0.1:%d" % mock_header_server.port) + headers = await fetch(f"http://127.0.0.1:{mock_header_server.port}") assert not headers.get(ExternalTrace.cat_id_key) assert not headers.get(ExternalTrace.cat_transaction_key) @@ -180,19 +180,19 @@ def test_process_incoming_headers( # always called and thus makes sure the trace is not ended before # StopIteration is called. server, response_values = mock_external_http_server - address = "http://127.0.0.1:%d" % server.port + address = f"http://127.0.0.1:{server.port}" port = server.port _test_cross_process_response_scoped_metrics = [ - ("ExternalTransaction/127.0.0.1:%d/1#2/test" % port, 1 if cat_enabled else None) + (f"ExternalTransaction/127.0.0.1:{port}/1#2/test", 1 if cat_enabled else None) ] _test_cross_process_response_rollup_metrics = [ ("External/all", 1), ("External/allOther", 1), - ("External/127.0.0.1:%d/all" % port, 1), - ("ExternalApp/127.0.0.1:%d/1#2/all" % port, 1 if cat_enabled else None), - ("ExternalTransaction/127.0.0.1:%d/1#2/test" % port, 1 if cat_enabled else None), + (f"External/127.0.0.1:{port}/all", 1), + (f"ExternalApp/127.0.0.1:{port}/1#2/all", 1 if cat_enabled else None), + (f"ExternalTransaction/127.0.0.1:{port}/1#2/test", 1 if cat_enabled else None), ] _test_cross_process_response_external_node_params = [ diff --git a/tests/framework_aiohttp/test_middleware.py b/tests/framework_aiohttp/test_middleware.py index 6cbf86677a..246ae1ad09 100644 --- a/tests/framework_aiohttp/test_middleware.py +++ b/tests/framework_aiohttp/test_middleware.py @@ -74,7 +74,7 @@ def _test(): rollup_metrics = [ ("Function/_target_application:index", 1), (metric, 1), - ("Python/Framework/aiohttp/%s" % aiohttp.__version__, 1), + (f"Python/Framework/aiohttp/{aiohttp.__version__}", 1), ] _test = validate_transaction_metrics( diff --git a/tests/framework_aiohttp/test_server.py b/tests/framework_aiohttp/test_server.py index 6a5ef0d10e..69ebd4b21b 100644 --- a/tests/framework_aiohttp/test_server.py +++ b/tests/framework_aiohttp/test_server.py @@ -87,11 +87,11 @@ async def fetch(): @validate_transaction_metrics( metric_name, scoped_metrics=[ - ("Function/%s" % metric_name, 1), + (f"Function/{metric_name}", 1), ], rollup_metrics=[ - ("Function/%s" % metric_name, 1), - ("Python/Framework/aiohttp/%s" % aiohttp.__version__, 1), + (f"Function/{metric_name}", 1), + (f"Python/Framework/aiohttp/{aiohttp.__version__}", 1), ], ) @validate_transaction_event_attributes( @@ -184,11 +184,11 @@ async def multi_fetch(loop): @validate_transaction_metrics( metric_name, scoped_metrics=[ - ("Function/%s" % metric_name, 1), + (f"Function/{metric_name}", 1), ], rollup_metrics=[ - ("Function/%s" % metric_name, 1), - ("Python/Framework/aiohttp/%s" % aiohttp.__version__, 1), + (f"Function/{metric_name}", 1), + (f"Python/Framework/aiohttp/{aiohttp.__version__}", 1), ], ) @validate_transaction_event_attributes( diff --git a/tests/framework_aiohttp/test_server_cat.py b/tests/framework_aiohttp/test_server_cat.py index 28af90d8df..8b18074558 100644 --- a/tests/framework_aiohttp/test_server_cat.py +++ b/tests/framework_aiohttp/test_server_cat.py @@ -113,7 +113,7 @@ async def fetch(): app_data = json.loads(deobfuscate(raw_headers["X-NewRelic-App-Data"], ENCODING_KEY)) assert app_data[0] == cat_id - assert app_data[1] == ("WebTransaction/Function/%s" % metric_name) + assert app_data[1] == f"WebTransaction/Function/{metric_name}" else: assert "X-NewRelic-App-Data" not in resp.headers @@ -135,7 +135,7 @@ async def fetch(): # a fixture from 
conftest.py/_target_application.py @validate_analytics_catmap_data( - "WebTransaction/Function/%s" % metric_name, + f"WebTransaction/Function/{metric_name}", expected_attributes=expected_intrinsics, non_expected_attributes=forgone_intrinsics, ) diff --git a/tests/framework_bottle/test_application.py b/tests/framework_bottle/test_application.py index 21a748e010..32db9a64a5 100644 --- a/tests/framework_bottle/test_application.py +++ b/tests/framework_bottle/test_application.py @@ -41,6 +41,8 @@ version = tuple(version) assert version > (0, 1), "version information not found" +version_metrics = [(f"Python/Framework/Bottle/{'.'.join(str(v) for v in version)}", 1)] + requires_auth_basic = pytest.mark.skipif(version < (0, 9, 0), reason="Bottle only added auth_basic in 0.9.0.") requires_plugins = pytest.mark.skipif(version < (0, 9, 0), reason="Bottle only added auth_basic in 0.9.0.") @@ -56,7 +58,7 @@ else: _test_application_index_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) -_test_application_index_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] +_test_application_index_custom_metrics = version_metrics.copy() @validate_code_level_metrics("_target_application", "index_page") @@ -83,7 +85,7 @@ def test_application_index(target_application): else: _test_application_error_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) -_test_application_error_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] +_test_application_error_custom_metrics = version_metrics.copy() _test_application_error_errors = ["builtins:RuntimeError"] @@ -110,7 +112,7 @@ def test_application_error(target_application): else: _test_application_not_found_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) -_test_application_not_found_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] +_test_application_not_found_custom_metrics = version_metrics.copy() @validate_code_level_metrics("_target_application", "error404_page") @@ -137,7 +139,7 @@ def test_application_not_found(target_application): else: _test_application_auth_basic_fail_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) -_test_application_auth_basic_fail_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] +_test_application_auth_basic_fail_custom_metrics = version_metrics.copy() @requires_auth_basic @@ -164,7 +166,7 @@ def test_application_auth_basic_fail(target_application): else: _test_application_auth_basic_okay_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) -_test_application_auth_basic_okay_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] +_test_application_auth_basic_okay_custom_metrics = version_metrics.copy() @requires_auth_basic @@ -177,7 +179,7 @@ def test_application_auth_basic_fail(target_application): ) def test_application_auth_basic_okay(target_application): authorization_value = base64.b64encode(b"user:password").decode("Latin-1") - environ = {"HTTP_AUTHORIZATION": "Basic " + authorization_value} + environ = {"HTTP_AUTHORIZATION": f"Basic {authorization_value}"} response = target_application.get("/auth", extra_environ=environ) response.mustcontain("AUTH OKAY") @@ -194,7 +196,7 @@ def test_application_auth_basic_okay(target_application): else: _test_application_plugin_error_scoped_metrics.extend([("Function/bottle:Bottle.__call__", 1)]) -_test_application_plugin_error_custom_metrics = [("Python/Framework/Bottle/%s.%s.%s" % version, 1)] +_test_application_plugin_error_custom_metrics = 
version_metrics.copy() @requires_plugins diff --git a/tests/framework_django/test_asgi_application.py b/tests/framework_django/test_asgi_application.py index 4570427662..39e99db2f0 100644 --- a/tests/framework_django/test_asgi_application.py +++ b/tests/framework_django/test_asgi_application.py @@ -45,7 +45,7 @@ ] rollup_metrics = scoped_metrics + [ - ('Python/Framework/Django/%s' % django.get_version(), 1), + (f'Python/Framework/Django/{django.get_version()}', 1), ] @@ -107,7 +107,7 @@ def test_asgi_class_based_view(application, url, view_name): @validate_transaction_errors(errors=[]) @validate_transaction_metrics(view_name, - scoped_metrics=[('Function/' + view_name, 1)] + scoped_metrics, + scoped_metrics=[(f"Function/{view_name}", 1)] + scoped_metrics, rollup_metrics=rollup_metrics) @validate_code_level_metrics(namespace, func) def _test(): diff --git a/tests/framework_django/views.py b/tests/framework_django/views.py index e97e273ded..9a5b33e1d8 100644 --- a/tests/framework_django/views.py +++ b/tests/framework_django/views.py @@ -118,6 +118,5 @@ def gzip_html_insertion(request): # contents must be at least 200 bytes for gzip middleware to work contents = "*" * 200 return HttpResponse( - "<html><head>Some header</head>" - "<body><h1>My First Heading</h1><p>%s</p></body></html>" % contents + f"<html><head>Some header</head><body><h1>My First Heading</h1><p>{contents}</p></body></html>
" ) diff --git a/tests/framework_falcon/test_application.py b/tests/framework_falcon/test_application.py index 6b64c8c678..89175cee1d 100644 --- a/tests/framework_falcon/test_application.py +++ b/tests/framework_falcon/test_application.py @@ -26,7 +26,7 @@ def test_basic(app): _test_basic_metrics = ( - ('Function/' + app.name_prefix + '.__call__', 1), + (f"Function/{app.name_prefix}.__call__", 1), ('Function/_target_application:Index.on_get', 1), ) @@ -45,7 +45,7 @@ def _test(): @validate_transaction_errors(errors=[]) def test_ignored_status_code(app): - @validate_transaction_metrics(app.name_prefix + '._handle_exception') + @validate_transaction_metrics(f"{app.name_prefix}._handle_exception") def _test(): app.get('/foobar', status=404) @@ -56,7 +56,7 @@ def _test(): def test_error_recorded(app): @validate_transaction_errors(errors=[app.not_found_error]) - @validate_transaction_metrics(app.name_prefix + '._handle_exception') + @validate_transaction_metrics(f"{app.name_prefix}._handle_exception") def _test(): app.get('/foobar', status=404) diff --git a/tests/framework_fastapi/test_application.py b/tests/framework_fastapi/test_application.py index 85d230e26b..135a7268f3 100644 --- a/tests/framework_fastapi/test_application.py +++ b/tests/framework_fastapi/test_application.py @@ -29,7 +29,7 @@ def test_application(caplog, app, endpoint, transaction_name): caplog.set_level(logging.ERROR) - @validate_transaction_metrics(transaction_name, scoped_metrics=[("Function/" + transaction_name, 1)]) + @validate_transaction_metrics(transaction_name, scoped_metrics=[(f"Function/{transaction_name}", 1)]) @validate_code_level_metrics(*transaction_name.split(":")) def _test(): response = app.get(endpoint) diff --git a/tests/framework_flask/_test_compress.py b/tests/framework_flask/_test_compress.py index 8ca66c09c2..e0c90d56a5 100644 --- a/tests/framework_flask/_test_compress.py +++ b/tests/framework_flask/_test_compress.py @@ -28,7 +28,7 @@ @application.route("/compress") def index_page(): - return "" + 500 * "X" + "" + return f"{500 * 'X'}" @application.route("/html_insertion") diff --git a/tests/framework_graphql/test_application.py b/tests/framework_graphql/test_application.py index 174ee20a47..4b5883b549 100644 --- a/tests/framework_graphql/test_application.py +++ b/tests/framework_graphql/test_application.py @@ -93,27 +93,27 @@ def _graphql_base_rollup_metrics(framework, version, background_task=True): graphql_version = get_package_version("graphql-core") metrics = [ - ("Python/Framework/GraphQL/%s" % graphql_version, 1), + (f"Python/Framework/GraphQL/{graphql_version}", 1), ("GraphQL/all", 1), - ("GraphQL/%s/all" % framework, 1), + (f"GraphQL/{framework}/all", 1), ] if background_task: metrics.extend( [ ("GraphQL/allOther", 1), - ("GraphQL/%s/allOther" % framework, 1), + (f"GraphQL/{framework}/allOther", 1), ] ) else: metrics.extend( [ ("GraphQL/allWeb", 1), - ("GraphQL/%s/allWeb" % framework, 1), + (f"GraphQL/{framework}/allWeb", 1), ] ) if framework != "GraphQL": - metrics.append(("Python/Framework/%s/%s" % (framework, version), 1)) + metrics.append((f"Python/Framework/{framework}/{version}", 1)) return metrics @@ -143,12 +143,12 @@ def test_query_and_mutation(target_application): type_annotation = "!" 
if framework == "Strawberry" else "" _test_mutation_scoped_metrics = [ - ("GraphQL/resolve/%s/storage_add" % framework, 1), - ("GraphQL/operation/%s/mutation//%s" % (framework, mutation_path), 1), + (f"GraphQL/resolve/{framework}/storage_add", 1), + (f"GraphQL/operation/{framework}/mutation//{mutation_path}", 1), ] _test_query_scoped_metrics = [ - ("GraphQL/resolve/%s/storage" % framework, 1), - ("GraphQL/operation/%s/query//storage" % framework, 1), + (f"GraphQL/resolve/{framework}/storage", 1), + (f"GraphQL/operation/{framework}/query//storage", 1), ] _expected_mutation_operation_attributes = { "graphql.operation.type": "mutation", @@ -168,16 +168,16 @@ def test_query_and_mutation(target_application): "graphql.field.name": "storage", "graphql.field.parentType": "Query", "graphql.field.path": "storage", - "graphql.field.returnType": "[String%s]%s" % (type_annotation, type_annotation), + "graphql.field.returnType": f"[String{type_annotation}]{type_annotation}", } @validate_code_level_metrics( - "framework_%s._target_schema_%s" % (framework.lower(), schema_type), "resolve_storage_add" + f"framework_{framework.lower()}._target_schema_{schema_type}", "resolve_storage_add" ) @validate_span_events(exact_agents=_expected_mutation_operation_attributes) @validate_span_events(exact_agents=_expected_mutation_resolver_attributes) @validate_transaction_metrics( - "mutation//%s" % mutation_path, + f"mutation//{mutation_path}", "GraphQL", scoped_metrics=_test_mutation_scoped_metrics, rollup_metrics=_test_mutation_scoped_metrics + _graphql_base_rollup_metrics(framework, version, is_bg), @@ -192,7 +192,7 @@ def _mutation(): response = target_application(query) assert response["storage_add"] == "abc" or response["storage_add"]["string"] == "abc" - @validate_code_level_metrics("framework_%s._target_schema_%s" % (framework.lower(), schema_type), "resolve_storage") + @validate_code_level_metrics(f"framework_{framework.lower()}._target_schema_{schema_type}", "resolve_storage") @validate_span_events(exact_agents=_expected_query_operation_attributes) @validate_span_events(exact_agents=_expected_query_resolver_attributes) @validate_transaction_metrics( @@ -216,22 +216,22 @@ def _query(): def test_middleware(target_application, middleware): framework, version, target_application, is_bg, schema_type, extra_spans = target_application - name = "%s:%s" % (middleware.__module__, middleware.__name__) + name = f"{middleware.__module__}:{middleware.__name__}" if "async" in name: if schema_type != "async": pytest.skip("Async middleware not supported in sync applications.") _test_middleware_metrics = [ - ("GraphQL/operation/%s/query//hello" % framework, 1), - ("GraphQL/resolve/%s/hello" % framework, 1), - ("Function/%s" % name, 1), + (f"GraphQL/operation/{framework}/query//hello", 1), + (f"GraphQL/resolve/{framework}/hello", 1), + (f"Function/{name}", 1), ] # Span count 5: Transaction, Operation, Middleware, and 1 Resolver and Resolver Function span_count = 5 + extra_spans @validate_code_level_metrics(*name.split(":")) - @validate_code_level_metrics("framework_%s._target_schema_%s" % (framework.lower(), schema_type), "resolve_hello") + @validate_code_level_metrics(f"framework_{framework.lower()}._target_schema_{schema_type}", "resolve_hello") @validate_span_events(count=span_count) @validate_transaction_metrics( "query//hello", @@ -255,21 +255,21 @@ def test_exception_in_middleware(target_application, middleware): query = "query MyQuery { error_middleware }" field = "error_middleware" - name = "%s:%s" % 
(middleware.__module__, middleware.__name__) + name = f"{middleware.__module__}:{middleware.__name__}" if "async" in name: if schema_type != "async": pytest.skip("Async middleware not supported in sync applications.") # Metrics _test_exception_scoped_metrics = [ - ("GraphQL/operation/%s/query/MyQuery/%s" % (framework, field), 1), - ("GraphQL/resolve/%s/%s" % (framework, field), 1), - ("Function/%s" % name, 1), + (f"GraphQL/operation/{framework}/query/MyQuery/{field}", 1), + (f"GraphQL/resolve/{framework}/{field}", 1), + (f"Function/{name}", 1), ] _test_exception_rollup_metrics = [ ("Errors/all", 1), - ("Errors/all%s" % ("Other" if is_bg else "Web"), 1), - ("Errors/%sTransaction/GraphQL/%s" % ("Other" if is_bg else "Web", name), 1), + (f"Errors/all{'Other' if is_bg else 'Web'}", 1), + (f"Errors/{'Other' if is_bg else 'Web'}Transaction/GraphQL/{name}", 1), ] + _test_exception_scoped_metrics # Attributes @@ -306,19 +306,19 @@ def _test(): @dt_enabled def test_exception_in_resolver(target_application, field): framework, version, target_application, is_bg, schema_type, extra_spans = target_application - query = "query MyQuery { %s }" % field + query = f"query MyQuery {{ {field} }}" - txn_name = "framework_%s._target_schema_%s:resolve_error" % (framework.lower(), schema_type) + txn_name = f"framework_{framework.lower()}._target_schema_{schema_type}:resolve_error" # Metrics _test_exception_scoped_metrics = [ - ("GraphQL/operation/%s/query/MyQuery/%s" % (framework, field), 1), - ("GraphQL/resolve/%s/%s" % (framework, field), 1), + (f"GraphQL/operation/{framework}/query/MyQuery/{field}", 1), + (f"GraphQL/resolve/{framework}/{field}", 1), ] _test_exception_rollup_metrics = [ ("Errors/all", 1), - ("Errors/all%s" % ("Other" if is_bg else "Web"), 1), - ("Errors/%sTransaction/GraphQL/%s" % ("Other" if is_bg else "Web", txn_name), 1), + (f"Errors/all{'Other' if is_bg else 'Web'}", 1), + (f"Errors/{'Other' if is_bg else 'Web'}Transaction/GraphQL/{txn_name}", 1), ] + _test_exception_scoped_metrics # Attributes @@ -373,12 +373,12 @@ def test_exception_in_validation(target_application, query, exc_class): exc_class = callable_name(GraphQLError) _test_exception_scoped_metrics = [ - ("GraphQL/operation/%s///" % framework, 1), + (f"GraphQL/operation/{framework}///", 1), ] _test_exception_rollup_metrics = [ ("Errors/all", 1), - ("Errors/all%s" % ("Other" if is_bg else "Web"), 1), - ("Errors/%sTransaction/GraphQL/%s" % ("Other" if is_bg else "Web", txn_name), 1), + (f"Errors/all{'Other' if is_bg else 'Web'}", 1), + (f"Errors/{'Other' if is_bg else 'Web'}Transaction/GraphQL/{txn_name}", 1), ] + _test_exception_scoped_metrics # Attributes @@ -407,7 +407,7 @@ def _test(): @dt_enabled def test_operation_metrics_and_attrs(target_application): framework, version, target_application, is_bg, schema_type, extra_spans = target_application - operation_metrics = [("GraphQL/operation/%s/query/MyQuery/library" % framework, 1)] + operation_metrics = [(f"GraphQL/operation/{framework}/query/MyQuery/library", 1)] operation_attrs = { "graphql.operation.type": "query", "graphql.operation.name": "MyQuery", @@ -437,14 +437,14 @@ def _test(): @dt_enabled def test_field_resolver_metrics_and_attrs(target_application): framework, version, target_application, is_bg, schema_type, extra_spans = target_application - field_resolver_metrics = [("GraphQL/resolve/%s/hello" % framework, 1)] + field_resolver_metrics = [(f"GraphQL/resolve/{framework}/hello", 1)] type_annotation = "!" 
if framework == "Strawberry" else "" graphql_attrs = { "graphql.field.name": "hello", "graphql.field.parentType": "Query", "graphql.field.path": "hello", - "graphql.field.returnType": "String" + type_annotation, + "graphql.field.returnType": f"String{type_annotation}", } # Span count 4: Transaction, Operation, and 1 Resolver and Resolver function @@ -545,9 +545,9 @@ def _test(): def test_deepest_unique_path(target_application, query, expected_path): framework, version, target_application, is_bg, schema_type, extra_spans = target_application if expected_path == "/error": - txn_name = "framework_%s._target_schema_%s:resolve_error" % (framework.lower(), schema_type) + txn_name = f"framework_{framework.lower()}._target_schema_{schema_type}:resolve_error" else: - txn_name = "query/%s" % expected_path + txn_name = f"query/{expected_path}" @validate_transaction_metrics( txn_name, diff --git a/tests/framework_grpc/conftest.py b/tests/framework_grpc/conftest.py index 27498b363f..c5eae9db6f 100644 --- a/tests/framework_grpc/conftest.py +++ b/tests/framework_grpc/conftest.py @@ -71,6 +71,6 @@ def stub_and_channel(mock_grpc_server): def create_stub_and_channel(port): from sample_application import SampleApplicationStub - channel = grpc.insecure_channel("localhost:%s" % port) + channel = grpc.insecure_channel(f"localhost:{port}") stub = SampleApplicationStub(channel) return stub, channel diff --git a/tests/framework_grpc/sample_application/__init__.py b/tests/framework_grpc/sample_application/__init__.py index f56d62af86..707fbfd8b9 100644 --- a/tests/framework_grpc/sample_application/__init__.py +++ b/tests/framework_grpc/sample_application/__init__.py @@ -45,7 +45,7 @@ def DoUnaryUnary(self, request, context): if request.timesout: while context.is_active(): time.sleep(0.1) - return Message(text="unary_unary: %s" % request.text) + return Message(text=f"unary_unary: {request.text}") def DoUnaryStream(self, request, context): context.set_trailing_metadata([("content-type", "text/plain")]) @@ -53,7 +53,7 @@ def DoUnaryStream(self, request, context): while context.is_active(): time.sleep(0.1) for i in range(request.count): - yield Message(text="unary_stream: %s" % request.text) + yield Message(text=f"unary_stream: {request.text}") def DoStreamUnary(self, request_iter, context): context.set_trailing_metadata([("content-type", "text/plain")]) @@ -61,7 +61,7 @@ def DoStreamUnary(self, request_iter, context): if request.timesout: while context.is_active(): time.sleep(0.1) - return Message(text="stream_unary: %s" % request.text) + return Message(text=f"stream_unary: {request.text}") def DoStreamStream(self, request_iter, context): context.set_trailing_metadata([("content-type", "text/plain")]) @@ -69,39 +69,39 @@ def DoStreamStream(self, request_iter, context): if request.timesout: while context.is_active(): time.sleep(0.1) - yield Message(text="stream_stream: %s" % request.text) + yield Message(text=f"stream_stream: {request.text}") def DoUnaryUnaryRaises(self, request, context): - raise AssertionError("unary_unary: %s" % request.text) + raise AssertionError(f"unary_unary: {request.text}") def DoUnaryStreamRaises(self, request, context): - raise AssertionError("unary_stream: %s" % request.text) + raise AssertionError(f"unary_stream: {request.text}") def DoStreamUnaryRaises(self, request_iter, context): for request in request_iter: - raise AssertionError("stream_unary: %s" % request.text) + raise AssertionError(f"stream_unary: {request.text}") def DoStreamStreamRaises(self, request_iter, context): for request 
in request_iter: - raise AssertionError("stream_stream: %s" % request.text) + raise AssertionError(f"stream_stream: {request.text}") def NoTxnUnaryUnaryRaises(self, request, context): current_transaction().ignore_transaction = True - raise AssertionError("unary_unary: %s" % request.text) + raise AssertionError(f"unary_unary: {request.text}") def NoTxnUnaryStreamRaises(self, request, context): current_transaction().ignore_transaction = True - raise AssertionError("unary_stream: %s" % request.text) + raise AssertionError(f"unary_stream: {request.text}") def NoTxnStreamUnaryRaises(self, request_iter, context): current_transaction().ignore_transaction = True for request in request_iter: - raise AssertionError("stream_unary: %s" % request.text) + raise AssertionError(f"stream_unary: {request.text}") def NoTxnStreamStreamRaises(self, request_iter, context): current_transaction().ignore_transaction = True for request in request_iter: - raise AssertionError("stream_stream: %s" % request.text) + raise AssertionError(f"stream_stream: {request.text}") def NoTxnUnaryUnary(self, request, context): current_transaction().ignore_transaction = True diff --git a/tests/framework_grpc/test_clients.py b/tests/framework_grpc/test_clients.py index 67f8f9daeb..10b76de62e 100644 --- a/tests/framework_grpc/test_clients.py +++ b/tests/framework_grpc/test_clients.py @@ -63,19 +63,16 @@ def test_client( ): port = mock_grpc_server - service_method_class_name = "NoTxn%s%s" % ( - service_method_type.title().replace("_", ""), - "Raises" if raises_exception else "", - ) + service_method_class_name = f"NoTxn{service_method_type.title().replace('_', '')}{'Raises' if raises_exception else ''}" streaming_request = service_method_type.split("_")[0] == "stream" streaming_response = service_method_type.split("_")[1] == "stream" _test_scoped_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), ] _test_rollup_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), - ("External/localhost:%s/all" % port, 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), + (f"External/localhost:{port}/all", 1), ("External/allOther", 1), ("External/all", 1), ] @@ -122,7 +119,7 @@ def _test_client(): except (AttributeError, TypeError): reply = [reply] - expected_text = "%s: Hello World" % service_method_type + expected_text = f"{service_method_type}: Hello World" response_texts_correct = [r.text == expected_text for r in reply] assert len(response_texts_correct) == message_count assert response_texts_correct and all(response_texts_correct) @@ -131,7 +128,7 @@ def _test_client(): _test_client() except grpc.RpcError as e: if raises_exception: - assert "%s: Hello World" % service_method_type in e.details() + assert f"{service_method_type}: Hello World" in e.details() elif cancel: assert e.code() == grpc.StatusCode.CANCELLED else: @@ -159,15 +156,15 @@ def _test_client(): def test_future_timeout_error(service_method_type, service_method_method_name, future_response, mock_grpc_server, stub): port = mock_grpc_server - service_method_class_name = "NoTxn%s" % (service_method_type.title().replace("_", "")) + service_method_class_name = f"NoTxn{service_method_type.title().replace('_', '')}" streaming_request = service_method_type.split("_")[0] == "stream" _test_scoped_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % 
(port, service_method_class_name), 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), ] _test_rollup_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), - ("External/localhost:%s/all" % port, 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), + (f"External/localhost:{port}/all", 1), ("External/allOther", 1), ("External/all", 1), ] @@ -207,15 +204,15 @@ def _test_future_timeout_error(): def test_repeated_result(service_method_type, service_method_method_name, mock_grpc_server, stub): port = mock_grpc_server - service_method_class_name = "NoTxn%s" % (service_method_type.title().replace("_", "")) + service_method_class_name = f"NoTxn{service_method_type.title().replace('_', '')}" streaming_request = service_method_type.split("_")[0] == "stream" _test_scoped_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), ] _test_rollup_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), - ("External/localhost:%s/all" % port, 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), + (f"External/localhost:{port}/all", 1), ("External/allOther", 1), ("External/all", 1), ] @@ -257,15 +254,15 @@ def _test_repeated_result(): def test_future_cancel(service_method_type, service_method_method_name, future_response, mock_grpc_server, stub): port = mock_grpc_server - service_method_class_name = "NoTxn%s" % (service_method_type.title().replace("_", "")) + service_method_class_name = f"NoTxn{service_method_type.title().replace('_', '')}" streaming_request = service_method_type.split("_")[0] == "stream" _test_scoped_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), ] _test_rollup_metrics = [ - ("External/localhost:%s/gRPC/SampleApplication/%s" % (port, service_method_class_name), 1), - ("External/localhost:%s/all" % port, 1), + (f"External/localhost:{port}/gRPC/SampleApplication/{service_method_class_name}", 1), + (f"External/localhost:{port}/all", 1), ("External/allOther", 1), ("External/all", 1), ] diff --git a/tests/framework_grpc/test_distributed_tracing.py b/tests/framework_grpc/test_distributed_tracing.py index 7f253651dc..80e833895b 100644 --- a/tests/framework_grpc/test_distributed_tracing.py +++ b/tests/framework_grpc/test_distributed_tracing.py @@ -45,7 +45,7 @@ def test_inbound_distributed_trace(mock_grpc_server, method_name, dt_headers = ExternalTrace.generate_request_headers(transaction) @validate_transaction_metrics( - 'sample_application:SampleApplicationServicer.' 
+ method_name, + f"sample_application:SampleApplicationServicer.{method_name}", rollup_metrics=( ('Supportability/TraceContext/Accept/Success', 1), ), @@ -84,7 +84,7 @@ def test_outbound_distributed_trace( request_type, response_type = method_type.split('_', 1) streaming_request = request_type == 'stream' streaming_response = response_type == 'stream' - stub_method = 'DtNoTxn' + method_type.title().replace('_', '') + stub_method = f"DtNoTxn{method_type.title().replace('_', '')}" request = create_request(streaming_request) @@ -96,8 +96,7 @@ def test_outbound_distributed_trace( 'span.kind': 'client', } - txn_name = 'test_outbound_DT[{0}-{1}-{2}-{3}]'.format( - method_type, method_name, dt_enabled, dt_error) + txn_name = f'test_outbound_DT[{method_type}-{method_name}-{dt_enabled}-{dt_error}]' settings = {'distributed_tracing.enabled': dt_enabled} span_count = 1 if dt_enabled else 0 if dt_error: @@ -162,7 +161,7 @@ def test_outbound_payload_outside_transaction( request_type, response_type = method_type.split('_', 1) streaming_request = request_type == 'stream' streaming_response = response_type == 'stream' - stub_method = 'DtNoTxn' + method_type.title().replace('_', '') + stub_method = f"DtNoTxn{method_type.title().replace('_', '')}" request = create_request(streaming_request) diff --git a/tests/framework_grpc/test_server.py b/tests/framework_grpc/test_server.py index 8b057eda3d..74fc851699 100644 --- a/tests/framework_grpc/test_server.py +++ b/tests/framework_grpc/test_server.py @@ -52,7 +52,7 @@ def test_simple(method_name, streaming_request, mock_grpc_server, stub): port = mock_grpc_server request = create_request(streaming_request) - _transaction_name = "sample_application:SampleApplicationServicer.{}".format(method_name) + _transaction_name = f"sample_application:SampleApplicationServicer.{method_name}" method = getattr(stub, method_name) @validate_code_level_metrics("sample_application.SampleApplicationServicer", method_name) @@ -83,9 +83,9 @@ def test_raises_response_status(method_name, streaming_request, mock_grpc_server port = mock_grpc_server request = create_request(streaming_request) - method_name = method_name + "Raises" + method_name = f"{method_name}Raises" - _transaction_name = "sample_application:SampleApplicationServicer.{}".format(method_name) + _transaction_name = f"sample_application:SampleApplicationServicer.{method_name}" method = getattr(stub, method_name) status_code = str(grpc.StatusCode.UNKNOWN.value[0]) diff --git a/tests/framework_sanic/test_application.py b/tests/framework_sanic/test_application.py index a949d91d02..36271f254e 100644 --- a/tests/framework_sanic/test_application.py +++ b/tests/framework_sanic/test_application.py @@ -51,7 +51,7 @@ ("Function/_target_application:request_middleware", 1 if sanic_v19_to_v22_12 else 2), ] FRAMEWORK_METRICS = [ - ("Python/Framework/Sanic/%s" % sanic.__version__, 1), + (f"Python/Framework/Sanic/{sanic.__version__}", 1), ] BASE_ATTRS = ["response.status", "response.headers.contentType", "response.headers.contentLength"] @@ -90,11 +90,11 @@ def test_websocket(app): ), ) def test_method_view(app, method): - metric_name = "Function/_target_application:MethodView." + method + metric_name = f"Function/_target_application:MethodView.{method}" @validate_code_level_metrics("_target_application.MethodView", method) @validate_transaction_metrics( - "_target_application:MethodView." 
+ method, + f"_target_application:MethodView.{method}", scoped_metrics=[(metric_name, 1)], rollup_metrics=[(metric_name, 1)], ) @@ -137,22 +137,22 @@ def test_recorded_error(app, endpoint, sanic_version): pytest.skip() ERROR_METRICS = [ - ("Function/_target_application:%s" % endpoint, 1), + (f"Function/_target_application:{endpoint}", 1), ] @validate_transaction_errors(errors=["builtins:ValueError"]) @validate_base_transaction_event_attr @validate_transaction_metrics( - "_target_application:%s" % endpoint, + f"_target_application:{endpoint}", scoped_metrics=ERROR_METRICS, rollup_metrics=ERROR_METRICS + FRAMEWORK_METRICS, ) def _test(): if endpoint == "write_response_error": with pytest.raises(ValueError): - response = app.fetch("get", "/" + endpoint) + response = app.fetch("get", f"/{endpoint}") else: - response = app.fetch("get", "/" + endpoint) + response = app.fetch("get", f"/{endpoint}") assert response.status == 500 _test() @@ -327,7 +327,7 @@ def sync_failing_middleware(*args, **kwargs): def test_returning_middleware(app, middleware, attach_to, metric_name, transaction_name): metrics = [ - ("Function/%s" % metric_name, 1), + (f"Function/{metric_name}", 1), ] @validate_code_level_metrics(*metric_name.split(":")) diff --git a/tests/framework_sanic/test_cross_application.py b/tests/framework_sanic/test_cross_application.py index 31dc3b9b92..3b484f706b 100644 --- a/tests/framework_sanic/test_cross_application.py +++ b/tests/framework_sanic/test_cross_application.py @@ -49,7 +49,7 @@ def raw_headers(response): try: # Manually encode into bytes - return " ".join("%s: %s" % (k, v) for k, v in response.processed_headers).encode() + return " ".join(f"{k}: {v}" for k, v in response.processed_headers).encode() except AttributeError: try: return response.get_headers() @@ -120,7 +120,7 @@ def test_inbound_distributed_trace(app): def test_cat_response_headers(app, inbound_payload, expected_intrinsics, forgone_intrinsics, cat_id, url, metric_name): _base_metrics = [ - ("Function/%s" % metric_name, 1), + (f"Function/{metric_name}", 1), ] @validate_transaction_metrics( @@ -129,7 +129,7 @@ def test_cat_response_headers(app, inbound_payload, expected_intrinsics, forgone rollup_metrics=_base_metrics, ) @validate_analytics_catmap_data( - "WebTransaction/Function/%s" % metric_name, + f"WebTransaction/Function/{metric_name}", expected_attributes=expected_intrinsics, non_expected_attributes=forgone_intrinsics, ) @@ -144,7 +144,7 @@ def _test(): app_data = json.loads(deobfuscate(cat_response_header, ENCODING_KEY)) assert app_data[0] == cat_id - assert app_data[1] == ("WebTransaction/Function/%s" % metric_name) + assert app_data[1] == f"WebTransaction/Function/{metric_name}" else: assert b"X-NewRelic-App-Data" not in raw_headers(response) @@ -159,6 +159,6 @@ def test_cat_response_custom_header(app): cat_headers = make_cross_agent_headers(inbound_payload, ENCODING_KEY, cat_id) response = app.fetch( - "get", "/custom-header/%s/%s" % ("X-NewRelic-App-Data", custom_header_value), headers=dict(cat_headers) + "get", f"/custom-header/X-NewRelic-App-Data/{custom_header_value}", headers=dict(cat_headers) ) assert custom_header_value in raw_headers(response), raw_headers(response) diff --git a/tests/framework_starlette/test_application.py b/tests/framework_starlette/test_application.py index bd89bb9a9c..cc265fd6d9 100644 --- a/tests/framework_starlette/test_application.py +++ b/tests/framework_starlette/test_application.py @@ -40,7 +40,7 @@ def target_application(): return _test_application.target_application 
-FRAMEWORK_METRIC = ("Python/Framework/Starlette/%s" % starlette.__version__, 1) +FRAMEWORK_METRIC = (f"Python/Framework/Starlette/{starlette.__version__}", 1) if starlette_version >= (0, 20, 1): DEFAULT_MIDDLEWARE_METRICS = [ @@ -94,14 +94,14 @@ def test_application_non_async(target_application, app_name): DEFAULT_MIDDLEWARE_METRICS = [ ("Function/starlette.middleware.errors:ServerErrorMiddleware.__call__", 1), - ("Function/starlette%s.exceptions:ExceptionMiddleware.__call__" % version_tweak_string, 1), + (f"Function/starlette{version_tweak_string}.exceptions:ExceptionMiddleware.__call__", 1), ] middleware_test = ( - ("no_error_handler", "starlette%s.exceptions:ExceptionMiddleware.__call__" % version_tweak_string), + ("no_error_handler", f"starlette{version_tweak_string}.exceptions:ExceptionMiddleware.__call__"), ( "non_async_error_handler_no_middleware", - "starlette%s.exceptions:ExceptionMiddleware.__call__" % version_tweak_string, + f"starlette{version_tweak_string}.exceptions:ExceptionMiddleware.__call__", ), ) @@ -113,7 +113,7 @@ def test_application_non_async(target_application, app_name): def test_application_nonexistent_route(target_application, app_name, transaction_name): @validate_transaction_metrics( transaction_name, - scoped_metrics=[("Function/" + transaction_name, 1)], + scoped_metrics=[(f"Function/{transaction_name}", 1)], rollup_metrics=[FRAMEWORK_METRIC], ) def _test(): @@ -226,7 +226,7 @@ def test_application_handled_error(target_application, app_name, transaction_nam @validate_transaction_errors(errors=[error]) @validate_transaction_metrics( transaction_name, - scoped_metrics=[("Function/" + transaction_name, 1)], + scoped_metrics=[(f"Function/{transaction_name}", 1)], rollup_metrics=[FRAMEWORK_METRIC], ) def _test(): @@ -257,7 +257,7 @@ def test_application_ignored_error(target_application, app_name, transaction_nam @validate_transaction_errors(errors=[]) @validate_transaction_metrics( transaction_name, - scoped_metrics=[("Function/" + transaction_name, 1)], + scoped_metrics=[(f"Function/{transaction_name}", 1)], rollup_metrics=[FRAMEWORK_METRIC], ) def _test(): @@ -271,7 +271,7 @@ def _test(): middleware_test_exception = ( ( "no_middleware", - [("Function/starlette%s.exceptions:ExceptionMiddleware.http_exception" % version_tweak_string, 1)], + [(f"Function/starlette{version_tweak_string}.exceptions:ExceptionMiddleware.http_exception", 1)], ), ( "teapot_exception_handler_no_middleware", diff --git a/tests/framework_starlette/test_bg_tasks.py b/tests/framework_starlette/test_bg_tasks.py index 9ad8fe61be..3ebd25134e 100644 --- a/tests/framework_starlette/test_bg_tasks.py +++ b/tests/framework_starlette/test_bg_tasks.py @@ -45,14 +45,14 @@ def target_application(): @pytest.mark.parametrize("route", ["async", "sync"]) def test_simple(target_application, route): - route_metrics = [("Function/_test_bg_tasks:run_%s_bg_task" % route, 1)] + route_metrics = [(f"Function/_test_bg_tasks:run_{route}_bg_task", 1)] - @validate_transaction_metrics("_test_bg_tasks:run_%s_bg_task" % route, index=-2, scoped_metrics=route_metrics) - @validate_transaction_metrics("_test_bg_tasks:%s_bg_task" % route, background_task=True) + @validate_transaction_metrics(f"_test_bg_tasks:run_{route}_bg_task", index=-2, scoped_metrics=route_metrics) + @validate_transaction_metrics(f"_test_bg_tasks:{route}_bg_task", background_task=True) @validate_transaction_count(2) def _test(): app = target_application["none"] - response = app.get("/" + route) + response = app.get(f"/{route}") assert response.status == 
200 _test() @@ -61,14 +61,14 @@ def _test(): @skip_if_no_middleware @pytest.mark.parametrize("route", ["async", "sync"]) def test_asgi_style_middleware(target_application, route): - route_metrics = [("Function/_test_bg_tasks:run_%s_bg_task" % route, 1)] + route_metrics = [(f"Function/_test_bg_tasks:run_{route}_bg_task", 1)] - @validate_transaction_metrics("_test_bg_tasks:run_%s_bg_task" % route, index=-2, scoped_metrics=route_metrics) - @validate_transaction_metrics("_test_bg_tasks:%s_bg_task" % route, background_task=True) + @validate_transaction_metrics(f"_test_bg_tasks:run_{route}_bg_task", index=-2, scoped_metrics=route_metrics) + @validate_transaction_metrics(f"_test_bg_tasks:{route}_bg_task", background_task=True) @validate_transaction_count(2) def _test(): app = target_application["asgi"] - response = app.get("/" + route) + response = app.get(f"/{route}") assert response.status == 200 _test() @@ -77,15 +77,15 @@ def _test(): @skip_if_no_middleware @pytest.mark.parametrize("route", ["async", "sync"]) def test_basehttp_style_middleware(target_application, route): - route_metric = ("Function/_test_bg_tasks:run_%s_bg_task" % route, 1) + route_metric = (f"Function/_test_bg_tasks:run_{route}_bg_task", 1) # A function trace metric that appears only when the bug below is present, causing background tasks to be # completed inside web transactions, requiring a function trace to be used for timing # instead of a background task transaction. Should not be present at all when bug is fixed. - bg_task_metric = ("Function/_test_bg_tasks:%s_bg_task" % route, 1) + bg_task_metric = (f"Function/_test_bg_tasks:{route}_bg_task", 1) def _test(): app = target_application["basehttp"] - response = app.get("/" + route) + response = app.get(f"/{route}") assert response.status == 200 # The bug was fixed in version 0.21.0 but re-occurred in 0.23.1. @@ -107,16 +107,16 @@ def _test(): if BUG_COMPLETELY_FIXED: # Assert both web transaction and background task transactions are present. _test = validate_transaction_metrics( - "_test_bg_tasks:run_%s_bg_task" % route, index=-2, scoped_metrics=[route_metric] + f"_test_bg_tasks:run_{route}_bg_task", index=-2, scoped_metrics=[route_metric] )(_test) - _test = validate_transaction_metrics("_test_bg_tasks:%s_bg_task" % route, background_task=True)(_test) + _test = validate_transaction_metrics(f"_test_bg_tasks:{route}_bg_task", background_task=True)(_test) _test = validate_transaction_count(2)(_test) elif BUG_PARTIALLY_FIXED: # The background task no longer blocks the completion of the web request/web transaction. # However, the BaseHTTPMiddleware causes the task to be cancelled when the web request disconnects, so there are no # longer function traces or background task transactions. # In version 0.23.1, the check to see if more_body exists is removed, reverting behavior to this model - _test = validate_transaction_metrics("_test_bg_tasks:run_%s_bg_task" % route, scoped_metrics=[route_metric])( + _test = validate_transaction_metrics(f"_test_bg_tasks:run_{route}_bg_task", scoped_metrics=[route_metric])( _test ) _test = validate_transaction_count(1)(_test) @@ -124,7 +124,7 @@ def _test(): # The BaseHTTPMiddleware causes the background task to execute within the web request # with the web transaction still active.
_test = validate_transaction_metrics( - "_test_bg_tasks:run_%s_bg_task" % route, scoped_metrics=[route_metric, bg_task_metric] + f"_test_bg_tasks:run_{route}_bg_task", scoped_metrics=[route_metric, bg_task_metric] )(_test) _test = validate_transaction_count(1)(_test) diff --git a/tests/framework_tornado/_target_application.py b/tests/framework_tornado/_target_application.py index 98db75ab9c..33b0880ef1 100644 --- a/tests/framework_tornado/_target_application.py +++ b/tests/framework_tornado/_target_application.py @@ -171,7 +171,7 @@ def trace(self): class WebSocketHandler(tornado.websocket.WebSocketHandler): def on_message(self, message): - self.write_message("hello " + message) + self.write_message(f"hello {message}") class EnsureFutureHandler(tornado.web.RequestHandler): diff --git a/tests/framework_tornado/test_custom_handler.py b/tests/framework_tornado/test_custom_handler.py index a8cb77d76f..faa8ebf100 100644 --- a/tests/framework_tornado/test_custom_handler.py +++ b/tests/framework_tornado/test_custom_handler.py @@ -20,7 +20,7 @@ def test_custom_handler(app): - FRAMEWORK_METRIC = 'Python/Framework/Tornado/%s' % app.tornado_version + FRAMEWORK_METRIC = f'Python/Framework/Tornado/{app.tornado_version}' @validate_transaction_metrics( name='_target_application:CustomApplication', diff --git a/tests/framework_tornado/test_externals.py b/tests/framework_tornado/test_externals.py index 0c44e43361..8d455f27cc 100644 --- a/tests/framework_tornado/test_externals.py +++ b/tests/framework_tornado/test_externals.py @@ -57,7 +57,7 @@ class CustomAsyncHTTPClient(tornado.httpclient.AsyncHTTPClient): def fetch_impl(self, request, callback): out = [] for k, v in request.headers.items(): - out.append("%s: %s" % (k, v)) + out.append(f"{k}: {v}") body = "\n".join(out).encode("utf-8") response = tornado.httpclient.HTTPResponse(request=request, code=200, buffer=io.BytesIO(body)) callback(response) @@ -76,19 +76,19 @@ def fetch_impl(self, request, callback): elif client_cls == "CustomAsyncHTTPClient": cls.configure(CustomAsyncHTTPClient) else: - raise ValueError("Received unknown client type: %s" % client_cls) + raise ValueError(f"Received unknown client type: {client_cls}") client = cls(force_instance=True) callback = None - uri = "http://localhost:%s" % port + uri = f"http://localhost:{port}" if req_type == "class": req = tornado.httpclient.HTTPRequest(uri, **kwargs) kwargs = {} elif req_type == "uri": req = uri else: - raise ValueError("Received unknown request type: %s" % req_type) + raise ValueError(f"Received unknown request type: {req_type}") @tornado.gen.coroutine def _make_request(): @@ -163,7 +163,7 @@ def test_httpclient( port = external.port - expected_metrics = [("External/localhost:%s/tornado/GET" % port, num_requests)] + expected_metrics = [(f"External/localhost:{port}/tornado/GET", num_requests)] @override_application_settings( { @@ -280,7 +280,7 @@ def test_client_cat_response_processing( port = cat_response_server.port expected_metrics = [ - ("ExternalTransaction/localhost:%s/1#1/WebTransaction/Function/app:beep" % port, 1 if cat_enabled else None), + (f"ExternalTransaction/localhost:{port}/1#1/WebTransaction/Function/app:beep", 1 if cat_enabled else None), ] @validate_transaction_metrics( @@ -322,8 +322,8 @@ def test_httpclient_fetch_crashes(external): @validate_transaction_metrics( "test_httpclient_fetch_crashes", background_task=True, - rollup_metrics=[("External/localhost:%d/tornado/GET" % external.port, 1)], - scoped_metrics=[("External/localhost:%d/tornado/GET" % external.port, 1)], 
+ rollup_metrics=[(f"External/localhost:{external.port}/tornado/GET", 1)], + scoped_metrics=[(f"External/localhost:{external.port}/tornado/GET", 1)], ) @background_task(name="test_httpclient_fetch_crashes") def _test(): @@ -337,7 +337,7 @@ def fetch_impl(self, *args, **kwargs): port = external.port with pytest.raises(ValueError): - tornado.ioloop.IOLoop.current().run_sync(lambda: client.fetch("http://localhost:%s" % port)) + tornado.ioloop.IOLoop.current().run_sync(lambda: client.fetch(f"http://localhost:{port}")) _test() @@ -346,8 +346,8 @@ def test_httpclient_fetch_inside_terminal_node(external): @validate_transaction_metrics( "test_httpclient_fetch_inside_terminal_node", background_task=True, - rollup_metrics=[("External/localhost:%d/tornado/GET" % external.port, None)], - scoped_metrics=[("External/localhost:%d/tornado/GET" % external.port, None)], + rollup_metrics=[(f"External/localhost:{external.port}/tornado/GET", None)], + scoped_metrics=[(f"External/localhost:{external.port}/tornado/GET", None)], ) @background_task(name="test_httpclient_fetch_inside_terminal_node") def _test(): @@ -366,7 +366,7 @@ def _test(): @tornado.gen.coroutine def _make_request(): with FunctionTrace(name="parent", terminal=True): - response = yield client.fetch("http://localhost:%s" % port) + response = yield client.fetch(f"http://localhost:{port}") return response response = tornado.ioloop.IOLoop.current().run_sync(_make_request) diff --git a/tests/framework_tornado/test_inbound_cat.py b/tests/framework_tornado/test_inbound_cat.py index 44fbf29337..7c2addcec2 100644 --- a/tests/framework_tornado/test_inbound_cat.py +++ b/tests/framework_tornado/test_inbound_cat.py @@ -60,8 +60,7 @@ def test_response_to_inbound_cat(app, manual_flush): client_cross_process_id = headers['X-NewRelic-ID'] txn_header = headers['X-NewRelic-Transaction'] - response = app.fetch('/force-cat-response/%s/%s/%s' % - (client_cross_process_id, txn_header, manual_flush)) + response = app.fetch(f'/force-cat-response/{client_cross_process_id}/{txn_header}/{manual_flush}') assert response.code == 200 assert 'X-NewRelic-App-Data' in list(response.headers.keys()) @@ -93,8 +92,7 @@ def test_cat_headers_not_inserted(app): client_cross_process_id = headers['X-NewRelic-ID'] txn_header = headers['X-NewRelic-Transaction'] - response = app.fetch('/304-cat-response/%s/%s' % - (client_cross_process_id, txn_header)) + response = app.fetch(f'/304-cat-response/{client_cross_process_id}/{txn_header}') assert response.code == 304 assert 'X-NewRelic-App-Data' not in list(response.headers.keys()) diff --git a/tests/framework_tornado/test_server.py b/tests/framework_tornado/test_server.py index 6f8e6bf2ac..31be5030ff 100644 --- a/tests/framework_tornado/test_server.py +++ b/tests/framework_tornado/test_server.py @@ -55,14 +55,14 @@ ) @override_application_settings({"attributes.include": ["request.*"]}) def test_server(app, uri, name, metrics, method_metric): - FRAMEWORK_METRIC = "Python/Framework/Tornado/%s" % app.tornado_version - METHOD_METRIC = "Function/%s" % name + FRAMEWORK_METRIC = f"Python/Framework/Tornado/{app.tornado_version}" + METHOD_METRIC = f"Function/{name}" metrics = metrics or [] metrics.append((FRAMEWORK_METRIC, 1)) metrics.append((METHOD_METRIC, 1 if method_metric else None)) - host = "127.0.0.1:" + str(app.get_http_port()) + host = f"127.0.0.1:{str(app.get_http_port())}" namespace, func_name = name.split(".") namespace = namespace.replace(":", ".") @@ -112,8 +112,8 @@ def _test(): def test_concurrent_inbound_requests(app, uri, name, 
metrics, method_metric): from tornado import gen - FRAMEWORK_METRIC = "Python/Framework/Tornado/%s" % app.tornado_version - METHOD_METRIC = "Function/%s" % name + FRAMEWORK_METRIC = f"Python/Framework/Tornado/{app.tornado_version}" + METHOD_METRIC = f"Function/{name}" metrics = metrics or [] metrics.append((FRAMEWORK_METRIC, 1)) @@ -220,7 +220,7 @@ def test_web_socket(uri, name, app): @validate_transaction_metrics( name, - rollup_metrics=[("Function/%s" % name, None)], + rollup_metrics=[(f"Function/{name}", None)], ) @validate_code_level_metrics(namespace, func_name) def _test(): diff --git a/tests/logger_logging/test_local_decorating.py b/tests/logger_logging/test_local_decorating.py index 85d1fb433a..ebf02d8813 100644 --- a/tests/logger_logging/test_local_decorating.py +++ b/tests/logger_logging/test_local_decorating.py @@ -48,20 +48,10 @@ def get_metadata_string(log_message, is_txn): assert host entity_guid = application_settings().entity_guid if is_txn: - metadata_string = "".join( - ( - "NR-LINKING|", - entity_guid, - "|", - host, - "|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28logger_logging%29|", - ) - ) + metadata_string = f"NR-LINKING|{entity_guid}|{host}|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28logger_logging%29|" else: - metadata_string = "".join( - ("NR-LINKING|", entity_guid, "|", host, "|||Python%20Agent%20Test%20%28logger_logging%29|") - ) - formatted_string = log_message + " " + metadata_string + metadata_string = f"NR-LINKING|{entity_guid}|{host}|||Python%20Agent%20Test%20%28logger_logging%29|" + formatted_string = f"{log_message} {metadata_string}" return formatted_string diff --git a/tests/logger_loguru/test_attributes.py b/tests/logger_loguru/test_attributes.py index e15cc9aec3..097624c27d 100644 --- a/tests/logger_loguru/test_attributes.py +++ b/tests/logger_loguru/test_attributes.py @@ -22,7 +22,7 @@ [ { # Fixed attributes "message": "context_attrs: arg1", - "context.file": "(name='%s', path='%s')" % ("test_attributes.py", str(__file__)), + "context.file": f"(name='test_attributes.py', path='{str(__file__)}')", "context.function": "test_loguru_default_context_attributes", "context.extra.bound_attr": 1, "context.extra.contextual_attr": 2, diff --git a/tests/logger_loguru/test_local_decorating.py b/tests/logger_loguru/test_local_decorating.py index 915f683a8b..5d1273a92a 100644 --- a/tests/logger_loguru/test_local_decorating.py +++ b/tests/logger_loguru/test_local_decorating.py @@ -42,10 +42,10 @@ def get_metadata_string(log_message, is_txn): assert host entity_guid = application_settings().entity_guid if is_txn: - metadata_string = "".join(('NR-LINKING|', entity_guid, '|', host, '|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28logger_loguru%29|')) + metadata_string = f"NR-LINKING|{entity_guid}|{host}|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28logger_loguru%29|" else: - metadata_string = "".join(('NR-LINKING|', entity_guid, '|', host, '|||Python%20Agent%20Test%20%28logger_loguru%29|')) - formatted_string = log_message + " " + metadata_string + metadata_string = f"NR-LINKING|{entity_guid}|{host}|||Python%20Agent%20Test%20%28logger_loguru%29|" + formatted_string = f"{log_message} {metadata_string}" return formatted_string diff --git a/tests/logger_loguru/test_settings.py b/tests/logger_loguru/test_settings.py index 43d675d562..76bf5a1d0c 100644 --- a/tests/logger_loguru/test_settings.py +++ b/tests/logger_loguru/test_settings.py @@ -27,10 +27,10 @@ def get_metadata_string(log_message, is_txn): assert host entity_guid = 
application_settings().entity_guid if is_txn: - metadata_string = "".join(('NR-LINKING|', entity_guid, '|', host, '|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28internal_logging%29|')) + metadata_string = f"NR-LINKING|{entity_guid}|{host}|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28internal_logging%29|" else: - metadata_string = "".join(('NR-LINKING|', entity_guid, '|', host, '|||Python%20Agent%20Test%20%28internal_logging%29|')) + metadata_string = f"NR-LINKING|{entity_guid}|{host}|||Python%20Agent%20Test%20%28internal_logging%29|" - formatted_string = log_message + " " + metadata_string + formatted_string = f"{log_message} {metadata_string}" return formatted_string diff --git a/tests/logger_structlog/conftest.py b/tests/logger_structlog/conftest.py index bd5bd8922e..1ff9ad919d 100644 --- a/tests/logger_structlog/conftest.py +++ b/tests/logger_structlog/conftest.py @@ -58,7 +58,7 @@ def msg(self, event, **kwargs): fatal = failure = err = error = critical = exception = msg def __repr__(self): - return "<StructLogCapLog %s>" % str(id(self)) + return f"<StructLogCapLog {str(id(self))}>" __str__ = __repr__ diff --git a/tests/logger_structlog/test_local_decorating.py b/tests/logger_structlog/test_local_decorating.py index 78e99d2385..29d322d784 100644 --- a/tests/logger_structlog/test_local_decorating.py +++ b/tests/logger_structlog/test_local_decorating.py @@ -26,10 +26,10 @@ def get_metadata_string(log_message, is_txn): assert host entity_guid = application_settings().entity_guid if is_txn: - metadata_string = "".join(('NR-LINKING|', entity_guid, '|', host, '|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28logger_structlog%29|')) + metadata_string = f"NR-LINKING|{entity_guid}|{host}|abcdefgh12345678|abcdefgh|Python%20Agent%20Test%20%28logger_structlog%29|" else: - metadata_string = "".join(('NR-LINKING|', entity_guid, '|', host, '|||Python%20Agent%20Test%20%28logger_structlog%29|')) + metadata_string = f"NR-LINKING|{entity_guid}|{host}|||Python%20Agent%20Test%20%28logger_structlog%29|" - formatted_string = log_message + " " + metadata_string + formatted_string = f"{log_message} {metadata_string}" return formatted_string diff --git a/tests/messagebroker_confluentkafka/conftest.py b/tests/messagebroker_confluentkafka/conftest.py index 576ec27f82..eddaa84d8b 100644 --- a/tests/messagebroker_confluentkafka/conftest.py +++ b/tests/messagebroker_confluentkafka/conftest.py @@ -27,7 +27,7 @@ DB_SETTINGS = kafka_settings()[0] -BROKER = "%s:%s" % (DB_SETTINGS["host"], DB_SETTINGS["port"]) +BROKER = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" _default_settings = { @@ -171,7 +171,7 @@ def __call__(self, obj, ctx): def topic(): from confluent_kafka.admin import AdminClient, NewTopic - topic = "test-topic-%s" % str(uuid.uuid4()) + topic = f"test-topic-{str(uuid.uuid4())}" admin = AdminClient({"bootstrap.servers": BROKER}) new_topics = [NewTopic(topic, num_partitions=1, replication_factor=1)] @@ -228,7 +228,7 @@ def _test(): record_count += 1 consumer.poll(0.5) # Exit the transaction. - assert record_count == 1, "Incorrect count of records consumed: %d. Expected 1." % record_count + assert record_count == 1, f"Incorrect count of records consumed: {record_count}. Expected 1."
return _test diff --git a/tests/messagebroker_confluentkafka/test_consumer.py b/tests/messagebroker_confluentkafka/test_consumer.py index cf276f46aa..7c759c91b8 100644 --- a/tests/messagebroker_confluentkafka/test_consumer.py +++ b/tests/messagebroker_confluentkafka/test_consumer.py @@ -38,12 +38,12 @@ def test_custom_metrics(get_consumer_record, topic): custom_metrics = [ - ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, 1), - ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), ] @validate_transaction_metrics( - "Named/%s" % topic, + f"Named/{topic}", group="Message/Kafka/Topic", custom_metrics=custom_metrics, background_task=True, @@ -70,9 +70,9 @@ def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): @validate_transaction_metrics( "test_consumer:test_custom_metrics_on_existing_transaction.<locals>._test", custom_metrics=[ - ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, 1), - ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, 1), - ("Python/MessageBroker/Confluent-Kafka/%s" % version, 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), + (f"Python/MessageBroker/Confluent-Kafka/{version}", 1), ], background_task=True, ) @@ -88,8 +88,8 @@ def test_custom_metrics_inactive_transaction(get_consumer_record, topic): @validate_transaction_metrics( "test_consumer:test_custom_metrics_inactive_transaction.<locals>._test", custom_metrics=[ - ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, None), - ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, None), + (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", None), + (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", None), ], background_task=True, ) @@ -148,7 +148,7 @@ def _produce(): producer.flush() @validate_transaction_metrics( - "Named/%s" % topic, + f"Named/{topic}", group="Message/Kafka/Topic", rollup_metrics=[ ("Supportability/DistributedTrace/AcceptPayload/Success", None), diff --git a/tests/messagebroker_confluentkafka/test_producer.py b/tests/messagebroker_confluentkafka/test_producer.py index a3fc04a50f..bafb556aee 100644 --- a/tests/messagebroker_confluentkafka/test_producer.py +++ b/tests/messagebroker_confluentkafka/test_producer.py @@ -99,14 +99,14 @@ def producer_callback2(err, msg): def test_trace_metrics(topic, send_producer_message): from confluent_kafka import __version__ as version - scoped_metrics = [("MessageBroker/Kafka/Topic/Produce/Named/%s" % topic, 1)] + scoped_metrics = [(f"MessageBroker/Kafka/Topic/Produce/Named/{topic}", 1)] unscoped_metrics = scoped_metrics @validate_transaction_metrics( "test_producer:test_trace_metrics.<locals>.test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, - custom_metrics=[("Python/MessageBroker/Confluent-Kafka/%s" % version, 1)], + custom_metrics=[(f"Python/MessageBroker/Confluent-Kafka/{version}", 1)], background_task=True, ) @background_task() diff --git a/tests/messagebroker_confluentkafka/test_serialization.py b/tests/messagebroker_confluentkafka/test_serialization.py index 91a19b916d..a1d1f807c3 100644 --- a/tests/messagebroker_confluentkafka/test_serialization.py +++ b/tests/messagebroker_confluentkafka/test_serialization.py @@ -26,8 +26,8 @@ def test_serialization_metrics(skip_if_not_serializing, topic, send_producer_message): _metrics = [ - ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Value" % topic, 1),
- ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Key" % topic, 1), + (f"MessageBroker/Kafka/Topic/Named/{topic}/Serialization/Value", 1), + (f"MessageBroker/Kafka/Topic/Named/{topic}/Serialization/Key", 1), ] @validate_transaction_metrics( @@ -45,12 +45,12 @@ def test(): def test_deserialization_metrics(skip_if_not_serializing, topic, get_consumer_record): _metrics = [ - ("Message/Kafka/Topic/Named/%s/Deserialization/Value" % topic, 1), - ("Message/Kafka/Topic/Named/%s/Deserialization/Key" % topic, 1), + (f"Message/Kafka/Topic/Named/{topic}/Deserialization/Value", 1), + (f"Message/Kafka/Topic/Named/{topic}/Deserialization/Key", 1), ] @validate_transaction_metrics( - "Named/%s" % topic, + f"Named/{topic}", group="Message/Kafka/Topic", scoped_metrics=_metrics, rollup_metrics=_metrics, @@ -146,6 +146,6 @@ def _test(): record_count += 1 consumer.poll(0.5) # Exit the transaction. - assert record_count == 1, "Incorrect count of records consumed: %d. Expected 1." % record_count + assert record_count == 1, f"Incorrect count of records consumed: {record_count}. Expected 1." return _test diff --git a/tests/messagebroker_kafkapython/conftest.py b/tests/messagebroker_kafkapython/conftest.py index 09a095cd41..63080d4d4e 100644 --- a/tests/messagebroker_kafkapython/conftest.py +++ b/tests/messagebroker_kafkapython/conftest.py @@ -28,7 +28,7 @@ DB_SETTINGS = kafka_settings()[0] -BOOTSTRAP_SERVER = "%s:%s" % (DB_SETTINGS["host"], DB_SETTINGS["port"]) +BOOTSTRAP_SERVER = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" BROKER = [BOOTSTRAP_SERVER] @@ -194,7 +194,7 @@ def topic(): from kafka.admin.client import KafkaAdminClient from kafka.admin.new_topic import NewTopic - topic = "test-topic-%s" % str(uuid.uuid4()) + topic = f"test-topic-{str(uuid.uuid4())}" admin = KafkaAdminClient(bootstrap_servers=BROKER) new_topics = [NewTopic(topic, num_partitions=1, replication_factor=1)] @@ -235,7 +235,7 @@ def _test(): record_count += 1 attempts += 1 - assert record_count == 1, "Incorrect count of records consumed: %d. Expected 1." % record_count + assert record_count == 1, f"Incorrect count of records consumed: {record_count}. Expected 1." 
return _test diff --git a/tests/messagebroker_kafkapython/test_consumer.py b/tests/messagebroker_kafkapython/test_consumer.py index 2d62e038e1..2ed3d7ae67 100644 --- a/tests/messagebroker_kafkapython/test_consumer.py +++ b/tests/messagebroker_kafkapython/test_consumer.py @@ -38,11 +38,11 @@ def test_custom_metrics(get_consumer_record, topic): @validate_transaction_metrics( - "Named/%s" % topic, + f"Named/{topic}", group="Message/Kafka/Topic", custom_metrics=[ - ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, 1), - ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), ], background_task=True, ) @@ -67,9 +67,9 @@ def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): @validate_transaction_metrics( "test_consumer:test_custom_metrics_on_existing_transaction.<locals>._test", custom_metrics=[ - ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, 1), - ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, 1), - ("Python/MessageBroker/Kafka-Python/%s" % version, 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), + (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), + (f"Python/MessageBroker/Kafka-Python/{version}", 1), ], background_task=True, ) @@ -85,8 +85,8 @@ def test_custom_metrics_inactive_transaction(get_consumer_record, topic): @validate_transaction_metrics( "test_consumer:test_custom_metrics_inactive_transaction.<locals>._test", custom_metrics=[ - ("Message/Kafka/Topic/Named/%s/Received/Bytes" % topic, None), - ("Message/Kafka/Topic/Named/%s/Received/Messages" % topic, None), + (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", None), + (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", None), ], background_task=True, ) @@ -138,7 +138,7 @@ def _produce(): producer.flush() @validate_transaction_metrics( - "Named/%s" % topic, + f"Named/{topic}", group="Message/Kafka/Topic", rollup_metrics=[ ("Supportability/DistributedTrace/AcceptPayload/Success", None), diff --git a/tests/messagebroker_kafkapython/test_producer.py b/tests/messagebroker_kafkapython/test_producer.py index 168e80a739..816d33ac3f 100644 --- a/tests/messagebroker_kafkapython/test_producer.py +++ b/tests/messagebroker_kafkapython/test_producer.py @@ -32,14 +32,14 @@ def test_trace_metrics(topic, send_producer_message): from kafka.version import __version__ as version - scoped_metrics = [("MessageBroker/Kafka/Topic/Produce/Named/%s" % topic, 1)] + scoped_metrics = [(f"MessageBroker/Kafka/Topic/Produce/Named/{topic}", 1)] unscoped_metrics = scoped_metrics @validate_transaction_metrics( "test_producer:test_trace_metrics.<locals>.test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, - custom_metrics=[("Python/MessageBroker/Kafka-Python/%s" % version, 1)], + custom_metrics=[(f"Python/MessageBroker/Kafka-Python/{version}", 1)], background_task=True, ) @background_task() diff --git a/tests/messagebroker_kafkapython/test_serialization.py b/tests/messagebroker_kafkapython/test_serialization.py index 4c9096284d..aa42082b6d 100644 --- a/tests/messagebroker_kafkapython/test_serialization.py +++ b/tests/messagebroker_kafkapython/test_serialization.py @@ -32,8 +32,8 @@ def test_serialization_metrics(skip_if_not_serializing, topic, send_producer_message): _metrics = [ - ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Value" % topic, 1), - ("MessageBroker/Kafka/Topic/Named/%s/Serialization/Key" % topic, 1), +
(f"MessageBroker/Kafka/Topic/Named/{topic}/Serialization/Value", 1), + (f"MessageBroker/Kafka/Topic/Named/{topic}/Serialization/Key", 1), ] @validate_transaction_metrics( diff --git a/tests/messagebroker_pika/conftest.py b/tests/messagebroker_pika/conftest.py index f6cd12ae03..a3ddcf9328 100644 --- a/tests/messagebroker_pika/conftest.py +++ b/tests/messagebroker_pika/conftest.py @@ -25,14 +25,13 @@ collector_available_fixture, ) - PIKA_VERSION_INFO = get_package_version_tuple("pika") -QUEUE = "test_pika-%s" % uuid.uuid4() -QUEUE_2 = "test_pika-%s" % uuid.uuid4() +QUEUE = f"test_pika-{uuid.uuid4()}" +QUEUE_2 = f"test_pika-{uuid.uuid4()}" -EXCHANGE = "exchange-%s" % uuid.uuid4() -EXCHANGE_2 = "exchange-%s" % uuid.uuid4() +EXCHANGE = f"exchange-{uuid.uuid4()}" +EXCHANGE_2 = f"exchange-{uuid.uuid4()}" CORRELATION_ID = "test-correlation-id" REPLY_TO = "test-reply-to" diff --git a/tests/messagebroker_pika/test_cat.py b/tests/messagebroker_pika/test_cat.py index 57085501f8..0108bd76a4 100644 --- a/tests/messagebroker_pika/test_cat.py +++ b/tests/messagebroker_pika/test_cat.py @@ -84,7 +84,7 @@ def on_receive(ch, method, properties, msg): with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() - queue_name = "TESTCAT-%s" % os.getpid() + queue_name = f"TESTCAT-{os.getpid()}" channel.queue_declare(queue_name, durable=False) properties = pika.BasicProperties() @@ -131,7 +131,7 @@ def do_basic_get(channel, QUEUE): def test_basic_get_no_cat_headers(): with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() - queue_name = "TESTCAT-%s" % os.getpid() + queue_name = f"TESTCAT-{os.getpid()}" channel.queue_declare(queue_name, durable=False) properties = pika.BasicProperties() diff --git a/tests/messagebroker_pika/test_distributed_tracing.py b/tests/messagebroker_pika/test_distributed_tracing.py index 9de1fe1897..bac0b8dcda 100644 --- a/tests/messagebroker_pika/test_distributed_tracing.py +++ b/tests/messagebroker_pika/test_distributed_tracing.py @@ -107,7 +107,7 @@ def on_receive(ch, method, properties, msg): with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() - queue_name = "TESTDT-%s" % os.getpid() + queue_name = f"TESTDT-{os.getpid()}" channel.queue_declare(queue_name, durable=False) properties = pika.BasicProperties() @@ -156,7 +156,7 @@ def do_basic_get(channel, QUEUE): def test_basic_get_no_distributed_tracing_headers(): with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() - queue_name = "TESTDT-%s" % os.getpid() + queue_name = f"TESTDT-{os.getpid()}" channel.queue_declare(queue_name, durable=False) properties = pika.BasicProperties() @@ -173,7 +173,7 @@ def test_basic_get_no_distributed_tracing_headers(): def test_distributed_tracing_sends_produce_id(): with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() - queue_name = "TESTDT-%s" % os.getpid() + queue_name = f"TESTDT-{os.getpid()}" channel.queue_declare(queue_name, durable=False) properties = pika.BasicProperties() diff --git a/tests/messagebroker_pika/test_pika_async_connection_consume.py b/tests/messagebroker_pika/test_pika_async_connection_consume.py index 4c41aef920..6b9e932394 100644 --- a/tests/messagebroker_pika/test_pika_async_connection_consume.py +++ 
b/tests/messagebroker_pika/test_pika_async_connection_consume.py @@ -78,8 +78,8 @@ def handle_callback_exception(self, *args, **kwargs): _test_select_conn_basic_get_inside_txn_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, 1), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", 1), ("Function/test_pika_async_connection_consume:test_async_connection_basic_get_inside_txn.<locals>.on_message", 1), ] @@ -93,7 +93,7 @@ def handle_callback_exception(self, *args, **kwargs): ) @validate_span_events( count=1, - exact_intrinsics={"name": "MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE}, + exact_intrinsics={"name": f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}"}, exact_agents={"server.address": DB_SETTINGS["host"]}, ) @validate_transaction_metrics( @@ -175,8 +175,8 @@ def on_open_connection(connection): _test_select_conn_basic_get_inside_txn_no_callback_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ] @@ -213,8 +213,8 @@ def on_open_connection(connection): _test_async_connection_basic_get_empty_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ] @@ -257,8 +257,8 @@ def on_open_connection(connection): _test_select_conn_basic_consume_in_txn_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ( "Function/test_pika_async_connection_consume:test_async_connection_basic_consume_inside_txn.<locals>.on_message", 1, @@ -305,10 +305,10 @@ def on_open_connection(connection): _test_select_conn_basic_consume_two_exchanges = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE_2, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE_2, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE_2}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE_2}", None), ( "Function/test_pika_async_connection_consume:test_async_connection_basic_consume_two_exchanges.<locals>.on_message_1", 1, @@ -425,7 +425,7 @@ def on_open_connection(connection): scoped_metrics=_test_select_connection_consume_outside_txn_metrics, rollup_metrics=_test_select_connection_consume_outside_txn_metrics, background_task=True, - group="Message/RabbitMQ/Exchange/%s" % EXCHANGE, + group=f"Message/RabbitMQ/Exchange/{EXCHANGE}", ) @validate_code_level_metrics( "test_pika_async_connection_consume.test_select_connection_basic_consume_outside_transaction.<locals>", diff --git
a/tests/messagebroker_pika/test_pika_blocking_connection_consume.py b/tests/messagebroker_pika/test_pika_blocking_connection_consume.py index 3d6a453bf4..11720dad5f 100644 --- a/tests/messagebroker_pika/test_pika_blocking_connection_consume.py +++ b/tests/messagebroker_pika/test_pika_blocking_connection_consume.py @@ -46,8 +46,8 @@ } _test_blocking_connection_basic_get_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, 1), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", 1), ("Function/pika.adapters.blocking_connection:_CallbackResult.set_value_once", 1), ] @@ -62,7 +62,7 @@ @validate_tt_collector_json(message_broker_params=_message_broker_tt_params) @validate_span_events( count=1, - exact_intrinsics={"name": "MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE}, + exact_intrinsics={"name": f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}"}, exact_agents={"server.address": DB_SETTINGS["host"]}, ) @background_task() @@ -75,8 +75,8 @@ def test_blocking_connection_basic_get(producer): _test_blocking_connection_basic_get_empty_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ] @@ -89,7 +89,7 @@ def test_blocking_connection_basic_get(producer): @validate_tt_collector_json(message_broker_params=_message_broker_tt_params) @background_task() def test_blocking_connection_basic_get_empty(): - QUEUE = "test_blocking_empty-%s" % os.getpid() + QUEUE = f"test_blocking_empty-{os.getpid()}" with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() channel.queue_declare(queue=QUEUE) @@ -122,8 +122,8 @@ def test_basic_get(): _test_blocking_conn_basic_consume_no_txn_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ("Function/test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_outside_transaction.<locals>.on_message", None), ] _txn_name = "test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_outside_transaction.<locals>.on_message" @@ -139,12 +139,12 @@ def test_basic_get(): scoped_metrics=_test_blocking_conn_basic_consume_no_txn_metrics, rollup_metrics=_test_blocking_conn_basic_consume_no_txn_metrics, background_task=True, - group="Message/RabbitMQ/Exchange/%s" % EXCHANGE, + group=f"Message/RabbitMQ/Exchange/{EXCHANGE}", ) @validate_tt_collector_json(message_broker_params=_message_broker_tt_params) @validate_span_events( count=1, - exact_intrinsics={"name": "Message/RabbitMQ/Exchange/%s/%s" % (EXCHANGE, _txn_name)}, + exact_intrinsics={"name": f"Message/RabbitMQ/Exchange/{EXCHANGE}/{_txn_name}"}, exact_agents={"server.address": DB_SETTINGS["host"]}, ) def test_blocking_connection_basic_consume_outside_transaction(producer, as_partial): @@ -168,8 +168,8 @@ def on_message(channel, method_frame, header_frame, body): _test_blocking_conn_basic_consume_in_txn_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s"
% EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ("Function/test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_inside_txn.<locals>.on_message", 1), ] @@ -207,9 +207,9 @@ def on_message(channel, method_frame, header_frame, body): _test_blocking_conn_basic_consume_stopped_txn_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), - ("OtherTransaction/Message/RabbitMQ/Exchange/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), + (f"OtherTransaction/Message/RabbitMQ/Exchange/Named/{EXCHANGE}", None), ("Function/test_pika_blocking_connection_consume:test_blocking_connection_basic_consume_stopped_txn.<locals>.on_message", None), ] diff --git a/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py b/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py index 94f1088b18..22a4ac9733 100644 --- a/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py +++ b/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py @@ -35,8 +35,8 @@ } _test_blocking_connection_consume_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/Unknown", None), ] @@ -127,14 +127,14 @@ def test_blocking_connection_consume_exception_in_for_loop(producer): # Expected error pass except Exception as e: - assert False, "Wrong exception was raised: %s" % e + assert False, f"Wrong exception was raised: {e}" else: assert False, "No exception was raised!" _test_blocking_connection_consume_empty_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/Unknown", None), ] @@ -159,14 +159,14 @@ def test_blocking_connection_consume_exception_in_generator(): # Expected error pass except Exception as e: - assert False, "Wrong exception was raised: %s" % e + assert False, f"Wrong exception was raised: {e}" else: assert False, "No exception was raised!"
_test_blocking_connection_consume_many_metrics = [ - ("MessageBroker/RabbitMQ/Exchange/Produce/Named/%s" % EXCHANGE, None), - ("MessageBroker/RabbitMQ/Exchange/Consume/Named/%s" % EXCHANGE, None), + (f"MessageBroker/RabbitMQ/Exchange/Produce/Named/{EXCHANGE}", None), + (f"MessageBroker/RabbitMQ/Exchange/Consume/Named/{EXCHANGE}", None), ("MessageBroker/RabbitMQ/Exchange/Consume/Named/Unknown", None), ] @@ -227,7 +227,7 @@ def test_blocking_connection_consume_using_methods(producer): @validate_transaction_metrics( - "Named/%s" % EXCHANGE, + f"Named/{EXCHANGE}", scoped_metrics=_test_blocking_connection_consume_metrics, rollup_metrics=_test_blocking_connection_consume_metrics, background_task=True, @@ -251,7 +251,7 @@ def test_blocking_connection_consume_outside_txn(producer): def test_blocking_connection_consume_many_outside_txn(produce_five): @validate_transaction_metrics( - "Named/%s" % EXCHANGE, + f"Named/{EXCHANGE}", scoped_metrics=_test_blocking_connection_consume_metrics, rollup_metrics=_test_blocking_connection_consume_metrics, background_task=True, @@ -282,7 +282,7 @@ def consume_it(consumer, up_next=None): @validate_transaction_metrics( - "Named/%s" % EXCHANGE, + f"Named/{EXCHANGE}", scoped_metrics=_test_blocking_connection_consume_metrics, rollup_metrics=_test_blocking_connection_consume_metrics, background_task=True, diff --git a/tests/messagebroker_pika/test_pika_instance_info.py b/tests/messagebroker_pika/test_pika_instance_info.py index 3111a4976e..a8df228fc4 100644 --- a/tests/messagebroker_pika/test_pika_instance_info.py +++ b/tests/messagebroker_pika/test_pika_instance_info.py @@ -24,7 +24,7 @@ EXPECTED_HOST = DB_SETTINGS["host"] CONNECTION_PARAMS = [ pika.ConnectionParameters(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"]), - pika.URLParameters("amqp://%s:%s" % (DB_SETTINGS["host"], DB_SETTINGS["port"])), + pika.URLParameters(f"amqp://{DB_SETTINGS['host']}:{DB_SETTINGS['port']}"), ] diff --git a/tests/mlmodel_langchain/_mock_external_openai_server.py b/tests/mlmodel_langchain/_mock_external_openai_server.py index 6eefc9ef2f..b956c9abce 100644 --- a/tests/mlmodel_langchain/_mock_external_openai_server.py +++ b/tests/mlmodel_langchain/_mock_external_openai_server.py @@ -372,7 +372,7 @@ def _simple_get(self): else: # If no matches found self.send_response(500) self.end_headers() - self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8")) + self.wfile.write(f"Unknown Prompt:\n{prompt}".encode("utf-8")) return # Send response code @@ -438,6 +438,6 @@ def openai_version(): if __name__ == "__main__": _MockExternalOpenAIServer = MockExternalOpenAIServer() with MockExternalOpenAIServer() as server: - print("MockExternalOpenAIServer serving on port %s" % str(server.port)) + print(f"MockExternalOpenAIServer serving on port {str(server.port)}") while True: pass # Serve forever diff --git a/tests/mlmodel_langchain/conftest.py b/tests/mlmodel_langchain/conftest.py index 806528d00f..a6e2385df6 100644 --- a/tests/mlmodel_langchain/conftest.py +++ b/tests/mlmodel_langchain/conftest.py @@ -71,11 +71,11 @@ def openai_clients(openai_version, MockExternalOpenAIServer): # noqa: F811 if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False): with MockExternalOpenAIServer() as server: chat = ChatOpenAI( - base_url="http://localhost:%d" % server.port, + base_url=f"http://localhost:{server.port}", api_key="NOT-A-REAL-SECRET", ) embeddings = OpenAIEmbeddings( - openai_api_key="NOT-A-REAL-SECRET", openai_api_base="http://localhost:%d" % server.port + 
openai_api_key="NOT-A-REAL-SECRET", openai_api_base=f"http://localhost:{server.port}" ) yield chat, embeddings else: diff --git a/tests/mlmodel_langchain/test_agent.py b/tests/mlmodel_langchain/test_agent.py index 521b85c087..18524d2c2d 100644 --- a/tests/mlmodel_langchain/test_agent.py +++ b/tests/mlmodel_langchain/test_agent.py @@ -62,7 +62,7 @@ def prompt(): scoped_metrics=[("Llm/agent/LangChain/invoke", 1)], rollup_metrics=[("Llm/agent/LangChain/invoke", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -81,7 +81,7 @@ def test_sync_agent(chat_openai_client, tools, prompt): scoped_metrics=[("Llm/agent/LangChain/ainvoke", 1)], rollup_metrics=[("Llm/agent/LangChain/ainvoke", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py index a2a21cf212..5226e60e6d 100644 --- a/tests/mlmodel_langchain/test_chain.py +++ b/tests/mlmodel_langchain/test_chain.py @@ -483,7 +483,7 @@ scoped_metrics=[("Llm/chain/LangChain/invoke", 1)], rollup_metrics=[("Llm/chain/LangChain/invoke", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -608,10 +608,10 @@ def test_langchain_chain( @validate_custom_event_count(count=8) @validate_transaction_metrics( name="test_chain:test_langchain_chain.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -682,10 +682,10 @@ def test_langchain_chain_no_content( @validate_custom_event_count(count=8) @validate_transaction_metrics( name="test_chain:test_langchain_chain_no_content.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -781,10 +781,10 @@ def test_langchain_chain_error_in_openai( @validate_custom_event_count(count=6) @validate_transaction_metrics( name="test_chain:test_langchain_chain_error_in_openai.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -858,10 +858,10 @@ def test_langchain_chain_error_in_langchain( @validate_custom_event_count(count=2) 
@validate_transaction_metrics( name="test_chain:test_langchain_chain_error_in_langchain.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -936,10 +936,10 @@ def test_langchain_chain_error_in_langchain_no_content( @validate_custom_event_count(count=2) @validate_transaction_metrics( name="test_chain:test_langchain_chain_error_in_langchain_no_content.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1008,7 +1008,7 @@ def test_langchain_chain_ai_monitoring_disabled( scoped_metrics=[("Llm/chain/LangChain/ainvoke", 1)], rollup_metrics=[("Llm/chain/LangChain/ainvoke", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1053,7 +1053,7 @@ def test_async_langchain_chain_list_response( scoped_metrics=[("Llm/chain/LangChain/ainvoke", 1)], rollup_metrics=[("Llm/chain/LangChain/ainvoke", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1186,10 +1186,10 @@ def test_async_langchain_chain( @validate_custom_event_count(count=8) @validate_transaction_metrics( name="test_chain:test_async_langchain_chain.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1285,10 +1285,10 @@ def test_async_langchain_chain_error_in_openai( @validate_custom_event_count(count=6) @validate_transaction_metrics( name="test_chain:test_async_langchain_chain_error_in_openai.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1361,10 +1361,10 @@ def test_async_langchain_chain_error_in_langchain( @validate_custom_event_count(count=2) @validate_transaction_metrics( name="test_chain:test_async_langchain_chain_error_in_langchain.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % 
call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1438,10 +1438,10 @@ def test_async_langchain_chain_error_in_langchain_no_content( @validate_custom_event_count(count=2) @validate_transaction_metrics( name="test_chain:test_async_langchain_chain_error_in_langchain_no_content.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 1)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -1541,10 +1541,10 @@ def test_multiple_async_langchain_chain( @validate_custom_event_count(count=16) @validate_transaction_metrics( name="test_chain:test_multiple_async_langchain_chain.._test", - scoped_metrics=[("Llm/chain/LangChain/%s" % call_function, 2)], - rollup_metrics=[("Llm/chain/LangChain/%s" % call_function, 2)], + scoped_metrics=[(f"Llm/chain/LangChain/{call_function}", 2)], + rollup_metrics=[(f"Llm/chain/LangChain/{call_function}", 2)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_langchain/test_tool.py b/tests/mlmodel_langchain/test_tool.py index 6ff0ff1665..ba187784e1 100644 --- a/tests/mlmodel_langchain/test_tool.py +++ b/tests/mlmodel_langchain/test_tool.py @@ -100,7 +100,7 @@ def events_sans_content(event): scoped_metrics=[("Llm/tool/LangChain/run", 1)], rollup_metrics=[("Llm/tool/LangChain/run", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -120,7 +120,7 @@ def test_langchain_single_arg_tool(set_trace_info, single_arg_tool): scoped_metrics=[("Llm/tool/LangChain/run", 1)], rollup_metrics=[("Llm/tool/LangChain/run", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -139,7 +139,7 @@ def test_langchain_single_arg_tool_no_content(set_trace_info, single_arg_tool): scoped_metrics=[("Llm/tool/LangChain/arun", 1)], rollup_metrics=[("Llm/tool/LangChain/arun", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -159,7 +159,7 @@ def test_langchain_single_arg_tool_async(set_trace_info, single_arg_tool, loop): scoped_metrics=[("Llm/tool/LangChain/arun", 1)], rollup_metrics=[("Llm/tool/LangChain/arun", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -201,7 +201,7 @@ def test_langchain_single_arg_tool_async_no_content(set_trace_info, single_arg_t scoped_metrics=[("Llm/tool/LangChain/run", 1)], rollup_metrics=[("Llm/tool/LangChain/run", 1)], 
custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -225,7 +225,7 @@ def test_langchain_multi_arg_tool(set_trace_info, multi_arg_tool): scoped_metrics=[("Llm/tool/LangChain/arun", 1)], rollup_metrics=[("Llm/tool/LangChain/arun", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -283,7 +283,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop): scoped_metrics=[("Llm/tool/LangChain/run", 1)], rollup_metrics=[("Llm/tool/LangChain/run", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -315,7 +315,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool): scoped_metrics=[("Llm/tool/LangChain/run", 1)], rollup_metrics=[("Llm/tool/LangChain/run", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -346,7 +346,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): scoped_metrics=[("Llm/tool/LangChain/arun", 1)], rollup_metrics=[("Llm/tool/LangChain/arun", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -380,7 +380,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): scoped_metrics=[("Llm/tool/LangChain/arun", 1)], rollup_metrics=[("Llm/tool/LangChain/arun", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -451,7 +451,7 @@ def test_langchain_multiple_async_calls(set_trace_info, single_arg_tool, multi_a @validate_transaction_metrics( name="test_tool:test_langchain_multiple_async_calls.._test", custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_langchain/test_vectorstore.py b/tests/mlmodel_langchain/test_vectorstore.py index 41a9dfc146..d406277f22 100644 --- a/tests/mlmodel_langchain/test_vectorstore.py +++ b/tests/mlmodel_langchain/test_vectorstore.py @@ -118,10 +118,8 @@ def test_vectorstore_modules_instrumented(): if not hasattr(getattr(class_, "asimilarity_search"), "__wrapped__"): uninstrumented_async_classes.append(class_name) - assert not uninstrumented_sync_classes, "Uninstrumented sync classes found: %s" % str(uninstrumented_sync_classes) - assert not uninstrumented_async_classes, "Uninstrumented async classes found: %s" % str( - uninstrumented_async_classes - ) + assert not uninstrumented_sync_classes, f"Uninstrumented sync classes found: {str(uninstrumented_sync_classes)}" + assert not uninstrumented_async_classes, f"Uninstrumented async classes found: {str(uninstrumented_async_classes)}" @reset_core_stats_engine() @@ -133,7 +131,7 @@ def test_vectorstore_modules_instrumented(): scoped_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], 
rollup_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -164,7 +162,7 @@ def test_pdf_pagesplitter_vectorstore_in_txn(set_trace_info, embedding_openai_cl scoped_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -224,7 +222,7 @@ def test_pdf_pagesplitter_vectorstore_ai_monitoring_disabled(set_trace_info, emb scoped_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -259,7 +257,7 @@ async def _test(): scoped_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -349,7 +347,7 @@ async def _test(): scoped_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -377,7 +375,7 @@ def test_vectorstore_error(set_trace_info, embedding_openai_client, loop): scoped_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/similarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -404,7 +402,7 @@ def test_vectorstore_error_no_content(set_trace_info, embedding_openai_client): scoped_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) @@ -437,7 +435,7 @@ async def _test(): scoped_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], rollup_metrics=[("Llm/vectorstore/LangChain/asimilarity_search", 1)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index 9bb88e40d0..161f8e16f3 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -704,7 +704,7 @@ def _simple_get(self): else: # If no matches found self.send_response(500) self.end_headers() - self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8")) + self.wfile.write(f"Unknown 
Prompt:\n{prompt}".encode("utf-8")) return # Send response code @@ -772,6 +772,6 @@ def openai_version(): if __name__ == "__main__": _MockExternalOpenAIServer = MockExternalOpenAIServer() with MockExternalOpenAIServer() as server: - print("MockExternalOpenAIServer serving on port %s" % str(server.port)) + print(f"MockExternalOpenAIServer serving on port {str(server.port)}") while True: pass # Serve forever diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index 54970e6f8c..4bccc6079d 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -93,16 +93,16 @@ def openai_clients(openai_version, MockExternalOpenAIServer): # noqa: F811 if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False): with MockExternalOpenAIServer() as server: if openai_version < (1, 0): - openai.api_base = "http://localhost:%d" % server.port + openai.api_base = f"http://localhost:{server.port}" openai.api_key = "NOT-A-REAL-SECRET" yield else: openai_sync = openai.OpenAI( - base_url="http://localhost:%d" % server.port, + base_url=f"http://localhost:{server.port}", api_key="NOT-A-REAL-SECRET", ) openai_async = openai.AsyncOpenAI( - base_url="http://localhost:%d" % server.port, + base_url=f"http://localhost:{server.port}", api_key="NOT-A-REAL-SECRET", ) yield (openai_sync, openai_async) diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py index 229ec44272..cbeb9cdd0d 100644 --- a/tests/mlmodel_openai/test_chat_completion.py +++ b/tests/mlmodel_openai/test_chat_completion.py @@ -136,7 +136,7 @@ @validate_transaction_metrics( name="test_chat_completion:test_openai_chat_completion_sync_with_llm_metadata", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -161,7 +161,7 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info): @validate_transaction_metrics( name="test_chat_completion:test_openai_chat_completion_sync_no_content", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -185,7 +185,7 @@ def test_openai_chat_completion_sync_no_content(set_trace_info): @validate_transaction_metrics( name="test_chat_completion:test_openai_chat_completion_sync_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -307,7 +307,7 @@ def test_openai_chat_completion_async_stream_monitoring_disabled(loop, set_trace scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -335,7 +335,7 @@ def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info): scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -361,7 +361,7 @@ def test_openai_chat_completion_async_no_content(loop, set_trace_info): @validate_transaction_metrics( 
name="test_chat_completion:test_openai_chat_completion_async_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_openai/test_chat_completion_stream.py b/tests/mlmodel_openai/test_chat_completion_stream.py index 3c32dd9f05..32420c78f1 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream.py +++ b/tests/mlmodel_openai/test_chat_completion_stream.py @@ -138,7 +138,7 @@ @validate_transaction_metrics( name="test_chat_completion_stream:test_openai_chat_completion_sync_with_llm_metadata", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -169,7 +169,7 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info): @validate_transaction_metrics( name="test_chat_completion_stream:test_openai_chat_completion_sync_no_content", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -199,7 +199,7 @@ def test_openai_chat_completion_sync_no_content(set_trace_info): @validate_transaction_metrics( name="test_chat_completion_stream:test_openai_chat_completion_sync_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -252,7 +252,7 @@ def test_openai_chat_completion_sync_no_llm_metadata(set_trace_info): @validate_transaction_metrics( "test_chat_completion_stream:test_openai_chat_completion_sync_ai_monitoring_streaming_disabled", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], @@ -330,7 +330,7 @@ async def consumer(): scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -365,7 +365,7 @@ async def consumer(): scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -399,7 +399,7 @@ async def consumer(): scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -430,7 +430,7 @@ async def consumer(): @validate_transaction_metrics( name="test_chat_completion_stream:test_openai_chat_completion_async_ai_monitoring_streaming_disabled", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], diff --git a/tests/mlmodel_openai/test_chat_completion_stream_v1.py 
b/tests/mlmodel_openai/test_chat_completion_stream_v1.py index 7d268ced9a..c94cbef558 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_v1.py @@ -148,7 +148,7 @@ @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_with_llm_metadata", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -185,7 +185,7 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_open @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_with_llm_metadata_with_streaming_response_lines", # custom_metrics=[ - # ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + # (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), # ], background_task=True, ) @@ -227,7 +227,7 @@ def test_openai_chat_completion_sync_with_llm_metadata_with_streaming_response_l @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_with_llm_metadata_with_streaming_response_bytes", # custom_metrics=[ - # ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + # (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), # ], background_task=True, ) @@ -269,7 +269,7 @@ def test_openai_chat_completion_sync_with_llm_metadata_with_streaming_response_b @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_with_llm_metadata_with_streaming_response_text", # custom_metrics=[ - # ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + # (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), # ], background_task=True, ) @@ -306,7 +306,7 @@ def test_openai_chat_completion_sync_with_llm_metadata_with_streaming_response_t @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_no_content", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -337,7 +337,7 @@ def test_openai_chat_completion_sync_no_content(set_trace_info, sync_openai_clie @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_in_txn_with_llm_metadata_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -391,7 +391,7 @@ def test_openai_chat_completion_sync_no_llm_metadata(set_trace_info, sync_openai @validate_transaction_metrics( "test_chat_completion_stream_v1:test_openai_chat_completion_sync_ai_monitoring_streaming_disabled", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], @@ -478,7 +478,7 @@ async def consumer(): scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -521,7 +521,7 @@ async def consumer(): # scoped_metrics=[("Llm/completion/OpenAI/create", 1)], # 
rollup_metrics=[("Llm/completion/OpenAI/create", 1)], # custom_metrics=[ - # ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + # (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), # ], background_task=True, ) @@ -569,7 +569,7 @@ async def consumer(): # scoped_metrics=[("Llm/completion/OpenAI/create", 1)], # rollup_metrics=[("Llm/completion/OpenAI/create", 1)], # custom_metrics=[ - # ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + # (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), # ], background_task=True, ) @@ -617,7 +617,7 @@ async def consumer(): # scoped_metrics=[("Llm/completion/OpenAI/create", 1)], # rollup_metrics=[("Llm/completion/OpenAI/create", 1)], # custom_metrics=[ - # ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + # (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), # ], background_task=True, ) @@ -657,7 +657,7 @@ async def consumer(): scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -690,7 +690,7 @@ async def consumer(): @validate_transaction_metrics( name="test_chat_completion_stream_v1:test_openai_chat_completion_async_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -721,7 +721,7 @@ async def consumer(): @validate_transaction_metrics( "test_chat_completion_stream_v1:test_openai_chat_completion_async_ai_monitoring_streaming_disabled", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], diff --git a/tests/mlmodel_openai/test_chat_completion_v1.py b/tests/mlmodel_openai/test_chat_completion_v1.py index cf0c4f8491..cbf631d550 100644 --- a/tests/mlmodel_openai/test_chat_completion_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_v1.py @@ -136,7 +136,7 @@ @validate_transaction_metrics( name="test_chat_completion_v1:test_openai_chat_completion_sync_with_llm_metadata", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -160,7 +160,7 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_open @validate_transaction_metrics( name="test_chat_completion_v1:test_openai_chat_completion_sync_with_llm_metadata_with_raw_response", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -185,7 +185,7 @@ def test_openai_chat_completion_sync_with_llm_metadata_with_raw_response(set_tra @validate_transaction_metrics( name="test_chat_completion_v1:test_openai_chat_completion_sync_no_content", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -209,7 +209,7 @@ def test_openai_chat_completion_sync_no_content(set_trace_info, sync_openai_clie @validate_transaction_metrics( 
name="test_chat_completion_v1:test_openai_chat_completion_sync_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -331,7 +331,7 @@ def test_openai_chat_completion_async_stream_monitoring_disabled(loop, set_trace scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -358,7 +358,7 @@ def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info, as scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -386,7 +386,7 @@ def test_openai_chat_completion_async_with_llm_metadata_with_raw_response(loop, scoped_metrics=[("Llm/completion/OpenAI/create", 1)], rollup_metrics=[("Llm/completion/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -412,7 +412,7 @@ def test_openai_chat_completion_async_with_llm_metadata_no_content(loop, set_tra @validate_transaction_metrics( name="test_chat_completion_v1:test_openai_chat_completion_async_in_txn_with_token_count", custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_openai/test_embeddings.py b/tests/mlmodel_openai/test_embeddings.py index 488f60d2e1..52378d1679 100644 --- a/tests/mlmodel_openai/test_embeddings.py +++ b/tests/mlmodel_openai/test_embeddings.py @@ -72,7 +72,7 @@ scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -96,7 +96,7 @@ def test_openai_embedding_sync(set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -120,7 +120,7 @@ def test_openai_embedding_sync_no_content(set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -158,7 +158,7 @@ def test_openai_embedding_sync_disabled_ai_monitoring_events(set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -184,7 +184,7 @@ def test_openai_embedding_async(loop, set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], 
rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -210,7 +210,7 @@ def test_openai_embedding_async_no_content(loop, set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py index 020648d0c2..d5c762ffe9 100644 --- a/tests/mlmodel_openai/test_embeddings_error.py +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -79,7 +79,7 @@ scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -118,7 +118,7 @@ def test_embeddings_invalid_request_error_no_model(set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -176,7 +176,7 @@ def test_embeddings_invalid_request_error_no_model_no_content(set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -213,7 +213,7 @@ def test_embeddings_invalid_request_error_invalid_model_with_token_count(set_tra scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -265,7 +265,7 @@ def test_embeddings_invalid_request_error_invalid_model(set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -320,7 +320,7 @@ def test_embeddings_authentication_error(monkeypatch, set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -360,7 +360,7 @@ def test_embeddings_wrong_api_key_error(monkeypatch, set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -401,7 +401,7 @@ def test_embeddings_invalid_request_error_no_model_async(loop, set_trace_info): scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], 
rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -442,7 +442,7 @@ def test_embeddings_invalid_request_error_no_model_async_no_content(loop, set_tr scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -478,7 +478,7 @@ def test_embeddings_invalid_request_error_invalid_model_with_token_count_async(s scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -512,7 +512,7 @@ def test_embeddings_invalid_request_error_invalid_model_async(loop, set_trace_in scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -549,7 +549,7 @@ def test_embeddings_authentication_error_async(loop, monkeypatch, set_trace_info scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_openai/test_embeddings_error_v1.py b/tests/mlmodel_openai/test_embeddings_error_v1.py index bb79986c46..23633f03b4 100644 --- a/tests/mlmodel_openai/test_embeddings_error_v1.py +++ b/tests/mlmodel_openai/test_embeddings_error_v1.py @@ -80,7 +80,7 @@ scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -116,7 +116,7 @@ def test_embeddings_invalid_request_error_no_model(set_trace_info, sync_openai_c scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -151,7 +151,7 @@ def test_embeddings_invalid_request_error_no_model_no_content(set_trace_info, sy scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -208,7 +208,7 @@ def test_embeddings_invalid_request_error_no_model_async(set_trace_info, async_o scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -244,7 +244,7 @@ def 
test_embeddings_invalid_request_error_invalid_model_with_token_count(set_tra scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -280,7 +280,7 @@ def test_embeddings_invalid_request_error_invalid_model(set_trace_info, sync_ope scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -319,7 +319,7 @@ def test_embeddings_invalid_request_error_invalid_model_async(set_trace_info, as scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -358,7 +358,7 @@ def test_embeddings_invalid_request_error_invalid_model_async_no_content(set_tra scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -417,7 +417,7 @@ def test_embeddings_invalid_request_error_invalid_model_async_with_token_count( scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -454,7 +454,7 @@ def test_embeddings_wrong_api_key_error(set_trace_info, monkeypatch, sync_openai scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -495,7 +495,7 @@ def test_embeddings_wrong_api_key_error_async(set_trace_info, monkeypatch, async scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -533,7 +533,7 @@ def test_embeddings_invalid_request_error_no_model_with_raw_response(set_trace_i scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -570,7 +570,7 @@ def test_embeddings_invalid_request_error_no_model_no_content_with_raw_response( scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -609,7 +609,7 @@ def test_embeddings_invalid_request_error_no_model_async_with_raw_response(set_t scoped_metrics=[("Llm/embedding/OpenAI/create", 
1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -647,7 +647,7 @@ def test_embeddings_invalid_request_error_invalid_model_with_token_count_with_ra scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -684,7 +684,7 @@ def test_embeddings_invalid_request_error_invalid_model_with_raw_response(set_tr scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -727,7 +727,7 @@ def test_embeddings_invalid_request_error_invalid_model_async_with_raw_response( scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -770,7 +770,7 @@ def test_embeddings_invalid_request_error_invalid_model_async_no_content_with_ra scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -813,7 +813,7 @@ def test_embeddings_invalid_request_error_invalid_model_async_with_token_count_w scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -850,7 +850,7 @@ def test_embeddings_wrong_api_key_error_with_raw_response(set_trace_info, monkey scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_openai/test_embeddings_v1.py b/tests/mlmodel_openai/test_embeddings_v1.py index 31540e75a7..f12ad002fb 100644 --- a/tests/mlmodel_openai/test_embeddings_v1.py +++ b/tests/mlmodel_openai/test_embeddings_v1.py @@ -69,7 +69,7 @@ scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -88,7 +88,7 @@ def test_openai_embedding_sync(set_trace_info, sync_openai_client): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -110,7 +110,7 @@ def test_openai_embedding_sync_with_raw_response(set_trace_info, sync_openai_cli 
scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -130,7 +130,7 @@ def test_openai_embedding_sync_no_content(set_trace_info, sync_openai_client): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -163,7 +163,7 @@ def test_openai_embedding_sync_ai_monitoring_disabled(sync_openai_client): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -185,7 +185,7 @@ def test_openai_embedding_async(loop, set_trace_info, async_openai_client): scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -210,7 +210,7 @@ def test_openai_embedding_async_with_raw_response(loop, set_trace_info, async_op scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) @@ -233,7 +233,7 @@ def test_openai_embedding_async_no_content(loop, set_trace_info, async_openai_cl scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], custom_metrics=[ - ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + (f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1), ], background_task=True, ) diff --git a/tests/mlmodel_sklearn/test_metric_scorers.py b/tests/mlmodel_sklearn/test_metric_scorers.py index 50557b8822..de16b764b8 100644 --- a/tests/mlmodel_sklearn/test_metric_scorers.py +++ b/tests/mlmodel_sklearn/test_metric_scorers.py @@ -33,7 +33,7 @@ ), ) def test_metric_scorer_attributes(metric_scorer_name, run_metric_scorer): - @validate_attributes("agent", ["DecisionTreeClassifier/TrainingStep/0/%s" % metric_scorer_name]) + @validate_attributes("agent", [f"DecisionTreeClassifier/TrainingStep/0/{metric_scorer_name}"]) @background_task() def _test(): run_metric_scorer(metric_scorer_name) @@ -57,8 +57,8 @@ def test_metric_scorer_training_steps_attributes(metric_scorer_name, run_metric_ @validate_attributes( "agent", [ - "DecisionTreeClassifier/TrainingStep/0/%s" % metric_scorer_name, - "DecisionTreeClassifier/TrainingStep/1/%s" % metric_scorer_name, + f"DecisionTreeClassifier/TrainingStep/0/{metric_scorer_name}", + f"DecisionTreeClassifier/TrainingStep/1/{metric_scorer_name}", ], ) @background_task() @@ -80,8 +80,8 @@ def test_metric_scorer_iterable_score_attributes(metric_scorer_name, kwargs, run @validate_attributes( "agent", [ - "DecisionTreeClassifier/TrainingStep/0/%s[0]" % metric_scorer_name, - "DecisionTreeClassifier/TrainingStep/0/%s[1]" % metric_scorer_name, + f"DecisionTreeClassifier/TrainingStep/0/{metric_scorer_name}[0]", + 
f"DecisionTreeClassifier/TrainingStep/0/{metric_scorer_name}[1]", ], ) @background_task() @@ -104,7 +104,7 @@ def _test(): ], ) def test_metric_scorer_attributes_unknown_model(metric_scorer_name): - @validate_attributes("agent", ["Unknown/TrainingStep/Unknown/%s" % metric_scorer_name]) + @validate_attributes("agent", [f"Unknown/TrainingStep/Unknown/{metric_scorer_name}"]) @background_task() def _test(): from sklearn import metrics diff --git a/tests/mlmodel_sklearn/test_prediction_stats.py b/tests/mlmodel_sklearn/test_prediction_stats.py index 5538119e7e..fd97da6c77 100644 --- a/tests/mlmodel_sklearn/test_prediction_stats.py +++ b/tests/mlmodel_sklearn/test_prediction_stats.py @@ -469,7 +469,7 @@ def test_prediction_stats_multilabel_output(force_uuid): stats = ["Mean", "Percentile25", "Percentile50", "Percentile75", "StandardDeviation", "Min", "Max", "Count"] metrics = [ ( - "MLModel/Sklearn/Named/MultiOutputClassifier/Predict/Feature/%s/%s" % (feature_col, stat_name), + f"MLModel/Sklearn/Named/MultiOutputClassifier/Predict/Feature/{feature_col}/{stat_name}", _test_prediction_stats_multilabel_output_tags, 1, ) @@ -479,7 +479,7 @@ def test_prediction_stats_multilabel_output(force_uuid): metrics.extend( [ ( - "MLModel/Sklearn/Named/MultiOutputClassifier/Predict/Label/%s/%s" % (label_col, stat_name), + f"MLModel/Sklearn/Named/MultiOutputClassifier/Predict/Label/{label_col}/{stat_name}", _test_prediction_stats_multilabel_output_tags, 1, ) diff --git a/tests/testing_support/db_settings.py b/tests/testing_support/db_settings.py index da4c0055f6..4411704778 100644 --- a/tests/testing_support/db_settings.py +++ b/tests/testing_support/db_settings.py @@ -39,8 +39,8 @@ def postgresql_settings(): "name": "postgres", "host": host, "port": 8080 + instance_num, - "procedure_name": "postgres_procedure_" + identifier, - "table_name": "postgres_table_" + identifier, + "procedure_name": f"postgres_procedure_{identifier}", + "table_name": f"postgres_table_{identifier}", } for instance_num in range(instances) ] @@ -186,7 +186,7 @@ def mongodb_settings(): host = "host.docker.internal" if "GITHUB_ACTIONS" in os.environ else "127.0.0.1" instances = 2 settings = [ - {"host": host, "port": 8080 + instance_num, "collection": "mongodb_collection_" + str(os.getpid())} + {"host": host, "port": 8080 + instance_num, "collection": f"mongodb_collection_{str(os.getpid())}"} for instance_num in range(instances) ] return settings diff --git a/tests/testing_support/external_fixtures.py b/tests/testing_support/external_fixtures.py index d8ed48658a..cbc6386681 100644 --- a/tests/testing_support/external_fixtures.py +++ b/tests/testing_support/external_fixtures.py @@ -90,14 +90,14 @@ def __getattr__(self, name): if synthetics_header: assert ( synthetics_header == external_headers["X-NewRelic-Synthetics"] - ), "synthetics_header=%r, external_headers=%r" % (synthetics_header, external_headers) + ), f"synthetics_header={synthetics_header!r}, external_headers={external_headers!r}" else: assert "X-NewRelic-Synthetics" not in external_headers if synthetics_info_header: assert ( synthetics_info_header == external_headers["X-NewRelic-Synthetics-Info"] - ), "synthetics_info_header=%r, external_headers=%r" % (synthetics_info_header, external_headers) + ), f"synthetics_info_header={synthetics_info_header!r}, external_headers={external_headers!r}" else: assert "X-NewRelic-Synthetics-Info" not in external_headers diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py index 85a75c64ef..a3a42ab9e2 100644 --- 
a/tests/testing_support/fixtures.py +++ b/tests/testing_support/fixtures.py @@ -501,10 +501,10 @@ def _bind_params(transaction, *args, **kwargs): for name in attribute_names: assert name in root_attribute_names, name for name in required_attr_names: - assert name in attribute_names, "name=%r, attributes=%r" % (name, attributes) + assert name in attribute_names, f"name={name!r}, attributes={attributes!r}" for name in forgone_attr_names: - assert name not in attribute_names, "name=%r, attributes=%r" % (name, attributes) + assert name not in attribute_names, f"name={name!r}, attributes={attributes!r}" return wrapped(*args, **kwargs) @@ -564,10 +564,10 @@ def _find_match(a, attributes): for required in required_attrs: match = _find_match(required, attributes) - assert match, "required=%r, attributes=%r" % (required, attributes) + assert match, f"required={required!r}, attributes={attributes!r}" result_dest = required.destinations & match.destinations - assert result_dest == required.destinations, "required=%r, attributes=%r" % (required, attributes) + assert result_dest == required.destinations, f"required={required!r}, attributes={attributes!r}" # Check that the name and value are NOT going to ANY of the # destinations provided as forgone, either because there is no @@ -579,7 +579,7 @@ def _find_match(a, attributes): if match: result_dest = forgone.destinations & match.destinations - assert result_dest == 0, "forgone=%r, attributes=%r" % (forgone, attributes) + assert result_dest == 0, f"forgone={forgone!r}, attributes={attributes!r}" return wrapped(*args, **kwargs) @@ -896,16 +896,12 @@ def _validate_application_exception_message(wrapped, instance, args, kwargs): def _validate_node_parenting(node, expected_node): - assert node.exclusive >= 0, "node.exclusive = %s" % node.exclusive + assert node.exclusive >= 0, f"node.exclusive = {node.exclusive}" expected_children = expected_node[1] def len_error(): - return ("len(node.children)=%s, len(expected_children)=%s, node.children=%s") % ( - len(node.children), - len(expected_children), - node.children, - ) + return f"len(node.children)={len(node.children)}, len(expected_children)={len(expected_children)}, node.children={node.children}" assert len(node.children) == len(expected_children), len_error() diff --git a/tests/testing_support/mock_external_grpc_server.py b/tests/testing_support/mock_external_grpc_server.py index 1511d55e47..912f91cdfe 100644 --- a/tests/testing_support/mock_external_grpc_server.py +++ b/tests/testing_support/mock_external_grpc_server.py @@ -31,7 +31,7 @@ class MockExternalgRPCServer(): def __init__(self, port=None, *args, **kwargs): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=4)) if port: - self.server.port = self.server.add_insecure_port('127.0.0.1:%s' % port) + self.server.port = self.server.add_insecure_port(f'127.0.0.1:{port}') self.port = port else: # If port not set, try to bind to a port until successful @@ -43,7 +43,7 @@ def __init__(self, port=None, *args, **kwargs): # Obtain random open port port = self.get_open_port() # Attempt to bind to port - self.server.port = self.server.add_insecure_port('127.0.0.1:%s' % port) + self.server.port = self.server.add_insecure_port(f'127.0.0.1:{port}') self.port = port except OSError as exc: # Reraise errors other than port already in use diff --git a/tests/testing_support/sample_applications.py b/tests/testing_support/sample_applications.py index ed0b83b89e..734201e3cd 100644 --- a/tests/testing_support/sample_applications.py +++ 
b/tests/testing_support/sample_applications.py @@ -81,7 +81,7 @@ def fully_featured_app(environ, start_response): if "db" in environ and int(environ["db"]) > 0: connection = db.connect(":memory:") for i in range(int(environ["db"]) - 1): - connection.execute("create table test_db%d (a, b, c)" % i) + connection.execute(f"create table test_db{i} (a, b, c)") if "external" in environ: for i in range(int(environ["external"])): diff --git a/tests/testing_support/validators/validate_apdex_metrics.py b/tests/testing_support/validators/validate_apdex_metrics.py index 942c366635..c8589417f3 100644 --- a/tests/testing_support/validators/validate_apdex_metrics.py +++ b/tests/testing_support/validators/validate_apdex_metrics.py @@ -36,7 +36,7 @@ def _capture_metrics(wrapped, instance, args, kwargs): return result def _validate(): - metric_name = 'Apdex/%s/%s' % (group, name) + metric_name = f'Apdex/{group}/{name}' key = (metric_name, '') metric = recorded_metrics.get(key) diff --git a/tests/testing_support/validators/validate_application_errors.py b/tests/testing_support/validators/validate_application_errors.py index 7f653d691e..ca72d22579 100644 --- a/tests/testing_support/validators/validate_application_errors.py +++ b/tests/testing_support/validators/validate_application_errors.py @@ -37,19 +37,15 @@ def _validate_application_errors(wrapped, instace, args, kwargs): expected = sorted(errors) captured = sorted([(e.type, e.message) for e in stats.error_data()]) - assert expected == captured, "expected=%r, captured=%r, errors=%r" % (expected, captured, app_errors) + assert expected == captured, f"expected={expected!r}, captured={captured!r}, errors={app_errors!r}" for e in app_errors: for name, value in required_params: - assert name in e.parameters["userAttributes"], "name=%r, params=%r" % (name, e.parameters) - assert e.parameters["userAttributes"][name] == value, "name=%r, value=%r, params=%r" % ( - name, - value, - e.parameters, - ) + assert name in e.parameters["userAttributes"], f"name={name!r}, params={e.parameters!r}" + assert e.parameters["userAttributes"][name] == value, f"name={name!r}, value={value!r}, params={e.parameters!r}" for name, value in forgone_params: - assert name not in e.parameters["userAttributes"], "name=%r, params=%r" % (name, e.parameters) + assert name not in e.parameters["userAttributes"], f"name={name!r}, params={e.parameters!r}" return result diff --git a/tests/testing_support/validators/validate_custom_events.py b/tests/testing_support/validators/validate_custom_events.py index bc746e785e..dd537edd06 100644 --- a/tests/testing_support/validators/validate_custom_events.py +++ b/tests/testing_support/validators/validate_custom_events.py @@ -66,13 +66,13 @@ def _check_event_attributes(expected, captured, mismatches): intrinsics = captured[0] if intrinsics["type"] != expected[0]["type"]: - mismatches.append("key: type, value:<%s><%s>" % (expected[0]["type"], captured[0].get("type", None))) + mismatches.append(f"key: type, value:<{expected[0]['type']}><{captured[0].get('type', None)}>") return False now = time.time() if not (isinstance(intrinsics["timestamp"], int) and intrinsics["timestamp"] <= 1000.0 * now): - mismatches.append("key: timestamp, value:<%s>" % intrinsics["timestamp"]) + mismatches.append(f"key: timestamp, value:<{intrinsics['timestamp']}>") return False captured_keys = set(captured[1].keys()) @@ -80,19 +80,19 @@ def _check_event_attributes(expected, captured, mismatches): extra_keys = captured_keys - expected_keys if extra_keys: - 
mismatches.append("extra_keys: %s" % str(tuple(extra_keys))) + mismatches.append(f"extra_keys: {str(tuple(extra_keys))}") return False for key, value in expected[1].items(): if key in captured[1]: captured_value = captured[1].get(key, None) else: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured[1].get(key, None))) + mismatches.append(f"key: {key}, value:<{value}><{captured[1].get(key, None)}>") return False if value is not None: if value != captured_value: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured_value)) + mismatches.append(f"key: {key}, value:<{value}><{captured_value}>") return False return True @@ -100,9 +100,9 @@ def _check_event_attributes(expected, captured, mismatches): def _event_details(matching_custom_events, captured, mismatches): details = [ - "matching_custom_events=%d" % matching_custom_events, - "mismatches=%s" % mismatches, - "captured_events=%s" % captured, + f"matching_custom_events={matching_custom_events}", + f"mismatches={mismatches}", + f"captured_events={captured}", ] return "\n".join(details) diff --git a/tests/testing_support/validators/validate_custom_metrics_outside_transaction.py b/tests/testing_support/validators/validate_custom_metrics_outside_transaction.py index bb9e4841e2..dc6dc7121d 100644 --- a/tests/testing_support/validators/validate_custom_metrics_outside_transaction.py +++ b/tests/testing_support/validators/validate_custom_metrics_outside_transaction.py @@ -52,13 +52,13 @@ def _validate(metrics, name, count): def _metrics_table(): out = [""] - out.append("Expected: {0}: {1}".format(key, count)) + out.append(f"Expected: {key}: {count}") for metric_key, metric_value in metrics.items(): - out.append("{0}: {1}".format(metric_key, metric_value[0])) + out.append(f"{metric_key}: {metric_value[0]}") return "\n".join(out) def _metric_details(): - return "metric=%r, count=%r" % (key, metric.call_count) + return f"metric={key!r}, count={metric.call_count!r}" if count is not None: assert metric is not None, _metrics_table() diff --git a/tests/testing_support/validators/validate_custom_parameters.py b/tests/testing_support/validators/validate_custom_parameters.py index 9dc92fb129..d772049be4 100644 --- a/tests/testing_support/validators/validate_custom_parameters.py +++ b/tests/testing_support/validators/validate_custom_parameters.py @@ -37,11 +37,11 @@ def _bind_params(transaction, *args, **kwargs): attrs[attr.name] = attr.value for name, value in required_params: - assert name in attrs, "name=%r, params=%r" % (name, attrs) - assert attrs[name] == value, "name=%r, value=%r, params=%r" % (name, value, attrs) + assert name in attrs, f"name={name!r}, params={attrs!r}" + assert attrs[name] == value, f"name={name!r}, value={value!r}, params={attrs!r}" for name, value in forgone_params: - assert name not in attrs, "name=%r, params=%r" % (name, attrs) + assert name not in attrs, f"name={name!r}, params={attrs!r}" return wrapped(*args, **kwargs) diff --git a/tests/testing_support/validators/validate_datastore_trace_inputs.py b/tests/testing_support/validators/validate_datastore_trace_inputs.py index 365a14ebda..e938b0a57a 100644 --- a/tests/testing_support/validators/validate_datastore_trace_inputs.py +++ b/tests/testing_support/validators/validate_datastore_trace_inputs.py @@ -41,21 +41,15 @@ def _bind_params(product, target, operation, host=None, port_path_or_id=None, da ) = _bind_params(*args, **kwargs) if target is not None: - assert captured_target == target, "%s didn't match expected %s" % (captured_target, target) 
+ assert captured_target == target, f"{captured_target} didn't match expected {target}" if operation is not None: - assert captured_operation == operation, "%s didn't match expected %s" % (captured_operation, operation) + assert captured_operation == operation, f"{captured_operation} didn't match expected {operation}" if host is not None: - assert captured_host == host, "%s didn't match expected %s" % (captured_host, host) + assert captured_host == host, f"{captured_host} didn't match expected {host}" if port_path_or_id is not None: - assert captured_port_path_or_id == port_path_or_id, "%s didn't match expected %s" % ( - captured_port_path_or_id, - port_path_or_id, - ) + assert captured_port_path_or_id == port_path_or_id, f"{captured_port_path_or_id} didn't match expected {port_path_or_id}" if database_name is not None: - assert captured_database_name == database_name, "%s didn't match expected %s" % ( - captured_database_name, - database_name, - ) + assert captured_database_name == database_name, f"{captured_database_name} didn't match expected {database_name}" return wrapped(*args, **kwargs) diff --git a/tests/testing_support/validators/validate_dimensional_metric_payload.py b/tests/testing_support/validators/validate_dimensional_metric_payload.py index 2f4f48c077..9b524e9505 100644 --- a/tests/testing_support/validators/validate_dimensional_metric_payload.py +++ b/tests/testing_support/validators/validate_dimensional_metric_payload.py @@ -42,7 +42,7 @@ def attribute_to_value(attribute): elif attribute_type == "string_value": return str(attribute_value) else: - raise TypeError("Invalid attribute type: %s" % attribute_type) + raise TypeError(f"Invalid attribute type: {attribute_type}") def payload_to_metrics(payload): @@ -77,7 +77,7 @@ def payload_to_metrics(payload): elif metric.get("summary"): sent_summary_metrics[metric_name] = metric else: - raise TypeError("Unknown metrics type for metric: %s" % metric) + raise TypeError(f"Unknown metrics type for metric: {metric}") return sent_summary_metrics, sent_count_metrics @@ -117,34 +117,22 @@ def _bind_params(method, payload=(), *args, **kwargs): if not count: if metric in sent_summary_metrics: data_points = data_points_to_dict(sent_summary_metrics[metric]["summary"]["data_points"]) - assert tags not in data_points, "(%s, %s) Unexpected but found." % (metric, tags and dict(tags)) + assert tags not in data_points, f"({metric}, {tags and dict(tags)}) Unexpected but found." else: - assert metric in sent_summary_metrics, "%s Not Found. Got: %s" % ( - metric, - list(sent_summary_metrics.keys()), - ) + assert metric in sent_summary_metrics, f"{metric} Not Found. Got: {list(sent_summary_metrics.keys())}" data_points = data_points_to_dict(sent_summary_metrics[metric]["summary"]["data_points"]) - assert tags in data_points, "(%s, %s) Not Found. Got: %s" % ( - metric, - tags and dict(tags), - list(data_points.keys()), - ) + assert tags in data_points, f"({metric}, {tags and dict(tags)}) Not Found. Got: {list(data_points.keys())}" # Validate metric format metric_container = data_points[tags] for key in ("start_time_unix_nano", "time_unix_nano", "count", "sum", "quantile_values"): - assert key in metric_container, "Invalid metric format. Missing key: %s" % key + assert key in metric_container, f"Invalid metric format. 
Missing key: {key}" quantile_values = metric_container["quantile_values"] assert len(quantile_values) == 2 # Min and Max # Validate metric count if count != "present": - assert int(metric_container["count"]) == count, "(%s, %s): Expected: %s Got: %s" % ( - metric, - tags and dict(tags), - count, - metric_container["count"], - ) + assert int(metric_container["count"]) == count, f"({metric}, {tags and dict(tags)}): Expected: {count} Got: {metric_container['count']}" for metric, tags, count in count_metrics: if isinstance(tags, dict): @@ -153,34 +141,22 @@ def _bind_params(method, payload=(), *args, **kwargs): if not count: if metric in sent_count_metrics: data_points = data_points_to_dict(sent_count_metrics[metric]["sum"]["data_points"]) - assert tags not in data_points, "(%s, %s) Unexpected but found." % (metric, tags and dict(tags)) + assert tags not in data_points, f"({metric}, {tags and dict(tags)}) Unexpected but found." else: - assert metric in sent_count_metrics, "%s Not Found. Got: %s" % ( - metric, - list(sent_count_metrics.keys()), - ) + assert metric in sent_count_metrics, f"{metric} Not Found. Got: {list(sent_count_metrics.keys())}" data_points = data_points_to_dict(sent_count_metrics[metric]["sum"]["data_points"]) - assert tags in data_points, "(%s, %s) Not Found. Got: %s" % ( - metric, - tags and dict(tags), - list(data_points.keys()), - ) + assert tags in data_points, f"({metric}, {tags and dict(tags)}) Not Found. Got: {list(data_points.keys())}" # Validate metric format assert sent_count_metrics[metric]["sum"].get("is_monotonic") assert sent_count_metrics[metric]["sum"].get("aggregation_temporality") == 1 metric_container = data_points[tags] for key in ("start_time_unix_nano", "time_unix_nano", "as_int"): - assert key in metric_container, "Invalid metric format. Missing key: %s" % key + assert key in metric_container, f"Invalid metric format. 
Missing key: {key}" # Validate metric count if count != "present": - assert int(metric_container["as_int"]) == count, "(%s, %s): Expected: %s Got: %s" % ( - metric, - tags and dict(tags), - count, - metric_container["count"], - ) + assert int(metric_container["as_int"]) == count, f"({metric}, {tags and dict(tags)}): Expected: {count} Got: {metric_container['count']}" return val diff --git a/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py b/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py index 2854a74782..98417ef5ba 100644 --- a/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py +++ b/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py @@ -54,17 +54,17 @@ def _validate(metrics, name, tags, count): def _metrics_table(): out = [""] - out.append("Expected: {0}: {1}".format(key, count)) + out.append(f"Expected: {key}: {count}") for metric_key, metric_container in metrics.items(): if isinstance(metric_container, dict): for metric_tags, metric_value in metric_container.items(): - out.append("{0}: {1}".format((metric_key, metric_tags), metric_value[0])) + out.append(f"{metric_key, metric_tags}: {metric_value[0]}") else: - out.append("{0}: {1}".format(metric_key, metric_container[0])) + out.append(f"{metric_key}: {metric_container[0]}") return "\n".join(out) def _metric_details(): - return "metric=%r, count=%r" % (key, metric.call_count) + return f"metric={key!r}, count={metric.call_count!r}" if count is not None: assert metric is not None, _metrics_table() diff --git a/tests/testing_support/validators/validate_error_event_attributes_outside_transaction.py b/tests/testing_support/validators/validate_error_event_attributes_outside_transaction.py index 74fbe9361a..5f2833df75 100644 --- a/tests/testing_support/validators/validate_error_event_attributes_outside_transaction.py +++ b/tests/testing_support/validators/validate_error_event_attributes_outside_transaction.py @@ -34,10 +34,7 @@ def _validate_error_event_attributes_outside_transaction(wrapped, instance, args event_data = list(instance.error_events) if num_errors is not None: - exc_message = "Expected: %d, Got: %d. Verify StatsEngine is being reset before using this validator." % ( - num_errors, - len(event_data), - ) + exc_message = f"Expected: {int(num_errors)}, Got: {len(event_data)}. Verify StatsEngine is being reset before using this validator." 
assert num_errors == len(event_data), exc_message for event in event_data: diff --git a/tests/testing_support/validators/validate_error_trace_attributes.py b/tests/testing_support/validators/validate_error_trace_attributes.py index 2b4ddf4ae1..eb171983b1 100644 --- a/tests/testing_support/validators/validate_error_trace_attributes.py +++ b/tests/testing_support/validators/validate_error_trace_attributes.py @@ -39,7 +39,7 @@ def _validate_error_trace_attributes(wrapped, instance, args, kwargs): def _validator_wrapper(wrapped, instance, args, kwargs): result = _validate_error_trace_attributes(wrapped)(*args, **kwargs) - assert target_error and target_error[0] is not None, "No error found with name %s" % err_name + assert target_error and target_error[0] is not None, f"No error found with name {err_name}" check_error_attributes(target_error[0].parameters, required_params, forgone_params, exact_attrs) return result diff --git a/tests/testing_support/validators/validate_internal_metrics.py b/tests/testing_support/validators/validate_internal_metrics.py index c685d9b5f5..92c4d1ffef 100644 --- a/tests/testing_support/validators/validate_internal_metrics.py +++ b/tests/testing_support/validators/validate_internal_metrics.py @@ -41,10 +41,10 @@ def _validate(name, count): metric = captured_metrics.get(name) def _metrics_table(): - return "metric=%r, metrics=%r" % (name, captured_metrics) + return f"metric={name!r}, metrics={captured_metrics!r}" def _metric_details(): - return "metric=%r, count=%r" % (name, metric.call_count) + return f"metric={name!r}, count={metric.call_count!r}" if count is not None and count > 0: assert metric is not None, _metrics_table() diff --git a/tests/testing_support/validators/validate_log_event_count.py b/tests/testing_support/validators/validate_log_event_count.py index 13f6c4e478..5eba9417d8 100644 --- a/tests/testing_support/validators/validate_log_event_count.py +++ b/tests/testing_support/validators/validate_log_event_count.py @@ -47,7 +47,7 @@ def _validate_log_event_count(wrapped, instance, args, kwargs): record_called[:] = [] recorded_logs[:] = [] - assert count == len(logs), "Expected %d, Got %d" % (count, len(logs)) + assert count == len(logs), f"Expected {count}, Got {len(logs)}" return val diff --git a/tests/testing_support/validators/validate_log_event_count_outside_transaction.py b/tests/testing_support/validators/validate_log_event_count_outside_transaction.py index e04c9df85a..484da0c084 100644 --- a/tests/testing_support/validators/validate_log_event_count_outside_transaction.py +++ b/tests/testing_support/validators/validate_log_event_count_outside_transaction.py @@ -48,7 +48,7 @@ def _validate_log_event_count_outside_transaction(wrapped, instance, args, kwarg record_called[:] = [] recorded_logs[:] = [] - assert count == len(logs), "Expected %d, Got %d" % (count, len(logs)) + assert count == len(logs), f"Expected {count}, Got {len(logs)}" return val diff --git a/tests/testing_support/validators/validate_log_events.py b/tests/testing_support/validators/validate_log_events.py index c62413f3da..5ecbcad25e 100644 --- a/tests/testing_support/validators/validate_log_events.py +++ b/tests/testing_support/validators/validate_log_events.py @@ -70,17 +70,17 @@ def _check_log_attributes(expected, required_attrs, forgone_attrs, captured, mis elif key in captured.attributes: captured_value = captured.attributes[key] else: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, getattr(captured, key, None))) + mismatches.append(f"key: {key}, 
value:<{value}><{getattr(captured, key, None)}>") return False if value is not None: if value != captured_value: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured_value)) + mismatches.append(f"key: {key}, value:<{value}><{captured_value}>") return False for key in required_attrs: if not hasattr(captured, key) and key not in captured.attributes: - mismatches.append("required_key: %s" % key) + mismatches.append(f"required_key: {key}") return False for key in forgone_attrs: @@ -90,16 +90,16 @@ def _check_log_attributes(expected, required_attrs, forgone_attrs, captured, mis elif key in captured.attributes: captured_value = captured.attributes[key] - mismatches.append("forgone_key: %s, value:<%s>" % (key, captured_value)) + mismatches.append(f"forgone_key: {key}, value:<{captured_value}>") return False return True def _log_details(matching_log_events, captured, mismatches): details = [ - "matching_log_events=%d" % matching_log_events, - "mismatches=%s" % mismatches, - "captured_events=%s" % captured, + f"matching_log_events={matching_log_events}", + f"mismatches={mismatches}", + f"captured_events={captured}", ] return "\n".join(details) diff --git a/tests/testing_support/validators/validate_log_events_outside_transaction.py b/tests/testing_support/validators/validate_log_events_outside_transaction.py index 053c137186..e2576c3fbe 100644 --- a/tests/testing_support/validators/validate_log_events_outside_transaction.py +++ b/tests/testing_support/validators/validate_log_events_outside_transaction.py @@ -67,17 +67,17 @@ def _check_log_attributes(expected, required_attrs, forgone_attrs, captured, mis elif key in captured.attributes: captured_value = captured.attributes[key] else: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, getattr(captured, key, None))) + mismatches.append(f"key: {key}, value:<{value}><{getattr(captured, key, None)}>") return False if value is not None: if value != captured_value: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured_value)) + mismatches.append(f"key: {key}, value:<{value}><{captured_value}>") return False for key in required_attrs: if not hasattr(captured, key) and key not in captured.attributes: - mismatches.append("required_key: %s" % key) + mismatches.append(f"required_key: {key}") return False for key in forgone_attrs: @@ -87,16 +87,16 @@ def _check_log_attributes(expected, required_attrs, forgone_attrs, captured, mis elif key in captured.attributes: captured_value = captured.attributes[key] - mismatches.append("forgone_key: %s, value:<%s>" % (key, captured_value)) + mismatches.append(f"forgone_key: {key}, value:<{captured_value}>") return False return True def _log_details(matching_log_events, captured, mismatches): details = [ - "matching_log_events=%d" % matching_log_events, - "mismatches=%s" % mismatches, - "captured_events=%s" % captured, + f"matching_log_events={matching_log_events}", + f"mismatches={mismatches}", + f"captured_events={captured}", ] return "\n".join(details) diff --git a/tests/testing_support/validators/validate_metric_payload.py b/tests/testing_support/validators/validate_metric_payload.py index 00e655191e..b758289961 100644 --- a/tests/testing_support/validators/validate_metric_payload.py +++ b/tests/testing_support/validators/validate_metric_payload.py @@ -56,7 +56,7 @@ def _bind_params(method, payload=(), *args, **kwargs): else: assert unscoped_metric in sent_metrics, unscoped_metric metric_values = sent_metrics[unscoped_metric] - assert metric_values[0] == count, "%s: Expected: %d 
Got: %d" % (metric, count, metric_values[0]) + assert metric_values[0] == count, f"{metric}: Expected: {count} Got: {metric_values[0]}" return val diff --git a/tests/testing_support/validators/validate_ml_event_payload.py b/tests/testing_support/validators/validate_ml_event_payload.py index 9933b85f6d..281c650a0e 100644 --- a/tests/testing_support/validators/validate_ml_event_payload.py +++ b/tests/testing_support/validators/validate_ml_event_payload.py @@ -32,7 +32,7 @@ def attribute_to_value(attribute): elif attribute_type == "string_value": return str(attribute_value) else: - raise TypeError("Invalid attribute type: %s" % attribute_type) + raise TypeError(f"Invalid attribute type: {attribute_type}") def payload_to_ml_events(payload): @@ -105,10 +105,10 @@ def _bind_params(method, payload=(), *args, **kwargs): all_inference_logs = normalize_logs(decoded_inference_payloads) for expected_event in ml_events.get("inference", []): - assert expected_event in all_inference_logs, "%s Not Found. Got: %s" % (expected_event, all_inference_logs) + assert expected_event in all_inference_logs, f"{expected_event} Not Found. Got: {all_inference_logs}" for expected_event in ml_events.get("apm", []): - assert expected_event in all_apm_logs, "%s Not Found. Got: %s" % (expected_event, all_apm_logs) + assert expected_event in all_apm_logs, f"{expected_event} Not Found. Got: {all_apm_logs}" return val return _validate_wrapper @@ -119,7 +119,7 @@ def normalize_logs(decoded_payloads): for sent_logs in decoded_payloads: for data_point in sent_logs: for key in ("time_unix_nano",): - assert key in data_point, "Invalid log format. Missing key: %s" % key + assert key in data_point, f"Invalid log format. Missing key: {key}" all_logs.append( {attr["key"]: attribute_to_value(attr["value"]) for attr in (data_point.get("attributes") or [])} ) diff --git a/tests/testing_support/validators/validate_ml_events.py b/tests/testing_support/validators/validate_ml_events.py index 37830a0851..e0e9f5a7fb 100644 --- a/tests/testing_support/validators/validate_ml_events.py +++ b/tests/testing_support/validators/validate_ml_events.py @@ -66,13 +66,13 @@ def _check_event_attributes(expected, captured, mismatches): intrinsics = captured[0] if intrinsics["type"] != expected[0]["type"]: - mismatches.append("key: type, value:<%s><%s>" % (expected[0]["type"], captured[0].get("type", None))) + mismatches.append(f"key: type, value:<{expected[0]['type']}><{captured[0].get('type', None)}>") return False now = time.time() if not (isinstance(intrinsics["timestamp"], int) and intrinsics["timestamp"] <= 1000.0 * now): - mismatches.append("key: timestamp, value:<%s>" % intrinsics["timestamp"]) + mismatches.append(f"key: timestamp, value:<{intrinsics['timestamp']}>") return False captured_keys = set(captured[1].keys()) @@ -80,19 +80,19 @@ def _check_event_attributes(expected, captured, mismatches): extra_keys = captured_keys - expected_keys if extra_keys: - mismatches.append("extra_keys: %s" % str(tuple(extra_keys))) + mismatches.append(f"extra_keys: {str(tuple(extra_keys))}") return False for key, value in expected[1].items(): if key in captured[1]: captured_value = captured[1].get(key, None) else: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured[1].get(key, None))) + mismatches.append(f"key: {key}, value:<{value}><{captured[1].get(key, None)}>") return False if value is not None: if value != captured_value: - mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured_value)) + mismatches.append(f"key: {key}, 
value:<{value}><{captured_value}>") return False return True @@ -100,9 +100,9 @@ def _check_event_attributes(expected, captured, mismatches): def _event_details(matching_ml_events, captured, mismatches): details = [ - "matching_ml_events=%d" % matching_ml_events, - "mismatches=%s" % mismatches, - "captured_events=%s" % captured, + f"matching_ml_events={matching_ml_events}", + f"mismatches={mismatches}", + f"captured_events={captured}", ] return "\n".join(details) diff --git a/tests/testing_support/validators/validate_non_transaction_error_event.py b/tests/testing_support/validators/validate_non_transaction_error_event.py index 97048d1038..713d9f235d 100644 --- a/tests/testing_support/validators/validate_non_transaction_error_event.py +++ b/tests/testing_support/validators/validate_non_transaction_error_event.py @@ -60,8 +60,8 @@ def _validate_non_transaction_error_event(wrapped, instace, args, kwargs): user_params = event[1] for name, value in required_user.items(): - assert name in user_params, "name=%r, params=%r" % (name, user_params) - assert user_params[name] == value, "name=%r, value=%r, params=%r" % (name, value, user_params) + assert name in user_params, f"name={name!r}, params={user_params!r}" + assert user_params[name] == value, f"name={name!r}, value={value!r}, params={user_params!r}" for param in forgone_user: assert param not in user_params diff --git a/tests/testing_support/validators/validate_span_events.py b/tests/testing_support/validators/validate_span_events.py index 6ffdbc42c3..f33a91e4ba 100644 --- a/tests/testing_support/validators/validate_span_events.py +++ b/tests/testing_support/validators/validate_span_events.py @@ -102,10 +102,10 @@ def stream_capture(wrapped, instance, args, kwargs): def _span_details(): details = [ - "matching_span_events=%d" % matching_span_events, - "count=%d" % count, - "mismatches=%s" % mismatches, - "captured_events=%s" % captured_events, + f"matching_span_events={matching_span_events}", + f"count={count}", + f"mismatches={mismatches}", + f"captured_events={captured_events}", ] return "\n".join(details) @@ -153,7 +153,7 @@ def _check_span_attributes(attrs, exact, expected, unexpected, mismatches): else: for key, value in exact.items(): if not check_value_equals(attrs, key, value): - mismatches.append("key: %s, value:<%s><%s>" % (key, value, attrs.get(key))) + mismatches.append(f"key: {key}, value:<{value}><{attrs.get(key)}>") break else: return True diff --git a/tests/testing_support/validators/validate_synthetics_event.py b/tests/testing_support/validators/validate_synthetics_event.py index 478582f9b4..58143f1333 100644 --- a/tests/testing_support/validators/validate_synthetics_event.py +++ b/tests/testing_support/validators/validate_synthetics_event.py @@ -46,14 +46,14 @@ def _flatten(event): flat_event = _flatten(event) - assert "nr.guid" in flat_event, "name=%r, event=%r" % ("nr.guid", flat_event) + assert "nr.guid" in flat_event, f"name=nr.guid, event={flat_event!r}" for name, value in required_attrs: - assert name in flat_event, "name=%r, event=%r" % (name, flat_event) - assert flat_event[name] == value, "name=%r, value=%r, event=%r" % (name, value, flat_event) + assert name in flat_event, f"name={name!r}, event={flat_event!r}" + assert flat_event[name] == value, f"name={name!r}, value={value!r}, event={flat_event!r}" for name in forgone_attrs: - assert name not in flat_event, "name=%r, event=%r" % (name, flat_event) + assert name not in flat_event, f"name={name!r}, event={flat_event!r}" except Exception as e: failed.append(e) diff 
--git a/tests/testing_support/validators/validate_synthetics_transaction_trace.py b/tests/testing_support/validators/validate_synthetics_transaction_trace.py index 7227d03272..f3040cb4dd 100644 --- a/tests/testing_support/validators/validate_synthetics_transaction_trace.py +++ b/tests/testing_support/validators/validate_synthetics_transaction_trace.py @@ -42,7 +42,7 @@ def _validate_synthetics_transaction_trace(wrapped, instance, args, kwargs): if should_exist: assert header_key in required_params - assert header[9] == required_params[header_key], "name=%r, header=%r" % (header_key, header) + assert header[9] == required_params[header_key], f"name={header_key!r}, header={header!r}" else: assert header[9] is None @@ -52,15 +52,11 @@ def _validate_synthetics_transaction_trace(wrapped, instance, args, kwargs): tt_intrinsics = pack_data[0][4]["intrinsics"] for name in required_params: - assert name in tt_intrinsics, "name=%r, intrinsics=%r" % (name, tt_intrinsics) - assert tt_intrinsics[name] == required_params[name], "name=%r, value=%r, intrinsics=%r" % ( - name, - required_params[name], - tt_intrinsics, - ) + assert name in tt_intrinsics, f"name={name!r}, intrinsics={tt_intrinsics!r}" + assert tt_intrinsics[name] == required_params[name], f"name={name!r}, value={required_params[name]!r}, intrinsics={tt_intrinsics!r}" for name in forgone_params: - assert name not in tt_intrinsics, "name=%r, intrinsics=%r" % (name, tt_intrinsics) + assert name not in tt_intrinsics, f"name={name!r}, intrinsics={tt_intrinsics!r}" return result diff --git a/tests/testing_support/validators/validate_time_metrics_outside_transaction.py b/tests/testing_support/validators/validate_time_metrics_outside_transaction.py index 1f6173a67d..91c13ace6b 100644 --- a/tests/testing_support/validators/validate_time_metrics_outside_transaction.py +++ b/tests/testing_support/validators/validate_time_metrics_outside_transaction.py @@ -54,13 +54,13 @@ def _validate(metrics, name, count): def _metrics_table(): out = [""] - out.append("Expected: {0}: {1}".format(key, count)) + out.append(f"Expected: {key}: {count}") for metric_key, metric_value in metrics.items(): - out.append("{0}: {1}".format(metric_key, metric_value[0])) + out.append(f"{metric_key}: {metric_value[0]}") return "\n".join(out) def _metric_details(): - return "metric=%r, count=%r" % (key, metric.call_count) + return f"metric={key!r}, count={metric.call_count!r}" if count is not None: assert metric is not None, _metrics_table() diff --git a/tests/testing_support/validators/validate_transaction_errors.py b/tests/testing_support/validators/validate_transaction_errors.py index 04a34d262d..7de5be503b 100644 --- a/tests/testing_support/validators/validate_transaction_errors.py +++ b/tests/testing_support/validators/validate_transaction_errors.py @@ -58,20 +58,16 @@ def _validate_transaction_errors(wrapped, instance, args, kwargs): else: compare_to = sorted([e.type for e in captured]) - assert expected == compare_to, "expected=%r, captured=%r, errors=%r" % (expected, compare_to, captured) + assert expected == compare_to, f"expected={expected!r}, captured={compare_to!r}, errors={captured!r}" for e in captured: assert e.span_id for name, value in required_params: - assert name in e.custom_params, "name=%r, params=%r" % (name, e.custom_params) - assert e.custom_params[name] == value, "name=%r, value=%r, params=%r" % ( - name, - value, - e.custom_params, - ) + assert name in e.custom_params, f"name={name!r}, params={e.custom_params!r}" + assert e.custom_params[name] == value, 
f"name={name!r}, value={value!r}, params={e.custom_params!r}" for name, value in forgone_params: - assert name not in e.custom_params, "name=%r, params=%r" % (name, e.custom_params) + assert name not in e.custom_params, f"name={name!r}, params={e.custom_params!r}" if e.type in expected_errors: assert e.expected is True diff --git a/tests/testing_support/validators/validate_transaction_metrics.py b/tests/testing_support/validators/validate_transaction_metrics.py index 0cb569d296..e1cbda85ed 100644 --- a/tests/testing_support/validators/validate_transaction_metrics.py +++ b/tests/testing_support/validators/validate_transaction_metrics.py @@ -38,20 +38,20 @@ def validate_transaction_metrics( if background_task: unscoped_metrics = [ "OtherTransaction/all", - "OtherTransaction/%s/%s" % (group, name), + f"OtherTransaction/{group}/{name}", "OtherTransactionTotalTime", - "OtherTransactionTotalTime/%s/%s" % (group, name), + f"OtherTransactionTotalTime/{group}/{name}", ] - transaction_scope_name = "OtherTransaction/%s/%s" % (group, name) + transaction_scope_name = f"OtherTransaction/{group}/{name}" else: unscoped_metrics = [ "WebTransaction", - "WebTransaction/%s/%s" % (group, name), + f"WebTransaction/{group}/{name}", "WebTransactionTotalTime", - "WebTransactionTotalTime/%s/%s" % (group, name), + f"WebTransactionTotalTime/{group}/{name}", "HttpDispatcher", ] - transaction_scope_name = "WebTransaction/%s/%s" % (group, name) + transaction_scope_name = f"WebTransaction/{group}/{name}" @function_wrapper def _validate_wrapper(wrapped, instance, args, kwargs): @@ -100,17 +100,17 @@ def _validate(metrics, name, scope, count): def _metrics_table(): out = [""] - out.append("Expected: {0}: {1}".format(key, count)) + out.append(f"Expected: {key}: {count}") for metric_key, metric_container in metrics.items(): if isinstance(metric_container, dict): for metric_tags, metric_value in metric_container.items(): - out.append("{0}: {1}".format((metric_key, metric_tags), metric_value[0])) + out.append(f"{metric_key, metric_tags}: {metric_value[0]}") else: - out.append("{0}: {1}".format(metric_key, metric_container[0])) + out.append(f"{metric_key}: {metric_container[0]}") return "\n".join(out) def _metric_details(): - return "metric=%r, count=%r" % (key, metric.call_count) + return f"metric={key!r}, count={metric.call_count!r}" if count is not None: assert metric is not None, _metrics_table() diff --git a/tests/testing_support/validators/validate_transaction_slow_sql_count.py b/tests/testing_support/validators/validate_transaction_slow_sql_count.py index 3e68a1ad05..250265dc73 100644 --- a/tests/testing_support/validators/validate_transaction_slow_sql_count.py +++ b/tests/testing_support/validators/validate_transaction_slow_sql_count.py @@ -24,7 +24,7 @@ def _validate_transaction_slow_sql_count(wrapped, instance, args, kwargs): with connections: slow_sql_traces = instance.slow_sql_data(connections) - assert len(slow_sql_traces) == num_slow_sql, "Expected: %s. Got: %d." % (num_slow_sql, len(slow_sql_traces)) + assert len(slow_sql_traces) == num_slow_sql, f"Expected: {num_slow_sql}. Got: {len(slow_sql_traces)}." 
return result diff --git a/tests/testing_support/validators/validate_tt_collector_json.py b/tests/testing_support/validators/validate_tt_collector_json.py index e411dd928f..331268535e 100644 --- a/tests/testing_support/validators/validate_tt_collector_json.py +++ b/tests/testing_support/validators/validate_tt_collector_json.py @@ -134,7 +134,7 @@ def _check_params_and_start_time(node): if segment_name.startswith("Datastore"): for key in datastore_params: assert key in params, key - assert params[key] == datastore_params[key], "Expected %s. Got %s." % (datastore_params[key], params[key]) + assert params[key] == datastore_params[key], f"Expected {datastore_params[key]}. Got {params[key]}." for key in datastore_forgone_params: assert key not in params, key diff --git a/tests/testing_support/validators/validate_tt_parameters.py b/tests/testing_support/validators/validate_tt_parameters.py index 7e8d8bd89c..33782e9d13 100644 --- a/tests/testing_support/validators/validate_tt_parameters.py +++ b/tests/testing_support/validators/validate_tt_parameters.py @@ -37,15 +37,11 @@ def _validate_tt_parameters(wrapped, instance, args, kwargs): tt_intrinsics = pack_data[0][4]["intrinsics"] for name in required_params: - assert name in tt_intrinsics, "name=%r, intrinsics=%r" % (name, tt_intrinsics) - assert tt_intrinsics[name] == required_params[name], "name=%r, value=%r, intrinsics=%r" % ( - name, - required_params[name], - tt_intrinsics, - ) + assert name in tt_intrinsics, f"name={name!r}, intrinsics={tt_intrinsics!r}" + assert tt_intrinsics[name] == required_params[name], f"name={name!r}, value={required_params[name]!r}, intrinsics={tt_intrinsics!r}" for name in forgone_params: - assert name not in tt_intrinsics, "name=%r, intrinsics=%r" % (name, tt_intrinsics) + assert name not in tt_intrinsics, f"name={name!r}, intrinsics={tt_intrinsics!r}" return result From 8c1a4b9c06910bd8ccf7b8c008ea8c686003a51f Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:25:28 -0700 Subject: [PATCH 5/7] Remove frameworks that only work in Python 2.7 (#1205) * Remove umemcache * Remove oursql * Remove pywapi * Remove pywapi * Remove pylons * Remove web2py * Remove references to webpy * Remove weberror --- codecov.yml | 7 - newrelic/config.py | 39 ---- newrelic/hooks/database_oursql.py | 43 ----- newrelic/hooks/datastore_umemcache.py | 80 -------- newrelic/hooks/external_pywapi.py | 37 ---- newrelic/hooks/framework_pylons.py | 85 --------- newrelic/hooks/framework_web2py.py | 253 -------------------------- newrelic/hooks/middleware_weberror.py | 31 ---- 8 files changed, 575 deletions(-) delete mode 100644 newrelic/hooks/database_oursql.py delete mode 100644 newrelic/hooks/datastore_umemcache.py delete mode 100644 newrelic/hooks/external_pywapi.py delete mode 100644 newrelic/hooks/framework_pylons.py delete mode 100644 newrelic/hooks/framework_web2py.py delete mode 100644 newrelic/hooks/middleware_weberror.py diff --git a/codecov.yml b/codecov.yml index 6ca30f6407..0f5d4b4bce 100644 --- a/codecov.yml +++ b/codecov.yml @@ -6,20 +6,13 @@ ignore: - "newrelic/hooks/adapter_meinheld.py" - "newrelic/hooks/adapter_paste.py" - "newrelic/hooks/component_piston.py" - - "newrelic/hooks/database_oursql.py" - "newrelic/hooks/database_psycopg2ct.py" - "newrelic/hooks/datastore_aioredis.py" - "newrelic/hooks/datastore_aredis.py" - "newrelic/hooks/datastore_motor.py" - "newrelic/hooks/datastore_pyelasticsearch.py" - - "newrelic/hooks/datastore_umemcache.py" - 
"newrelic/hooks/external_dropbox.py" - "newrelic/hooks/external_facepy.py" - - "newrelic/hooks/external_pywapi.py" - "newrelic/hooks/external_xmlrpclib.py" - - "newrelic/hooks/framework_pylons.py" - - "newrelic/hooks/framework_web2py.py" - - "newrelic/hooks/framework_webpy.py" - - "newrelic/hooks/middleware_weberror.py" - "newrelic/packages/*" - "newrelic/packages/**/*" diff --git a/newrelic/config.py b/newrelic/config.py index 5b657bbb9a..e3d52978f9 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -2795,26 +2795,6 @@ def _process_module_builtin_defaults(): "sentry_sdk.integrations.asgi", "newrelic.hooks.component_sentry", "instrument_sentry_sdk_integrations_asgi" ) - # _process_module_definition('web.application', - # 'newrelic.hooks.framework_webpy') - # _process_module_definition('web.template', - # 'newrelic.hooks.framework_webpy') - - _process_module_definition( - "gluon.compileapp", - "newrelic.hooks.framework_web2py", - "instrument_gluon_compileapp", - ) - _process_module_definition( - "gluon.restricted", - "newrelic.hooks.framework_web2py", - "instrument_gluon_restricted", - ) - _process_module_definition("gluon.main", "newrelic.hooks.framework_web2py", "instrument_gluon_main") - _process_module_definition("gluon.template", "newrelic.hooks.framework_web2py", "instrument_gluon_template") - _process_module_definition("gluon.tools", "newrelic.hooks.framework_web2py", "instrument_gluon_tools") - _process_module_definition("gluon.http", "newrelic.hooks.framework_web2py", "instrument_gluon_http") - _process_module_definition("httpx._client", "newrelic.hooks.external_httpx", "instrument_httpx_client") _process_module_definition("gluon.contrib.feedparser", "newrelic.hooks.external_feedparser") @@ -2978,10 +2958,6 @@ def _process_module_builtin_defaults(): _process_module_definition("grpc._channel", "newrelic.hooks.framework_grpc", "instrument_grpc__channel") _process_module_definition("grpc._server", "newrelic.hooks.framework_grpc", "instrument_grpc_server") - _process_module_definition("pylons.wsgiapp", "newrelic.hooks.framework_pylons") - _process_module_definition("pylons.controllers.core", "newrelic.hooks.framework_pylons") - _process_module_definition("pylons.templating", "newrelic.hooks.framework_pylons") - _process_module_definition("bottle", "newrelic.hooks.framework_bottle", "instrument_bottle") _process_module_definition( @@ -3086,7 +3062,6 @@ def _process_module_builtin_defaults(): _process_module_definition("mysql.connector", "newrelic.hooks.database_mysql", "instrument_mysql_connector") _process_module_definition("MySQLdb", "newrelic.hooks.database_mysqldb", "instrument_mysqldb") - _process_module_definition("oursql", "newrelic.hooks.database_oursql", "instrument_oursql") _process_module_definition("pymysql", "newrelic.hooks.database_pymysql", "instrument_pymysql") _process_module_definition("pyodbc", "newrelic.hooks.database_pyodbc", "instrument_pyodbc") @@ -3171,7 +3146,6 @@ def _process_module_builtin_defaults(): ) _process_module_definition("memcache", "newrelic.hooks.datastore_memcache", "instrument_memcache") - _process_module_definition("umemcache", "newrelic.hooks.datastore_umemcache", "instrument_umemcache") _process_module_definition( "pylibmc.client", "newrelic.hooks.datastore_pylibmc", @@ -4407,8 +4381,6 @@ def _process_module_builtin_defaults(): "instrument_flup_server_scgi_base", ) - _process_module_definition("pywapi", "newrelic.hooks.external_pywapi", "instrument_pywapi") - _process_module_definition( "meinheld.server", 
"newrelic.hooks.adapter_meinheld", @@ -4471,17 +4443,6 @@ def _process_module_builtin_defaults(): _process_module_definition("gevent.monkey", "newrelic.hooks.coroutines_gevent", "instrument_gevent_monkey") - _process_module_definition( - "weberror.errormiddleware", - "newrelic.hooks.middleware_weberror", - "instrument_weberror_errormiddleware", - ) - _process_module_definition( - "weberror.reporter", - "newrelic.hooks.middleware_weberror", - "instrument_weberror_reporter", - ) - _process_module_definition("thrift.transport.TSocket", "newrelic.hooks.external_thrift") _process_module_definition( diff --git a/newrelic/hooks/database_oursql.py b/newrelic/hooks/database_oursql.py deleted file mode 100644 index e8c5811538..0000000000 --- a/newrelic/hooks/database_oursql.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from newrelic.common.object_wrapper import wrap_object -from newrelic.api.database_trace import register_database_client - -from newrelic.hooks.database_mysqldb import ConnectionFactory - -def instance_info(args, kwargs): - def _bind_params(host=None, user=None, passwd=None, db=None, - port=None, *args, **kwargs): - return host, port, db - - host, port, db = _bind_params(*args, **kwargs) - - return (host, port, db) - -def instrument_oursql(module): - register_database_client(module, database_product='MySQL', - quoting_style='single+double', explain_query='explain', - explain_stmts=('select',), instance_info=instance_info) - - wrap_object(module, 'connect', ConnectionFactory, (module,)) - - # The connect() function is actually aliased with Connect() and - # Connection, the later actually being the Connection type object. - # Instrument Connect(), but don't instrument Connection in case that - # interferes with direct type usage. If people are using the - # Connection object directly, they should really be using connect(). - - if hasattr(module, 'Connect'): - wrap_object(module, 'Connect', ConnectionFactory, (module,)) diff --git a/newrelic/hooks/datastore_umemcache.py b/newrelic/hooks/datastore_umemcache.py deleted file mode 100644 index 02ff073d2f..0000000000 --- a/newrelic/hooks/datastore_umemcache.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from newrelic.api.datastore_trace import datastore_trace -from newrelic.common.object_wrapper import wrap_function_wrapper, ObjectProxy - -class _nr_umemcache_Client_proxy_(ObjectProxy): - - @datastore_trace('Memcached', None, 'set') - def set(self, *args, **kwargs): - return self.__wrapped__.set(*args, **kwargs) - - @datastore_trace('Memcached', None, 'get') - def get(self, *args, **kwargs): - return self.__wrapped__.get(*args, **kwargs) - - @datastore_trace('Memcached', None, 'gets') - def gets(self, *args, **kwargs): - return self.__wrapped__.gets(*args, **kwargs) - - @datastore_trace('Memcached', None, 'get_multi') - def get_multi(self, *args, **kwargs): - return self.__wrapped__.get_multi(*args, **kwargs) - - @datastore_trace('Memcached', None, 'gets_multi') - def gets_multi(self, *args, **kwargs): - return self.__wrapped__.gets_multi(*args, **kwargs) - - @datastore_trace('Memcached', None, 'add') - def add(self, *args, **kwargs): - return self.__wrapped__.add(*args, **kwargs) - - @datastore_trace('Memcached', None, 'replace') - def replace(self, *args, **kwargs): - return self.__wrapped__.replace(*args, **kwargs) - - @datastore_trace('Memcached', None, 'append') - def append(self, *args, **kwargs): - return self.__wrapped__.append(*args, **kwargs) - - @datastore_trace('Memcached', None, 'prepend') - def prepend(self, *args, **kwargs): - return self.__wrapped__.prepend(*args, **kwargs) - - @datastore_trace('Memcached', None, 'delete') - def delete(self, *args, **kwargs): - return self.__wrapped__.delete(*args, **kwargs) - - @datastore_trace('Memcached', None, 'cas') - def cas(self, *args, **kwargs): - return self.__wrapped__.cas(*args, **kwargs) - - @datastore_trace('Memcached', None, 'incr') - def incr(self, *args, **kwargs): - return self.__wrapped__.incr(*args, **kwargs) - - @datastore_trace('Memcached', None, 'decr') - def decr(self, *args, **kwargs): - return self.__wrapped__.decr(*args, **kwargs) - - @datastore_trace('Memcached', None, 'stats') - def stats(self, *args, **kwargs): - return self.__wrapped__.stats(*args, **kwargs) - -def _nr_umemcache_Client_wrapper_(wrapped, instance, args, kwargs): - return _nr_umemcache_Client_proxy_(wrapped(*args, **kwargs)) - -def instrument_umemcache(module): - wrap_function_wrapper(module, 'Client', _nr_umemcache_Client_wrapper_) diff --git a/newrelic/hooks/external_pywapi.py b/newrelic/hooks/external_pywapi.py deleted file mode 100644 index 1820492c70..0000000000 --- a/newrelic/hooks/external_pywapi.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from newrelic.agent import wrap_external_trace - -def instrument_pywapi(module): - - if hasattr(module, 'get_weather_from_weather_com'): - wrap_external_trace(module, 'get_weather_from_weather_com', 'pywapi', - module.WEATHER_COM_URL) - - if hasattr(module, 'get_countries_from_google'): - wrap_external_trace(module, 'get_countries_from_google', 'pywapi', - module.GOOGLE_COUNTRIES_URL) - - if hasattr(module, 'get_cities_from_google'): - wrap_external_trace(module, 'get_cities_from_google', 'pywapi', - module.GOOGLE_CITIES_URL) - - if hasattr(module, 'get_weather_from_yahoo'): - wrap_external_trace(module, 'get_weather_from_yahoo', 'pywapi', - module.YAHOO_WEATHER_URL) - - if hasattr(module, 'get_weather_from_noaa'): - wrap_external_trace(module, 'get_weather_from_noaa', 'pywapi', - module.NOAA_WEATHER_URL) diff --git a/newrelic/hooks/framework_pylons.py b/newrelic/hooks/framework_pylons.py deleted file mode 100644 index 2880eb10b0..0000000000 --- a/newrelic/hooks/framework_pylons.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import newrelic.api.transaction -import newrelic.api.transaction_name -import newrelic.api.function_trace -import newrelic.api.error_trace -import newrelic.common.object_wrapper -from newrelic.common.object_names import callable_name -import newrelic.api.import_hook - -from newrelic.api.time_trace import notice_error - -def name_controller(self, environ, start_response): - action = environ['pylons.routes_dict']['action'] - return f"{callable_name(self)}.{action}" - -class capture_error(): - def __init__(self, wrapped): - if isinstance(wrapped, tuple): - (instance, wrapped) = wrapped - else: - instance = None - self.__instance = instance - self.__wrapped = wrapped - - def __get__(self, instance, klass): - if instance is None: - return self - descriptor = self.__wrapped.__get__(instance, klass) - return self.__class__((instance, descriptor)) - - def __call__(self, *args, **kwargs): - current_transaction = newrelic.api.transaction.current_transaction() - if current_transaction: - webob_exc = newrelic.api.import_hook.import_module('webob.exc') - try: - return self.__wrapped(*args, **kwargs) - except webob_exc.HTTPException: - raise - except: # Catch all - notice_error() - raise - else: - return self.__wrapped(*args, **kwargs) - - def __getattr__(self, name): - return getattr(self.__wrapped, name) - -def instrument(module): - - if module.__name__ == 'pylons.wsgiapp': - newrelic.api.error_trace.wrap_error_trace(module, 'PylonsApp.__call__') - - elif module.__name__ == 'pylons.controllers.core': - newrelic.api.transaction_name.wrap_transaction_name( - module, 'WSGIController.__call__', name_controller) - newrelic.api.function_trace.wrap_function_trace( - module, 'WSGIController.__call__') - - def name_WSGIController_perform_call(self, func, args): - return callable_name(func) - - newrelic.api.function_trace.wrap_function_trace( - module, 'WSGIController._perform_call', - name_WSGIController_perform_call) - 
newrelic.common.object_wrapper.wrap_object( - module, 'WSGIController._perform_call', capture_error) - - elif module.__name__ == 'pylons.templating': - - newrelic.api.function_trace.wrap_function_trace(module, 'render_genshi') - newrelic.api.function_trace.wrap_function_trace(module, 'render_mako') - newrelic.api.function_trace.wrap_function_trace(module, 'render_jinja2') diff --git a/newrelic/hooks/framework_web2py.py b/newrelic/hooks/framework_web2py.py deleted file mode 100644 index 73141f287a..0000000000 --- a/newrelic/hooks/framework_web2py.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import os - -import newrelic.api.transaction -import newrelic.api.import_hook -import newrelic.api.wsgi_application -import newrelic.api.external_trace -import newrelic.api.function_trace -import newrelic.api.transaction_name -import newrelic.api.object_wrapper -import newrelic.common.object_wrapper -import newrelic.api.pre_function - -from newrelic.api.time_trace import notice_error - -def instrument_gluon_compileapp(module): - - # Wrap the run_models_in() function as first phase - # in executing a request after URL has been mapped - # to a specific view. The name given to the web - # transaction is combination of the application name - # and view path. - - def transaction_name_run_models_in(environment): - return f"{environment['request'].application}::{environment['response'].view}" - - newrelic.api.transaction_name.wrap_transaction_name(module, - 'run_models_in', name=transaction_name_run_models_in, - group='Web2Py') - - # Wrap functions which coordinate the execution of - # the separate models, controller and view phases of - # the request handling. This is done for timing how - # long taken within these phases of request - # handling. - - def name_function_run_models_in(environment): - return f"{environment['request'].controller}/{environment['request'].function}" - - newrelic.api.function_trace.wrap_function_trace(module, - 'run_models_in', name=name_function_run_models_in, - group='Python/Web2Py/Models') - - def name_function_run_controller_in(controller, function, environment): - return f'{controller}/{function}' - - newrelic.api.function_trace.wrap_function_trace(module, - 'run_controller_in', name=name_function_run_controller_in, - group='Python/Web2Py/Controller') - - def name_function_run_view_in(environment): - return f"{environment['request'].controller}/{environment['request'].function}" - - newrelic.api.function_trace.wrap_function_trace(module, - 'run_view_in', name=name_function_run_view_in, - group='Python/Web2Py/View') - -def instrument_gluon_restricted(module): - - # Wrap function which executes all the compiled - # Python code files. The name used corresponds to - # path of the resource within the context of the - # application directory. 
The group used is either - # 'Script/Execute' or 'Template/Render' based on - # whether we can work out whether code object - # corresponded to compiled template file or not. - - def name_function_restricted(code, environment={}, layer='Unknown'): - if 'request' in environment: - folder = environment['request'].folder - if layer.startswith(folder): - return layer[len(folder):] - return layer - - def group_function_restricted(code, environment={}, layer='Unknown'): - parts = layer.split('.') - if parts[-1] in ['html'] or parts[-2:] in [['html','pyc']] : - return 'Template/Render' - return 'Script/Execute' - - newrelic.api.function_trace.wrap_function_trace(module, 'restricted', - name=name_function_restricted, group=group_function_restricted) - -def instrument_gluon_main(module): - - newrelic.api.wsgi_application.wrap_wsgi_application(module, 'wsgibase') - - # Wrap main function which dispatches the various - # phases of a request in order to capture any - # errors. Need to use a custom object wrapper as we - # need to ignore exceptions of type HTTP as that - # type of exception is used to programmatically - # return a valid response. For the case of a 404, - # where we want to name the web transactions as - # such, we pick that up later. - - class error_serve_controller(): - def __init__(self, wrapped): - newrelic.api.object_wrapper.update_wrapper(self, wrapped) - self._nr_next_object = wrapped - if not hasattr(self, '_nr_last_object'): - self._nr_last_object = wrapped - def __call__(self, request, response, session): - txn = newrelic.api.transaction.current_transaction() - if txn: - HTTP = newrelic.api.import_hook.import_module('gluon.http').HTTP - try: - return self._nr_next_object(request, response, session) - except HTTP: - raise - except: # Catch all - notice_error() - raise - else: - return self._nr_next_object(request, response, session) - def __getattr__(self, name): - return getattr(self._nr_next_object, name) - - newrelic.common.object_wrapper.wrap_object( - module, 'serve_controller', error_serve_controller) - -def instrument_gluon_template(module): - - # Wrap parsing/compilation of template files, using - # the name of the template relative to the context - # of the application it is contained in. Use a group - # of 'Template/Compile'. Rendering of template is - # picked up when executing the code object created - # from this compilation step. - - def name_function_parse_template(filename, path='views/', - context=dict(), *args, **kwargs): - if 'request' in context: - folder = context['request'].folder - if path.startswith(folder): - return f'{path[len(folder):]}/{filename}' - else: - return f'{path}/{filename}' - - newrelic.api.function_trace.wrap_function_trace(module, 'parse_template', - name=name_function_parse_template, group='Template/Compile') - -def instrument_gluon_tools(module): - - # Wrap utility function for fetching an external URL. - - def url_external_fetch(url, *args, **kwargs): - return url - - newrelic.api.external_trace.wrap_external_trace( - module, 'fetch', library='gluon.tools.fetch', - url=url_external_fetch) - - # Wrap utility function for fetching GEOCODE data. - # The URL in this case is hardwired in code to point - # at Google service and not part of arguments to we - # need to hard code it here as well. - - newrelic.api.external_trace.wrap_external_trace( - module, 'geocode', library='gluon.tools.geocode', - url='http://maps.google.com/maps/geo') - -def instrument_gluon_http(module): - - # This one is tricky. 
The only way to pick up that a - # static file is being served up is to wrap the to() - # method of a HTTP response object when actual - # response is being generated. We need to qualify - # this so only actually do anything when called from - # the wsgibase() function within 'gluon.main'. To do - # this need to go stack diving and look back at the - # parent stack frame. Doing that we can look at - # details of where calling code is located as well - # as sneak a peak at local variables in the calling - # stack to determine if we were handling a static - # file and what type of file was being served. - # Normally static file URLs would be left alone but - # don't want to risk black hole rule and instead - # generate custom wildcard URLs with precedence to - # extension. When can work out how to reliably get - # the application name then can incorporate that - # into the pattern as well in style used for web - # transaction names for views. The application name - # should normally be the first path segment, but the - # fact that arbitrary rewrite rules can be used may - # mean that isn't always the case. - - def transaction_name_name_not_found(response, *args, **kwargs): - txn = newrelic.api.transaction.current_transaction() - if not txn: - return - - frame = sys._getframe(1) - - if os.path.split(frame.f_code.co_filename)[-1] == 'pre_function.py': - frame = frame.f_back - - if os.path.split(frame.f_code.co_filename)[-1] != 'main.py': - return - - if frame.f_code.co_name != 'wsgibase': - return - - if response.status == 400: - txn.set_transaction_name('400', 'Uri') - return - - if response.status == 404: - txn.set_transaction_name('404', 'Uri') - return - - if 'static_file' not in frame.f_locals: - return - - if frame.f_locals['static_file']: - if 'environ' in frame.f_locals: - environ = frame.f_locals['environ'] - path_info = environ.get('PATH_INFO', '') - - if path_info: - parts = os.path.split(path_info) - if parts[1] == '': - if parts[0] == '/': - txn.set_transaction_name('*', 'Web2Py') - else: - name = f"{parts[0].lstrip('/')}/*" - txn.set_transaction_name(name, 'Web2Py') - else: - extension = os.path.splitext(parts[1])[-1] - name = f"{parts[0].lstrip('/')}/*{extension}" - txn.set_transaction_name(name, 'Web2Py') - else: - txn.set_transaction_name('*', 'Web2Py') - - else: - txn.set_transaction_name('*', 'Web2Py') - - newrelic.api.pre_function.wrap_pre_function( - module, 'HTTP.to', transaction_name_name_not_found) diff --git a/newrelic/hooks/middleware_weberror.py b/newrelic/hooks/middleware_weberror.py deleted file mode 100644 index 1b575805f9..0000000000 --- a/newrelic/hooks/middleware_weberror.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from newrelic.api.external_trace import wrap_external_trace -from newrelic.api.function_trace import wrap_function_trace - -def instrument_weberror_errormiddleware(module): - - wrap_function_trace(module, 'handle_exception') - -def instrument_weberror_reporter(module): - - def smtp_url(reporter, *args, **kwargs): - return f"smtp://{reporter.smtp_server}" - - wrap_external_trace(module, 'EmailReporter.report', 'weberror', smtp_url) - wrap_function_trace(module, 'EmailReporter.report') - - wrap_function_trace(module, 'LogReporter.report') - wrap_function_trace(module, 'FileReporter.report') From 21f033f7d583949c7c7e8109268dbd23c97c8ccc Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Mon, 23 Sep 2024 10:21:03 -0700 Subject: [PATCH 6/7] Lint f-strings --- newrelic/hooks/messagebroker_confluentkafka.py | 4 ++-- newrelic/hooks/messagebroker_kafkapython.py | 4 ++-- tests/datastore_aiomcache/test_aiomcache.py | 2 +- tests/datastore_pymemcache/test_memcache.py | 4 ++-- tests/messagebroker_confluentkafka/test_consumer.py | 4 ++-- tests/messagebroker_confluentkafka/test_producer.py | 2 +- tests/messagebroker_kafkapython/test_consumer.py | 4 ++-- tests/messagebroker_kafkapython/test_producer.py | 2 +- tests/mlmodel_langchain/test_chain.py | 2 +- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/newrelic/hooks/messagebroker_confluentkafka.py b/newrelic/hooks/messagebroker_confluentkafka.py index 3040155675..626056e1dc 100644 --- a/newrelic/hooks/messagebroker_confluentkafka.py +++ b/newrelic/hooks/messagebroker_confluentkafka.py @@ -61,7 +61,7 @@ def wrap_Producer_produce(wrapped, instance, args, kwargs): transaction.add_messagebroker_info("Confluent-Kafka", get_package_version("confluent-kafka")) if hasattr(instance, "_nr_bootstrap_servers"): for server_name in instance._nr_bootstrap_servers: - transaction.record_custom_metric("MessageBroker/Kafka/Nodes/%s/Produce/%s" % (server_name, topic), 1) + transaction.record_custom_metric(f"MessageBroker/Kafka/Nodes/{server_name}/Produce/{topic}", 1) with MessageTrace( library="Kafka", @@ -173,7 +173,7 @@ def wrap_Consumer_poll(wrapped, instance, args, kwargs): if hasattr(instance, "_nr_bootstrap_servers"): for server_name in instance._nr_bootstrap_servers: transaction.record_custom_metric( - "MessageBroker/Kafka/Nodes/%s/Consume/%s" % (server_name, destination_name), 1 + f"MessageBroker/Kafka/Nodes/{server_name}/Consume/{destination_name}", 1 ) transaction.add_messagebroker_info("Confluent-Kafka", get_package_version("confluent-kafka")) diff --git a/newrelic/hooks/messagebroker_kafkapython.py b/newrelic/hooks/messagebroker_kafkapython.py index cd4f015ecb..763bbc67a0 100644 --- a/newrelic/hooks/messagebroker_kafkapython.py +++ b/newrelic/hooks/messagebroker_kafkapython.py @@ -69,7 +69,7 @@ def wrap_KafkaProducer_send(wrapped, instance, args, kwargs): if hasattr(instance, "config"): for server_name in instance.config.get("bootstrap_servers", []): - transaction.record_custom_metric("MessageBroker/Kafka/Nodes/%s/Produce/%s" % (server_name, topic), 1) + transaction.record_custom_metric(f"MessageBroker/Kafka/Nodes/{server_name}/Produce/{topic}", 1) try: return wrapped( topic, value=value, key=key, headers=dt_headers, partition=partition, timestamp_ms=timestamp_ms @@ -161,7 +161,7 @@ def wrap_kafkaconsumer_next(wrapped, instance, args, kwargs): if hasattr(instance, "config"): for server_name in instance.config.get("bootstrap_servers", []): transaction.record_custom_metric( - "MessageBroker/Kafka/Nodes/%s/Consume/%s" % (server_name, 
destination_name), 1 + f"MessageBroker/Kafka/Nodes/{server_name}/Consume/{destination_name}", 1 ) transaction.add_messagebroker_info( "Kafka-Python", get_package_version("kafka-python") or get_package_version("kafka-python-ng") diff --git a/tests/datastore_aiomcache/test_aiomcache.py b/tests/datastore_aiomcache/test_aiomcache.py index 9cccbd4612..3d66e1671e 100644 --- a/tests/datastore_aiomcache/test_aiomcache.py +++ b/tests/datastore_aiomcache/test_aiomcache.py @@ -29,7 +29,7 @@ MEMCACHED_HOST = DB_SETTINGS["host"] MEMCACHED_PORT = DB_SETTINGS["port"] MEMCACHED_NAMESPACE = str(os.getpid()) -INSTANCE_METRIC_NAME = "Datastore/instance/Memcached/%s/%s" % (MEMCACHED_HOST, MEMCACHED_PORT) +INSTANCE_METRIC_NAME = f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}" _test_bt_set_get_delete_scoped_metrics = [ ("Datastore/operation/Memcached/set", 1), diff --git a/tests/datastore_pymemcache/test_memcache.py b/tests/datastore_pymemcache/test_memcache.py index 611e6e8e7d..ce2cf6aa23 100644 --- a/tests/datastore_pymemcache/test_memcache.py +++ b/tests/datastore_pymemcache/test_memcache.py @@ -43,7 +43,7 @@ ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), - ("Datastore/instance/Memcached/%s/%s" % (MEMCACHED_HOST, MEMCACHED_PORT), 3), + (f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}", 3), ] @@ -81,7 +81,7 @@ def test_bt_set_get_delete(): ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), - ("Datastore/instance/Memcached/%s/%s" % (MEMCACHED_HOST, MEMCACHED_PORT), 3), + (f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}", 3), ] diff --git a/tests/messagebroker_confluentkafka/test_consumer.py b/tests/messagebroker_confluentkafka/test_consumer.py index 4e889da8d2..5df1414cf5 100644 --- a/tests/messagebroker_confluentkafka/test_consumer.py +++ b/tests/messagebroker_confluentkafka/test_consumer.py @@ -187,9 +187,9 @@ def _test(): @pytest.fixture(scope="function") def expected_broker_metrics(broker, topic): - return [("MessageBroker/Kafka/Nodes/%s/Consume/%s" % (server, topic), 1) for server in broker.split(",")] + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", 1) for server in broker.split(",")] @pytest.fixture(scope="function") def expected_missing_broker_metrics(broker, topic): - return [("MessageBroker/Kafka/Nodes/%s/Consume/%s" % (server, topic), None) for server in broker.split(",")] + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", None) for server in broker.split(",")] diff --git a/tests/messagebroker_confluentkafka/test_producer.py b/tests/messagebroker_confluentkafka/test_producer.py index 22a5b1799b..675bb3acba 100644 --- a/tests/messagebroker_confluentkafka/test_producer.py +++ b/tests/messagebroker_confluentkafka/test_producer.py @@ -172,4 +172,4 @@ def test(): @pytest.fixture(scope="function") def expected_broker_metrics(broker, topic): - return [("MessageBroker/Kafka/Nodes/%s/Produce/%s" % (server, topic), 1) for server in broker.split(",")] + return [(f"MessageBroker/Kafka/Nodes/{server}/Produce/{topic}", 1) for server in broker.split(",")] diff --git a/tests/messagebroker_kafkapython/test_consumer.py b/tests/messagebroker_kafkapython/test_consumer.py index 79d0aab495..4199e71a6b 100644 --- a/tests/messagebroker_kafkapython/test_consumer.py +++ b/tests/messagebroker_kafkapython/test_consumer.py @@ -189,9 +189,9 @@ def _poll(*args, **kwargs): 
@pytest.fixture(scope="function") def expected_broker_metrics(broker, topic): - return [("MessageBroker/Kafka/Nodes/%s/Consume/%s" % (server, topic), 1) for server in broker] + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", 1) for server in broker] @pytest.fixture(scope="function") def expected_missing_broker_metrics(broker, topic): - return [("MessageBroker/Kafka/Nodes/%s/Consume/%s" % (server, topic), None) for server in broker] + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", None) for server in broker] diff --git a/tests/messagebroker_kafkapython/test_producer.py b/tests/messagebroker_kafkapython/test_producer.py index b1cf04eaae..ba856d445d 100644 --- a/tests/messagebroker_kafkapython/test_producer.py +++ b/tests/messagebroker_kafkapython/test_producer.py @@ -104,4 +104,4 @@ def test(): @pytest.fixture(scope="function") def expected_broker_metrics(broker, topic): - return [("MessageBroker/Kafka/Nodes/%s/Produce/%s" % (server, topic), 1) for server in broker] + return [(f"MessageBroker/Kafka/Nodes/{server}/Produce/{topic}", 1) for server in broker] diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py index 22266c2f95..6d8b2943d5 100644 --- a/tests/mlmodel_langchain/test_chain.py +++ b/tests/mlmodel_langchain/test_chain.py @@ -1801,7 +1801,7 @@ def _test(): scoped_metrics=[("Llm/chain/LangChain/invoke", 3)], rollup_metrics=[("Llm/chain/LangChain/invoke", 3)], custom_metrics=[ - ("Supportability/Python/ML/LangChain/%s" % langchain.__version__, 1), + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), ], background_task=True, ) From f305ba7a98af32a9896d779c214b5a8883006202 Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:14:24 -0700 Subject: [PATCH 7/7] Merge main into develop-remove-python27 (#1217) * Add env var for GC runtime metrics. * [Mega-Linter] Apply linters fixes * Remove falcon master tests for py37 (unsupported) * Bump tests * Fix Psycopg3 Api Incompatibilities (#1197) * Fix DBAPI2 wrappers to allow additional kwargs to executemany * Pass extra psycopg3 kwargs through executemany * Rename arguments in psycopg3 wrappers to match upstream * Add tests for psycopg3 returning param --------- Co-authored-by: Uma Annamalai * Fix bugs * Test w/ latest langchain * Add support for new vectorstores * Remove unreachable [] * Use replace instead of lstrip to strip dns:// lstrip treats its argument as a set of characters to strip from the beginning of the string, so target strings that begin with d, n, or s get their first character(s) stripped as well. Replace is actually what we want in this case, as shown in the sketch below.
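  A minimal sketch of the difference (interpreter session; the host name is the one used in the new framework_grpc test case, but any target beginning with one of the characters in "dns:///" hits the same bug):

      >>> "dns:///newrelic-otel-productcatalogservice".lstrip("dns:///")
      'ewrelic-otel-productcatalogservice'
      >>> "dns:///newrelic-otel-productcatalogservice".replace("dns:///", "")
      'newrelic-otel-productcatalogservice'

  lstrip("dns:///") strips any leading run of the characters d, n, s, ":" and "/", so the leading "n" of the host is eaten too. replace removes the literal "dns:///" substring instead; it would also remove that substring if it appeared later in the target, but gRPC targets are not expected to contain "dns:///" mid-string.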
* Add dispatcher for uvicorn_worker * Fix bedrock expected error * Add metadata field to expected events * Adjust location of expected pydantic error * Langchain does not support python3.8 * Remove graphql master from tests * Add support for kafka-python-ng * Capture pymemcache,aiomcache,bmemcached host&port * Add bootstrap server metric for kafka (#1207) * Add bootstrap server metric * [Mega-Linter] Apply linters fixes --------- Co-authored-by: hmstepanek * Lint f-strings * Fix Issues in PyMongo and Botocore Tests (#1218) * Fix schema issues in botocore tests * Update pymongo hooks for new module locations * Remove unsupported py38 tornadomaster tests * Updated matrix for urllib3 tests (#1216) --------- Co-authored-by: Uma Annamalai Co-authored-by: umaannamalai Co-authored-by: Hannah Stepanek Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: hmstepanek --- newrelic/config.py | 68 ++++- newrelic/core/config.py | 2 +- newrelic/core/environment.py | 2 +- newrelic/hooks/database_dbapi2.py | 7 +- newrelic/hooks/database_dbapi2_async.py | 7 +- newrelic/hooks/database_psycopg.py | 68 ++--- newrelic/hooks/datastore_aiomcache.py | 20 +- newrelic/hooks/datastore_bmemcached.py | 24 +- newrelic/hooks/datastore_pymemcache.py | 20 +- newrelic/hooks/datastore_pymongo.py | 27 +- newrelic/hooks/framework_grpc.py | 4 +- .../hooks/messagebroker_confluentkafka.py | 39 ++- newrelic/hooks/messagebroker_kafkapython.py | 19 +- newrelic/hooks/mlmodel_langchain.py | 75 +++-- newrelic/hooks/mlmodel_openai.py | 6 +- tests/agent_unittests/test_environment.py | 11 + tests/datastore_aiomcache/test_aiomcache.py | 7 +- tests/datastore_bmemcached/test_memcache.py | 5 +- tests/datastore_psycopg/test_cursor.py | 12 +- tests/datastore_pymemcache/test_memcache.py | 4 +- tests/datastore_pymongo/test_pymongo.py | 23 +- .../test_bedrock_chat_completion.py | 4 +- .../test_botocore_dynamodb.py | 2 - tests/framework_grpc/test_get_url.py | 30 +- .../messagebroker_confluentkafka/conftest.py | 26 +- .../test_consumer.py | 30 +- .../test_producer.py | 19 +- tests/messagebroker_kafkapython/conftest.py | 31 +- .../test_consumer.py | 31 +- .../test_producer.py | 19 +- .../_mock_external_openai_server.py | 94 ++++++ tests/mlmodel_langchain/test_chain.py | 279 +++++++++++++++++- tests/mlmodel_langchain/test_tool.py | 24 +- tox.ini | 20 +- 34 files changed, 858 insertions(+), 201 deletions(-) diff --git a/newrelic/config.py b/newrelic/config.py index e3d52978f9..093c178bd8 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -2072,6 +2072,12 @@ def _process_module_builtin_defaults(): "newrelic.hooks.mlmodel_openai", "instrument_openai_resources_chat_completions", ) + + _process_module_definition( + "openai.resources.completions", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_resources_chat_completions", + ) _process_module_definition( "openai._base_client", "newrelic.hooks.mlmodel_openai", @@ -2089,6 +2095,11 @@ def _process_module_builtin_defaults(): "newrelic.hooks.mlmodel_langchain", "instrument_langchain_runnables_chains_base", ) + _process_module_definition( + "langchain_core.runnables.config", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_core_runnables_config", + ) _process_module_definition( "langchain.chains.base", "newrelic.hooks.mlmodel_langchain", @@ -2119,6 +2130,11 @@ def _process_module_builtin_defaults(): "newrelic.hooks.mlmodel_langchain", "instrument_langchain_vectorstore_similarity_search", ) + _process_module_definition( + 
"langchain_community.vectorstores.aerospike", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) _process_module_definition( "langchain_community.vectorstores.analyticdb", "newrelic.hooks.mlmodel_langchain", @@ -2134,6 +2150,11 @@ def _process_module_builtin_defaults(): "newrelic.hooks.mlmodel_langchain", "instrument_langchain_vectorstore_similarity_search", ) + _process_module_definition( + "langchain_community.vectorstores.aperturedb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) _process_module_definition( "langchain_community.vectorstores.astradb", "newrelic.hooks.mlmodel_langchain", @@ -2149,6 +2170,11 @@ def _process_module_builtin_defaults(): "newrelic.hooks.mlmodel_langchain", "instrument_langchain_vectorstore_similarity_search", ) + _process_module_definition( + "langchain_community.vectorstores.azure_cosmos_db_no_sql", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) _process_module_definition( "langchain_community.vectorstores.azure_cosmos_db", "newrelic.hooks.mlmodel_langchain", @@ -2335,6 +2361,12 @@ def _process_module_builtin_defaults(): "instrument_langchain_vectorstore_similarity_search", ) + _process_module_definition( + "langchain_community.vectorstores.manticore_search", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( "langchain_community.vectorstores.marqo", "newrelic.hooks.mlmodel_langchain", @@ -2383,6 +2415,12 @@ def _process_module_builtin_defaults(): "instrument_langchain_vectorstore_similarity_search", ) + _process_module_definition( + "langchain_community.vectorstores.thirdai_neuraldb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( "langchain_community.vectorstores.nucliadb", "newrelic.hooks.mlmodel_langchain", @@ -2611,6 +2649,12 @@ def _process_module_builtin_defaults(): "instrument_langchain_vectorstore_similarity_search", ) + _process_module_definition( + "langchain_community.vectorstores.zep_cloud", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( "langchain_community.vectorstores.zep", "newrelic.hooks.mlmodel_langchain", @@ -3481,21 +3525,39 @@ def _process_module_builtin_defaults(): "instrument_pyelasticsearch_client", ) + # Newer pymongo module locations _process_module_definition( - "pymongo.connection", + "pymongo.synchronous.pool", "newrelic.hooks.datastore_pymongo", - "instrument_pymongo_connection", + "instrument_pymongo_pool", ) _process_module_definition( - "pymongo.mongo_client", + "pymongo.synchronous.collection", + "newrelic.hooks.datastore_pymongo", + "instrument_pymongo_collection", + ) + _process_module_definition( + "pymongo.synchronous.mongo_client", "newrelic.hooks.datastore_pymongo", "instrument_pymongo_mongo_client", ) + + # Older pymongo module locations + _process_module_definition( + "pymongo.connection", + "newrelic.hooks.datastore_pymongo", + "instrument_pymongo_pool", + ) _process_module_definition( "pymongo.collection", "newrelic.hooks.datastore_pymongo", "instrument_pymongo_collection", ) + _process_module_definition( + "pymongo.mongo_client", + "newrelic.hooks.datastore_pymongo", + "instrument_pymongo_mongo_client", + ) # Redis v4.2+ _process_module_definition( diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 
26dbbf0cbd..fec4bf6a93 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -743,7 +743,7 @@ def default_otlp_host(host): _settings.thread_profiler.enabled = True _settings.cross_application_tracer.enabled = False -_settings.gc_runtime_metrics.enabled = False +_settings.gc_runtime_metrics.enabled = _environ_as_bool("NEW_RELIC_GC_RUNTIME_METRICS_ENABLED", default=False) _settings.gc_runtime_metrics.top_object_count_limit = 5 _settings.memory_runtime_pid_metrics.enabled = _environ_as_bool( diff --git a/newrelic/core/environment.py b/newrelic/core/environment.py index db59e51b50..1ac78b3ecb 100644 --- a/newrelic/core/environment.py +++ b/newrelic/core/environment.py @@ -152,7 +152,7 @@ def environment_settings(): dispatcher.append(("Dispatcher", "gunicorn (gevent)")) elif "gunicorn.workers.geventlet" in sys.modules: dispatcher.append(("Dispatcher", "gunicorn (eventlet)")) - elif "uvicorn.workers" in sys.modules: + elif "uvicorn.workers" in sys.modules or "uvicorn_worker" in sys.modules: dispatcher.append(("Dispatcher", "gunicorn (uvicorn)")) uvicorn = sys.modules.get("uvicorn") if hasattr(uvicorn, "__version__"): diff --git a/newrelic/hooks/database_dbapi2.py b/newrelic/hooks/database_dbapi2.py index 17d3243d07..ce6e933175 100644 --- a/newrelic/hooks/database_dbapi2.py +++ b/newrelic/hooks/database_dbapi2.py @@ -51,12 +51,13 @@ def execute(self, sql, parameters=DEFAULT, *args, **kwargs): ): return self.__wrapped__.execute(sql, **kwargs) - def executemany(self, sql, seq_of_parameters): + def executemany(self, sql, seq_of_parameters, *args, **kwargs): try: seq_of_parameters = list(seq_of_parameters) parameters = seq_of_parameters[0] except (TypeError, IndexError): parameters = DEFAULT + if parameters is not DEFAULT: with DatabaseTrace( sql=sql, @@ -66,7 +67,7 @@ def executemany(self, sql, seq_of_parameters): sql_parameters=parameters, source=self.__wrapped__.executemany, ): - return self.__wrapped__.executemany(sql, seq_of_parameters) + return self.__wrapped__.executemany(sql, seq_of_parameters, *args, **kwargs) else: with DatabaseTrace( sql=sql, @@ -75,7 +76,7 @@ def executemany(self, sql, seq_of_parameters): cursor_params=self._nr_cursor_params, source=self.__wrapped__.executemany, ): - return self.__wrapped__.executemany(sql, seq_of_parameters) + return self.__wrapped__.executemany(sql, seq_of_parameters, *args, **kwargs) def callproc(self, procname, parameters=DEFAULT): with DatabaseTrace( diff --git a/newrelic/hooks/database_dbapi2_async.py b/newrelic/hooks/database_dbapi2_async.py index fba2126818..88f988f906 100644 --- a/newrelic/hooks/database_dbapi2_async.py +++ b/newrelic/hooks/database_dbapi2_async.py @@ -49,12 +49,13 @@ async def execute(self, sql, parameters=DEFAULT, *args, **kwargs): ): return await self.__wrapped__.execute(sql, **kwargs) - async def executemany(self, sql, seq_of_parameters): + async def executemany(self, sql, seq_of_parameters, *args, **kwargs): try: seq_of_parameters = list(seq_of_parameters) parameters = seq_of_parameters[0] except (TypeError, IndexError): parameters = DEFAULT + if parameters is not DEFAULT: with DatabaseTrace( sql=sql, @@ -64,7 +65,7 @@ async def executemany(self, sql, seq_of_parameters): sql_parameters=parameters, source=self.__wrapped__.executemany, ): - return await self.__wrapped__.executemany(sql, seq_of_parameters) + return await self.__wrapped__.executemany(sql, seq_of_parameters, *args, **kwargs) else: with DatabaseTrace( sql=sql, @@ -73,7 +74,7 @@ async def executemany(self, sql, seq_of_parameters): 
cursor_params=self._nr_cursor_params, source=self.__wrapped__.executemany, ): - return await self.__wrapped__.executemany(sql, seq_of_parameters) + return await self.__wrapped__.executemany(sql, seq_of_parameters, *args, **kwargs) async def callproc(self, procname, parameters=DEFAULT): with DatabaseTrace( diff --git a/newrelic/hooks/database_psycopg.py b/newrelic/hooks/database_psycopg.py index 9b46b7afd0..04fec9b714 100644 --- a/newrelic/hooks/database_psycopg.py +++ b/newrelic/hooks/database_psycopg.py @@ -80,41 +80,41 @@ def __enter__(self): return self - def execute(self, sql, parameters=DEFAULT, *args, **kwargs): - if hasattr(sql, "as_string"): - sql = sql.as_string(self) + def execute(self, query, params=DEFAULT, *args, **kwargs): + if hasattr(query, "as_string"): + query = query.as_string(self) - return super(CursorWrapper, self).execute(sql, parameters, *args, **kwargs) + return super(CursorWrapper, self).execute(query, params, *args, **kwargs) - def executemany(self, sql, seq_of_parameters): - if hasattr(sql, "as_string"): - sql = sql.as_string(self) + def executemany(self, query, params_seq, *args, **kwargs): + if hasattr(query, "as_string"): + query = query.as_string(self) - return super(CursorWrapper, self).executemany(sql, seq_of_parameters) + return super(CursorWrapper, self).executemany(query, params_seq, *args, **kwargs) class ConnectionSaveParamsWrapper(DBAPI2ConnectionWrapper): __cursor_wrapper__ = CursorWrapper - def execute(self, sql, parameters=DEFAULT, *args, **kwargs): - if hasattr(sql, "as_string"): - sql = sql.as_string(self) + def execute(self, query, params=DEFAULT, *args, **kwargs): + if hasattr(query, "as_string"): + query = query.as_string(self) - if parameters is not DEFAULT: + if params is not DEFAULT: with DatabaseTrace( - sql=sql, + sql=query, dbapi2_module=self._nr_dbapi2_module, connect_params=self._nr_connect_params, cursor_params=None, - sql_parameters=parameters, + sql_parameters=params, execute_params=(args, kwargs), source=self.__wrapped__.execute, ): - cursor = self.__wrapped__.execute(sql, parameters, *args, **kwargs) + cursor = self.__wrapped__.execute(query, params, *args, **kwargs) else: with DatabaseTrace( - sql=sql, + sql=query, dbapi2_module=self._nr_dbapi2_module, connect_params=self._nr_connect_params, cursor_params=None, @@ -122,7 +122,7 @@ def execute(self, sql, parameters=DEFAULT, *args, **kwargs): execute_params=(args, kwargs), source=self.__wrapped__.execute, ): - cursor = self.__wrapped__.execute(sql, **kwargs) + cursor = self.__wrapped__.execute(query, **kwargs) return self.__cursor_wrapper__(cursor, self._nr_dbapi2_module, self._nr_connect_params, (args, kwargs)) @@ -226,41 +226,41 @@ async def __aenter__(self): return self - async def execute(self, sql, parameters=DEFAULT, *args, **kwargs): - if hasattr(sql, "as_string"): - sql = sql.as_string(self) + async def execute(self, query, params=DEFAULT, *args, **kwargs): + if hasattr(query, "as_string"): + query = query.as_string(self) - return await super(AsyncCursorWrapper, self).execute(sql, parameters, *args, **kwargs) + return await super(AsyncCursorWrapper, self).execute(query, params, *args, **kwargs) - async def executemany(self, sql, seq_of_parameters): - if hasattr(sql, "as_string"): - sql = sql.as_string(self) + async def executemany(self, query, params_seq, *args, **kwargs): + if hasattr(query, "as_string"): + query = query.as_string(self) - return await super(AsyncCursorWrapper, self).executemany(sql, seq_of_parameters) + return await super(AsyncCursorWrapper, 
self).executemany(query, params_seq, *args, **kwargs) class AsyncConnectionSaveParamsWrapper(DBAPI2AsyncConnectionWrapper): __cursor_wrapper__ = AsyncCursorWrapper - async def execute(self, sql, parameters=DEFAULT, *args, **kwargs): - if hasattr(sql, "as_string"): - sql = sql.as_string(self) + async def execute(self, query, params=DEFAULT, *args, **kwargs): + if hasattr(query, "as_string"): + query = query.as_string(self) - if parameters is not DEFAULT: + if params is not DEFAULT: with DatabaseTrace( - sql=sql, + sql=query, dbapi2_module=self._nr_dbapi2_module, connect_params=self._nr_connect_params, cursor_params=None, - sql_parameters=parameters, + sql_parameters=params, execute_params=(args, kwargs), source=self.__wrapped__.execute, ): - cursor = await self.__wrapped__.execute(sql, parameters, *args, **kwargs) + cursor = await self.__wrapped__.execute(query, params, *args, **kwargs) else: with DatabaseTrace( - sql=sql, + sql=query, dbapi2_module=self._nr_dbapi2_module, connect_params=self._nr_connect_params, cursor_params=None, @@ -268,7 +268,7 @@ async def execute(self, sql, parameters=DEFAULT, *args, **kwargs): execute_params=(args, kwargs), source=self.__wrapped__.execute, ): - cursor = await self.__wrapped__.execute(sql, **kwargs) + cursor = await self.__wrapped__.execute(query, **kwargs) return self.__cursor_wrapper__(cursor, self._nr_dbapi2_module, self._nr_connect_params, (args, kwargs)) diff --git a/newrelic/hooks/datastore_aiomcache.py b/newrelic/hooks/datastore_aiomcache.py index e2d605392e..7afb76faeb 100644 --- a/newrelic/hooks/datastore_aiomcache.py +++ b/newrelic/hooks/datastore_aiomcache.py @@ -18,7 +18,25 @@ ) +def capture_host(self, *args, **kwargs): + if hasattr(self, "_pool") and hasattr(self._pool, "_host"): + return self._pool._host + + +def capture_port(self, *args, **kwargs): + if hasattr(self, "_pool") and hasattr(self._pool, "_port"): + return self._pool._port + + def instrument_aiomcache_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) + wrap_datastore_trace( + module, + f"Client.{name}", + product="Memcached", + target=None, + operation=name, + host=capture_host, + port_path_or_id=capture_port, + ) diff --git a/newrelic/hooks/datastore_bmemcached.py b/newrelic/hooks/datastore_bmemcached.py index 3e891f85e3..3bf2a1cb3b 100644 --- a/newrelic/hooks/datastore_bmemcached.py +++ b/newrelic/hooks/datastore_bmemcached.py @@ -32,7 +32,29 @@ ) +def capture_host(self, *args, **kwargs): + if hasattr(self, "servers"): + for s in self.servers: + if hasattr(s, "host"): + return s.host + + +def capture_port(self, *args, **kwargs): + if hasattr(self, "servers"): + for s in self.servers: + if hasattr(s, "port"): + return s.port + + def instrument_bmemcached_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) + wrap_datastore_trace( + module, + f"Client.{name}", + product="Memcached", + target=None, + operation=name, + host=capture_host, + port_path_or_id=capture_port, + ) diff --git a/newrelic/hooks/datastore_pymemcache.py b/newrelic/hooks/datastore_pymemcache.py index 9edb1d7230..1c754c0039 100644 --- a/newrelic/hooks/datastore_pymemcache.py +++ b/newrelic/hooks/datastore_pymemcache.py @@ -37,7 +37,25 @@ ) +def capture_host(self, *args, **kwargs): + if hasattr(self, "server") and self.server and 
len(self.server) >= 2: + return self.server[0] + + +def capture_port(self, *args, **kwargs): + if hasattr(self, "server") and self.server and len(self.server) >= 2: + return self.server[1] + + def instrument_pymemcache_client(module): for name in _memcache_client_methods: if hasattr(module.Client, name): - wrap_datastore_trace(module, f"Client.{name}", product="Memcached", target=None, operation=name) + wrap_datastore_trace( + module, + f"Client.{name}", + product="Memcached", + target=None, + operation=name, + host=capture_host, + port_path_or_id=capture_port, + ) diff --git a/newrelic/hooks/datastore_pymongo.py b/newrelic/hooks/datastore_pymongo.py index d185ce2c6c..80d2ab8c5c 100644 --- a/newrelic/hooks/datastore_pymongo.py +++ b/newrelic/hooks/datastore_pymongo.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys + from newrelic.api.datastore_trace import wrap_datastore_trace from newrelic.api.function_trace import wrap_function_trace @@ -59,29 +61,44 @@ ) -def instrument_pymongo_connection(module): - # Must name function explicitly as pymongo overrides the - # __getattr__() method in a way that breaks introspection. +def instrument_pymongo_pool(module): + # Exit early if this is a reimport of code from the newer module location + moved_module = "pymongo.synchronous.pool" + if module.__name__ != moved_module and moved_module in sys.modules: + return rollup = ("Datastore/all", "Datastore/MongoDB/all") + # Must name function explicitly as pymongo overrides the + # __getattr__() method in a way that breaks introspection. + wrap_function_trace( module, "Connection.__init__", name=f"{module.__name__}:Connection.__init__", terminal=True, rollup=rollup ) def instrument_pymongo_mongo_client(module): - # Must name function explicitly as pymongo overrides the - # __getattr__() method in a way that breaks introspection. + # Exit early if this is a reimport of code from the newer module location + moved_module = "pymongo.synchronous.mongo_client" + if module.__name__ != moved_module and moved_module in sys.modules: + return rollup = ("Datastore/all", "Datastore/MongoDB/all") + # Must name function explicitly as pymongo overrides the + # __getattr__() method in a way that breaks introspection. 
+ wrap_function_trace( module, "MongoClient.__init__", name=f"{module.__name__}:MongoClient.__init__", terminal=True, rollup=rollup ) def instrument_pymongo_collection(module): + # Exit early if this is a reimport of code from the newer module location + moved_module = "pymongo.synchronous.collection" + if module.__name__ != moved_module and moved_module in sys.modules: + return + def _collection_name(collection, *args, **kwargs): return collection.name diff --git a/newrelic/hooks/framework_grpc.py b/newrelic/hooks/framework_grpc.py index dc59634e71..70f296132c 100644 --- a/newrelic/hooks/framework_grpc.py +++ b/newrelic/hooks/framework_grpc.py @@ -24,7 +24,7 @@ def _get_uri_method(instance, *args, **kwargs): - target = instance._channel.target().decode("utf-8").lstrip("dns:///") + target = instance._channel.target().decode("utf-8").replace("dns:///", "") method = instance._method.decode("utf-8").lstrip("/") uri = f"grpc://{target}/{method}" return (uri, method) @@ -43,7 +43,6 @@ def _prepare_request_stream(transaction, guid, request_iterator, *args, **kwargs def wrap_call(module, object_path, prepare): - def _call_wrapper(wrapped, instance, args, kwargs): transaction = current_transaction() if transaction is None: @@ -58,7 +57,6 @@ def _call_wrapper(wrapped, instance, args, kwargs): def wrap_future(module, object_path, prepare): - def _future_wrapper(wrapped, instance, args, kwargs): transaction = current_transaction() if transaction is None: diff --git a/newrelic/hooks/messagebroker_confluentkafka.py b/newrelic/hooks/messagebroker_confluentkafka.py index 8a4c5d3f8c..626056e1dc 100644 --- a/newrelic/hooks/messagebroker_confluentkafka.py +++ b/newrelic/hooks/messagebroker_confluentkafka.py @@ -56,14 +56,18 @@ def wrap_Producer_produce(wrapped, instance, args, kwargs): args = args[1:] else: topic = kwargs.pop("topic", None) + topic = topic or "Default" transaction.add_messagebroker_info("Confluent-Kafka", get_package_version("confluent-kafka")) + if hasattr(instance, "_nr_bootstrap_servers"): + for server_name in instance._nr_bootstrap_servers: + transaction.record_custom_metric(f"MessageBroker/Kafka/Nodes/{server_name}/Produce/{topic}", 1) with MessageTrace( library="Kafka", operation="Produce", destination_type="Topic", - destination_name=topic or "Default", + destination_name=topic, source=wrapped, ): dt_headers = {k: v.encode("utf-8") for k, v in MessageTrace.generate_request_headers(transaction)} @@ -166,6 +170,11 @@ def wrap_Consumer_poll(wrapped, instance, args, kwargs): name = f"Named/{destination_name}" transaction.record_custom_metric(f"{group}/{name}/Received/Bytes", received_bytes) transaction.record_custom_metric(f"{group}/{name}/Received/Messages", message_count) + if hasattr(instance, "_nr_bootstrap_servers"): + for server_name in instance._nr_bootstrap_servers: + transaction.record_custom_metric( + f"MessageBroker/Kafka/Nodes/{server_name}/Consume/{destination_name}", 1 + ) transaction.add_messagebroker_info("Confluent-Kafka", get_package_version("confluent-kafka")) return record @@ -219,6 +228,32 @@ def wrap_DeserializingConsumer_init(wrapped, instance, args, kwargs): instance._value_deserializer = wrap_serializer("Deserialization/Value", "Message")(instance._value_deserializer) +def wrap_Producer_init(wrapped, instance, args, kwargs): + wrapped(*args, **kwargs) + + # Try to capture the bootstrap server info that is passed in the configuration.
+ try: + conf = args[0] + servers = conf.get("bootstrap.servers") + if servers: + instance._nr_bootstrap_servers = servers.split(",") + except Exception: + pass + + +def wrap_Consumer_init(wrapped, instance, args, kwargs): + wrapped(*args, **kwargs) + + # Try to capture the bootstrap server info that is passed in the configuration. + try: + conf = args[0] + servers = conf.get("bootstrap.servers") + if servers: + instance._nr_bootstrap_servers = servers.split(",") + except Exception: + pass + + def wrap_immutable_class(module, class_name): # Wrap immutable binary extension class with a mutable Python subclass new_class = type(class_name, (getattr(module, class_name),), {}) @@ -230,10 +265,12 @@ def instrument_confluentkafka_cimpl(module): if hasattr(module, "Producer"): wrap_immutable_class(module, "Producer") wrap_function_wrapper(module, "Producer.produce", wrap_Producer_produce) + wrap_function_wrapper(module, "Producer.__init__", wrap_Producer_init) if hasattr(module, "Consumer"): wrap_immutable_class(module, "Consumer") wrap_function_wrapper(module, "Consumer.poll", wrap_Consumer_poll) + wrap_function_wrapper(module, "Consumer.__init__", wrap_Consumer_init) def instrument_confluentkafka_serializing_producer(module): diff --git a/newrelic/hooks/messagebroker_kafkapython.py b/newrelic/hooks/messagebroker_kafkapython.py index e556beacf2..763bbc67a0 100644 --- a/newrelic/hooks/messagebroker_kafkapython.py +++ b/newrelic/hooks/messagebroker_kafkapython.py @@ -47,15 +47,18 @@ def wrap_KafkaProducer_send(wrapped, instance, args, kwargs): return wrapped(*args, **kwargs) topic, value, key, headers, partition, timestamp_ms = _bind_send(*args, **kwargs) + topic = topic or "Default" headers = list(headers) if headers else [] - transaction.add_messagebroker_info("Kafka-Python", get_package_version("kafka-python")) + transaction.add_messagebroker_info( + "Kafka-Python", get_package_version("kafka-python") or get_package_version("kafka-python-ng") + ) with MessageTrace( library="Kafka", operation="Produce", destination_type="Topic", - destination_name=topic or "Default", + destination_name=topic, source=wrapped, terminal=False, ): @@ -64,6 +67,9 @@ def wrap_KafkaProducer_send(wrapped, instance, args, kwargs): if headers: dt_headers.extend(headers) + if hasattr(instance, "config"): + for server_name in instance.config.get("bootstrap_servers", []): + transaction.record_custom_metric(f"MessageBroker/Kafka/Nodes/{server_name}/Produce/{topic}", 1) try: return wrapped( topic, value=value, key=key, headers=dt_headers, partition=partition, timestamp_ms=timestamp_ms @@ -152,7 +158,14 @@ def wrap_kafkaconsumer_next(wrapped, instance, args, kwargs): name = f"Named/{destination_name}" transaction.record_custom_metric(f"{group}/{name}/Received/Bytes", received_bytes) transaction.record_custom_metric(f"{group}/{name}/Received/Messages", message_count) - transaction.add_messagebroker_info("Kafka-Python", get_package_version("kafka-python")) + if hasattr(instance, "config"): + for server_name in instance.config.get("bootstrap_servers", []): + transaction.record_custom_metric( + f"MessageBroker/Kafka/Nodes/{server_name}/Consume/{destination_name}", 1 + ) + transaction.add_messagebroker_info( + "Kafka-Python", get_package_version("kafka-python") or get_package_version("kafka-python-ng") + ) return record diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py index cb7998580b..48d281a246 100644 --- a/newrelic/hooks/mlmodel_langchain.py +++ b/newrelic/hooks/mlmodel_langchain.py @@
-18,26 +18,29 @@ import uuid from newrelic.api.function_trace import FunctionTrace -from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.api.time_trace import current_trace, get_trace_linking_metadata from newrelic.api.transaction import current_transaction from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.common.package_version_utils import get_package_version from newrelic.common.signature import bind_args from newrelic.core.config import global_settings +from newrelic.core.context import context_wrapper _logger = logging.getLogger(__name__) LANGCHAIN_VERSION = get_package_version("langchain") EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE = "Exception occurred in langchain instrumentation: While reporting an exception in langchain, another exception occurred. Report this issue to New Relic Support.\n%s" RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in langchain instrumentation: Failed to record LLM events. Report this issue to New Relic Support.\n%s" - VECTORSTORE_CLASSES = { + "langchain_community.vectorstores.aerospike": "Aerospike", "langchain_community.vectorstores.alibabacloud_opensearch": "AlibabaCloudOpenSearch", "langchain_community.vectorstores.analyticdb": "AnalyticDB", "langchain_community.vectorstores.annoy": "Annoy", "langchain_community.vectorstores.apache_doris": "ApacheDoris", + "langchain_community.vectorstores.aperturedb": "ApertureDB", "langchain_community.vectorstores.astradb": "AstraDB", "langchain_community.vectorstores.atlas": "AtlasDB", "langchain_community.vectorstores.awadb": "AwaDB", + "langchain_community.vectorstores.azure_cosmos_db_no_sql": "AzureCosmosDBNoSqlVectorSearch", "langchain_community.vectorstores.azure_cosmos_db": "AzureCosmosDBVectorSearch", "langchain_community.vectorstores.azuresearch": "AzureSearch", "langchain_community.vectorstores.baiduvectordb": "BaiduVectorDB", @@ -71,6 +74,7 @@ "langchain_community.vectorstores.lancedb": "LanceDB", "langchain_community.vectorstores.lantern": "Lantern", "langchain_community.vectorstores.llm_rails": "LLMRails", + "langchain_community.vectorstores.manticore_search": "ManticoreSearch", "langchain_community.vectorstores.marqo": "Marqo", "langchain_community.vectorstores.matching_engine": "MatchingEngine", "langchain_community.vectorstores.meilisearch": "Meilisearch", @@ -79,7 +83,7 @@ "langchain_community.vectorstores.mongodb_atlas": "MongoDBAtlasVectorSearch", "langchain_community.vectorstores.myscale": "MyScale", "langchain_community.vectorstores.neo4j_vector": "Neo4jVector", - "langchain_community.vectorstores.thirdai_neuraldb": "NeuralDBVectorStore", + "langchain_community.vectorstores.thirdai_neuraldb": ["NeuralDBClientVectorStore", "NeuralDBVectorStore"], "langchain_community.vectorstores.nucliadb": "NucliaDB", "langchain_community.vectorstores.oraclevs": "OracleVS", "langchain_community.vectorstores.opensearch_vector_search": "OpenSearchVectorSearch", @@ -118,17 +122,35 @@ "langchain_community.vectorstores.weaviate": "Weaviate", "langchain_community.vectorstores.xata": "XataVectorStore", "langchain_community.vectorstores.yellowbrick": "Yellowbrick", + "langchain_community.vectorstores.zep_cloud": "ZepCloudVectorStore", "langchain_community.vectorstores.zep": "ZepVectorStore", "langchain_community.vectorstores.docarray.hnsw": "DocArrayHnswSearch", "langchain_community.vectorstores.docarray.in_memory": "DocArrayInMemorySearch", } -def _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata): +def bind_submit(func, 
*args, **kwargs): + return {"func": func, "args": args, "kwargs": kwargs} + + +def wrap_ContextThreadPoolExecutor_submit(wrapped, instance, args, kwargs): + trace = current_trace() + if not trace: + return wrapped(*args, **kwargs) + + # Use hardened function signature bind so we have safety net catchall of args and kwargs. + bound_args = bind_submit(*args, **kwargs) + bound_args["func"] = context_wrapper(bound_args["func"], trace=trace, strict=True) + return wrapped(bound_args["func"], *bound_args["args"], **bound_args["kwargs"]) + + +def _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata, wrapped): settings = transaction.settings if transaction.settings is not None else global_settings() span_id = linking_metadata.get("span.id") trace_id = linking_metadata.get("trace.id") - request_query, request_k = bind_similarity_search(*args, **kwargs) + bound_args = bind_args(wrapped, args, kwargs) + request_query = bound_args["query"] + request_k = bound_args["k"] llm_metadata_dict = _get_llm_metadata(transaction) vectorstore_error_dict = { "request.k": request_k, @@ -169,21 +191,17 @@ async def wrap_asimilarity_search(wrapped, instance, args, kwargs): except Exception as exc: ft.notice_error(attributes={"vector_store_id": search_id}) ft.__exit__(*sys.exc_info()) - _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata) + _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata, wrapped) raise ft.__exit__(None, None, None) if not response: return response - _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response) + _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response, wrapped) return response -def bind_similarity_search(query, k, *args, **kwargs): - return query, k - - def wrap_similarity_search(wrapped, instance, args, kwargs): transaction = current_transaction() if not transaction: @@ -206,20 +224,22 @@ def wrap_similarity_search(wrapped, instance, args, kwargs): except Exception as exc: ft.notice_error(attributes={"vector_store_id": search_id}) ft.__exit__(*sys.exc_info()) - _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata) + _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata, wrapped) raise ft.__exit__(None, None, None) if not response: return response - _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response) + _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response, wrapped) return response -def _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response): +def _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response, wrapped): settings = transaction.settings if transaction.settings is not None else global_settings() - request_query, request_k = bind_similarity_search(*args, **kwargs) + bound_args = bind_args(wrapped, args, kwargs) + request_query = bound_args["query"] + request_k = bound_args["k"] duration = ft.duration * 1000 response_number_of_documents = len(response) llm_metadata_dict = _get_llm_metadata(transaction) @@ -854,12 +874,20 @@ def instrument_langchain_chains_base(module): def instrument_langchain_vectorstore_similarity_search(module): - vector_class = VECTORSTORE_CLASSES.get(module.__name__) - - if vector_class and hasattr(getattr(module, vector_class, 
""), "similarity_search"): - wrap_function_wrapper(module, f"{vector_class}.similarity_search", wrap_similarity_search) - if vector_class and hasattr(getattr(module, vector_class, ""), "asimilarity_search"): - wrap_function_wrapper(module, f"{vector_class}.asimilarity_search", wrap_asimilarity_search) + def _instrument_class(module, vector_class): + if hasattr(getattr(module, vector_class, ""), "similarity_search"): + wrap_function_wrapper(module, f"{vector_class}.similarity_search", wrap_similarity_search) + if hasattr(getattr(module, vector_class, ""), "asimilarity_search"): + wrap_function_wrapper(module, f"{vector_class}.asimilarity_search", wrap_asimilarity_search) + + vector_classes = VECTORSTORE_CLASSES.get(module.__name__) + if vector_classes is None: + return + if isinstance(vector_classes, list): + for vector_class in vector_classes: + _instrument_class(module, vector_class) + else: + _instrument_class(module, vector_classes) def instrument_langchain_core_tools(module): @@ -878,3 +906,8 @@ def instrument_langchain_callbacks_manager(module): wrap_function_wrapper(module, "CallbackManager.on_chain_start", wrap_on_chain_start) if hasattr(getattr(module, "AsyncCallbackManager"), "on_chain_start"): wrap_function_wrapper(module, "AsyncCallbackManager.on_chain_start", wrap_async_on_chain_start) + + +def instrument_langchain_core_runnables_config(module): + if hasattr(module, "ContextThreadPoolExecutor"): + wrap_function_wrapper(module, "ContextThreadPoolExecutor.submit", wrap_ContextThreadPoolExecutor_submit) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 4479d1e966..96228fd853 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -492,7 +492,9 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa finish_reason = None choices = response.get("choices") or [] if choices: - output_message_list = [choices[0].get("message")] + output_message_list = [ + choices[0].get("message") or {"content": choices[0].get("text"), "role": "assistant"} + ] finish_reason = choices[0].get("finish_reason") else: response_model = kwargs.get("response.model") @@ -507,7 +509,7 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa request_id = response_headers.get("x-request-id") organization = response_headers.get("openai-organization") or getattr(response, "organization", None) - messages = kwargs.get("messages", None) or [] + messages = kwargs.get("messages") or [{"content": kwargs.get("prompt"), "role": "user"}] input_message_list = list(messages) full_chat_completion_summary_dict = { "id": completion_id, diff --git a/tests/agent_unittests/test_environment.py b/tests/agent_unittests/test_environment.py index 58b6bcd29d..22a102cd14 100644 --- a/tests/agent_unittests/test_environment.py +++ b/tests/agent_unittests/test_environment.py @@ -113,6 +113,17 @@ def test_plugin_list_uses_no_sys_modules_iterator(monkeypatch): "1.2.3", "4.5.6", ), + # New replacement module uvicorn_worker should function the same + ( + { + "gunicorn": module("1.2.3"), + "uvicorn": module("4.5.6"), + "uvicorn_worker": object(), + }, + "gunicorn (uvicorn)", + "1.2.3", + "4.5.6", + ), ({"uvicorn": object()}, "uvicorn", None, None), ( { diff --git a/tests/datastore_aiomcache/test_aiomcache.py b/tests/datastore_aiomcache/test_aiomcache.py index 9641c6d70e..3d66e1671e 100644 --- a/tests/datastore_aiomcache/test_aiomcache.py +++ b/tests/datastore_aiomcache/test_aiomcache.py @@ -16,6 +16,7 @@ import aiomcache from 
testing_support.db_settings import memcached_settings +from testing_support.fixture.event_loop import event_loop as loop from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, ) @@ -23,14 +24,12 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import set_background_task -from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics -from testing_support.fixture.event_loop import event_loop as loop - DB_SETTINGS = memcached_settings()[0] MEMCACHED_HOST = DB_SETTINGS["host"] MEMCACHED_PORT = DB_SETTINGS["port"] MEMCACHED_NAMESPACE = str(os.getpid()) +INSTANCE_METRIC_NAME = f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}" _test_bt_set_get_delete_scoped_metrics = [ ("Datastore/operation/Memcached/set", 1), @@ -43,6 +42,7 @@ ("Datastore/allOther", 3), ("Datastore/Memcached/all", 3), ("Datastore/Memcached/allOther", 3), + (INSTANCE_METRIC_NAME, 3), ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), @@ -81,6 +81,7 @@ def test_bt_set_get_delete(loop): ("Datastore/allWeb", 3), ("Datastore/Memcached/all", 3), ("Datastore/Memcached/allWeb", 3), + (INSTANCE_METRIC_NAME, 3), ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), diff --git a/tests/datastore_bmemcached/test_memcache.py b/tests/datastore_bmemcached/test_memcache.py index cb43d63e29..fa052c670e 100644 --- a/tests/datastore_bmemcached/test_memcache.py +++ b/tests/datastore_bmemcached/test_memcache.py @@ -23,14 +23,13 @@ from newrelic.api.background_task import background_task from newrelic.api.transaction import set_background_task -from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics - DB_SETTINGS = memcached_settings()[0] MEMCACHED_HOST = DB_SETTINGS["host"] MEMCACHED_PORT = DB_SETTINGS["port"] MEMCACHED_NAMESPACE = str(os.getpid()) MEMCACHED_ADDR = f"{MEMCACHED_HOST}:{MEMCACHED_PORT}" +INSTANCE_METRIC_NAME = f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}" _test_bt_set_get_delete_scoped_metrics = [ ("Datastore/operation/Memcached/set", 1), @@ -43,6 +42,7 @@ ("Datastore/allOther", 3), ("Datastore/Memcached/all", 3), ("Datastore/Memcached/allOther", 3), + (INSTANCE_METRIC_NAME, 3), ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), @@ -80,6 +80,7 @@ def test_bt_set_get_delete(): ("Datastore/allWeb", 3), ("Datastore/Memcached/all", 3), ("Datastore/Memcached/allWeb", 3), + (INSTANCE_METRIC_NAME, 3), ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), diff --git a/tests/datastore_psycopg/test_cursor.py b/tests/datastore_psycopg/test_cursor.py index 77b8a1e1fb..3eff3ed7ab 100644 --- a/tests/datastore_psycopg/test_cursor.py +++ b/tests/datastore_psycopg/test_cursor.py @@ -99,9 +99,17 @@ async def _execute(connection, cursor, row_type, wrapper): sql = f"create table {DB_SETTINGS['table_name']} (a integer, b real, c text)" await maybe_await(cursor.execute(wrapper(sql))) - sql = f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s)" + sql = f"insert into {DB_SETTINGS['table_name']} values (%s, %s, %s) returning a, b, c" params = [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")] - await maybe_await(cursor.executemany(wrapper(sql), params)) + 
await maybe_await(cursor.executemany(wrapper(sql), params, returning=True)) + + # Consume inserted records to check that returning param functions + records = [] + while True: + records.append(cursor.fetchone()) + if not cursor.nextset(): + break + assert len(records) == len(params) sql = f"select * from {DB_SETTINGS['table_name']}" await maybe_await(cursor.execute(wrapper(sql))) diff --git a/tests/datastore_pymemcache/test_memcache.py b/tests/datastore_pymemcache/test_memcache.py index 7fad815b41..ce2cf6aa23 100644 --- a/tests/datastore_pymemcache/test_memcache.py +++ b/tests/datastore_pymemcache/test_memcache.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - import pymemcache.client from testing_support.db_settings import memcached_settings from testing_support.validators.validate_transaction_metrics import ( @@ -45,6 +43,7 @@ ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), + (f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}", 3), ] @@ -82,6 +81,7 @@ def test_bt_set_get_delete(): ("Datastore/operation/Memcached/set", 1), ("Datastore/operation/Memcached/get", 1), ("Datastore/operation/Memcached/delete", 1), + (f"Datastore/instance/Memcached/{MEMCACHED_HOST}/{MEMCACHED_PORT}", 3), ] diff --git a/tests/datastore_pymongo/test_pymongo.py b/tests/datastore_pymongo/test_pymongo.py index 507b556cb7..6a0dfe5ef4 100644 --- a/tests/datastore_pymongo/test_pymongo.py +++ b/tests/datastore_pymongo/test_pymongo.py @@ -29,6 +29,15 @@ MONGODB_COLLECTION = DB_SETTINGS["collection"] +# Find correct metric name based on import availability. +try: + from pymongo.synchronous.mongo_client import MongoClient # noqa + INIT_FUNCTION_METRIC = "Function/pymongo.synchronous.mongo_client:MongoClient.__init__" +except ImportError: + from pymongo.mongo_client import MongoClient # noqa + INIT_FUNCTION_METRIC = "Function/pymongo.mongo_client:MongoClient.__init__" + + def _exercise_mongo_v3(db): db[MONGODB_COLLECTION].save({"x": 10}) db[MONGODB_COLLECTION].save({"x": 8}) @@ -114,7 +123,7 @@ def _exercise_mongo(db): _test_pymongo_scoped_metrics_v3 = [ - ("Function/pymongo.mongo_client:MongoClient.__init__", 1), + (INIT_FUNCTION_METRIC, 1), (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_index", 1), (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find", 3), (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one", 1), @@ -141,7 +150,7 @@ def _exercise_mongo(db): _test_pymongo_scoped_metrics_v4 = [ - ("Function/pymongo.mongo_client:MongoClient.__init__", 1), + (INIT_FUNCTION_METRIC, 1), (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/create_index", 1), (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find", 3), (f"Datastore/statement/MongoDB/{MONGODB_COLLECTION}/find_one", 1), @@ -163,7 +172,7 @@ def _exercise_mongo(db): ] _test_pymongo_rollup_metrics_v3 = [ - ("Function/pymongo.mongo_client:MongoClient.__init__", 1), + (INIT_FUNCTION_METRIC, 1), ("Datastore/all", 28), ("Datastore/allOther", 28), ("Datastore/MongoDB/all", 28), @@ -215,7 +224,7 @@ def _exercise_mongo(db): ] _test_pymongo_rollup_metrics_v4 = [ - ("Function/pymongo.mongo_client:MongoClient.__init__", 1), + (INIT_FUNCTION_METRIC, 1), ("Datastore/all", 25), ("Datastore/allOther", 25), ("Datastore/MongoDB/all", 25), @@ -276,7 +285,7 @@ def test_mongodb_client_operation(): ) @background_task() def _test(): - client = pymongo.MongoClient(MONGODB_HOST, 
MONGODB_PORT) + client = MongoClient(MONGODB_HOST, MONGODB_PORT) db = client.test _exercise_mongo(db) @@ -286,7 +295,7 @@ def _test(): @validate_database_duration() @background_task() def test_mongodb_database_duration(): - client = pymongo.MongoClient(MONGODB_HOST, MONGODB_PORT) + client = MongoClient(MONGODB_HOST, MONGODB_PORT) db = client.test _exercise_mongo(db) @@ -297,7 +306,7 @@ def test_mongodb_and_sqlite_database_duration(): # Make mongodb queries - client = pymongo.MongoClient(MONGODB_HOST, MONGODB_PORT) + client = MongoClient(MONGODB_HOST, MONGODB_PORT) db = client.test _exercise_mongo(db) diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 460b26b7d6..8cc1fdaa8a 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -696,7 +696,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk( @validate_custom_events(chat_completion_expected_malformed_response_streaming_chunk_events) @validate_custom_event_count(count=2) @validate_error_trace_attributes( - "botocore.eventstream:InvalidHeadersLength", + "botocore.eventstream:ChecksumMismatch", exact_attrs={ "agent": {}, "intrinsic": {}, @@ -723,7 +723,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk( def _test(): model = "amazon.titan-text-express-v1" body = (chat_completion_payload_templates[model] % ("Malformed Streaming Chunk", 0.7, 100)).encode("utf-8") - with pytest.raises(botocore.eventstream.InvalidHeadersLength): + with pytest.raises(botocore.eventstream.ChecksumMismatch): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") diff --git a/tests/external_botocore/test_botocore_dynamodb.py b/tests/external_botocore/test_botocore_dynamodb.py index ab4ea5d12c..c031f543f6 100644 --- a/tests/external_botocore/test_botocore_dynamodb.py +++ b/tests/external_botocore/test_botocore_dynamodb.py @@ -124,7 +124,6 @@ def test_dynamodb(): Key={ "Id": {"N": "101"}, "Foo": {"S": "hello_world"}, - "SomeValue": {"S": "some_random_attribute"}, }, ) assert resp["Item"]["SomeValue"]["S"] == "some_random_attribute" @@ -135,7 +134,6 @@ def test_dynamodb(): Key={ "Id": {"N": "101"}, "Foo": {"S": "hello_world"}, - "SomeValue": {"S": "some_random_attribute"}, }, AttributeUpdates={ "Foo2": {"Value": {"S": "hello_world2"}, "Action": "PUT"}, diff --git a/tests/framework_grpc/test_get_url.py b/tests/framework_grpc/test_get_url.py index 43fad0adf4..0b42e6a9a0 100644 --- a/tests/framework_grpc/test_get_url.py +++ b/tests/framework_grpc/test_get_url.py @@ -18,32 +18,28 @@ from newrelic.hooks.framework_grpc import _get_uri_method - _test_get_url_unary_unary = [ - ('localhost:1234', '/sample/method', - 'grpc://localhost:1234/sample/method'), - ('localhost:1234', 'method/without/leading/slash', - 'grpc://localhost:1234/method/without/leading/slash'), - ('localhost', '/no/port', - 'grpc://localhost/no/port'), + ("localhost:1234", "/sample/method", "grpc://localhost:1234/sample/method"), + ("localhost:1234", "method/without/leading/slash", "grpc://localhost:1234/method/without/leading/slash"), + ("localhost", "/no/port", "grpc://localhost/no/port"), + ("newrelic-otel-productcatalogservice", "/no/port", "grpc://newrelic-otel-productcatalogservice/no/port"), ] _test_channel_types = [ - ('unary_unary', grpc._channel._UnaryUnaryMultiCallable), - ('unary_stream', 
grpc._channel._UnaryStreamMultiCallable), - ('stream_unary', grpc._channel._StreamUnaryMultiCallable), - ('stream_stream', grpc._channel._StreamStreamMultiCallable), + ("unary_unary", grpc._channel._UnaryUnaryMultiCallable), + ("unary_stream", grpc._channel._UnaryStreamMultiCallable), + ("stream_unary", grpc._channel._StreamUnaryMultiCallable), + ("stream_stream", grpc._channel._StreamStreamMultiCallable), ] -@pytest.mark.parametrize('url,method,expected', _test_get_url_unary_unary) -@pytest.mark.parametrize('channel_type,channel_class', _test_channel_types) -def test_get_url_method(url, method, expected, channel_type, - channel_class): +@pytest.mark.parametrize("url,method,expected", _test_get_url_unary_unary) +@pytest.mark.parametrize("channel_type,channel_class", _test_channel_types) +def test_get_url_method(url, method, expected, channel_type, channel_class): channel = grpc.insecure_channel(url) unary_unary = getattr(channel, channel_type)(method) - assert type(unary_unary) == channel_class + assert isinstance(unary_unary, channel_class) actual_url, actual_method = _get_uri_method(unary_unary) assert actual_url == expected - assert actual_method == method.lstrip('/') + assert actual_method == method.lstrip("/") diff --git a/tests/messagebroker_confluentkafka/conftest.py b/tests/messagebroker_confluentkafka/conftest.py index eddaa84d8b..850d3872b0 100644 --- a/tests/messagebroker_confluentkafka/conftest.py +++ b/tests/messagebroker_confluentkafka/conftest.py @@ -27,7 +27,11 @@ DB_SETTINGS = kafka_settings()[0] -BROKER = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" + +@pytest.fixture(scope="session") +def broker(): + BROKER = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" + return BROKER _default_settings = { @@ -58,15 +62,15 @@ def skip_if_not_serializing(client_type): @pytest.fixture(scope="function") -def producer(topic, client_type, json_serializer): +def producer(topic, client_type, json_serializer, broker): from confluent_kafka import Producer, SerializingProducer if client_type == "cimpl": - producer = Producer({"bootstrap.servers": BROKER}) + producer = Producer({"bootstrap.servers": broker}) elif client_type == "serializer_function": producer = SerializingProducer( { - "bootstrap.servers": BROKER, + "bootstrap.servers": broker, "value.serializer": lambda v, c: json.dumps(v).encode("utf-8"), "key.serializer": lambda v, c: json.dumps(v).encode("utf-8") if v is not None else None, } @@ -74,7 +78,7 @@ def producer(topic, client_type, json_serializer): elif client_type == "serializer_object": producer = SerializingProducer( { - "bootstrap.servers": BROKER, + "bootstrap.servers": broker, "value.serializer": json_serializer, "key.serializer": json_serializer, } @@ -87,13 +91,13 @@ def producer(topic, client_type, json_serializer): @pytest.fixture(scope="function") -def consumer(group_id, topic, producer, client_type, json_deserializer): +def consumer(group_id, topic, producer, client_type, json_deserializer, broker): from confluent_kafka import Consumer, DeserializingConsumer if client_type == "cimpl": consumer = Consumer( { - "bootstrap.servers": BROKER, + "bootstrap.servers": broker, "auto.offset.reset": "earliest", "heartbeat.interval.ms": 1000, "group.id": group_id, @@ -102,7 +106,7 @@ def consumer(group_id, topic, producer, client_type, json_deserializer): elif client_type == "serializer_function": consumer = DeserializingConsumer( { - "bootstrap.servers": BROKER, + "bootstrap.servers": broker, "auto.offset.reset": "earliest", "heartbeat.interval.ms": 1000, "group.id": group_id, 
@@ -113,7 +117,7 @@ def consumer(group_id, topic, producer, client_type, json_deserializer): elif client_type == "serializer_object": consumer = DeserializingConsumer( { - "bootstrap.servers": BROKER, + "bootstrap.servers": broker, "auto.offset.reset": "earliest", "heartbeat.interval.ms": 1000, "group.id": group_id, @@ -168,12 +172,12 @@ def __call__(self, obj, ctx): @pytest.fixture(scope="function") -def topic(): +def topic(broker): from confluent_kafka.admin import AdminClient, NewTopic topic = f"test-topic-{str(uuid.uuid4())}" - admin = AdminClient({"bootstrap.servers": BROKER}) + admin = AdminClient({"bootstrap.servers": broker}) new_topics = [NewTopic(topic, num_partitions=1, replication_factor=1)] topics = admin.create_topics(new_topics) for _, f in topics.items(): diff --git a/tests/messagebroker_confluentkafka/test_consumer.py b/tests/messagebroker_confluentkafka/test_consumer.py index 7c759c91b8..5df1414cf5 100644 --- a/tests/messagebroker_confluentkafka/test_consumer.py +++ b/tests/messagebroker_confluentkafka/test_consumer.py @@ -36,11 +36,11 @@ from newrelic.common.object_names import callable_name -def test_custom_metrics(get_consumer_record, topic): +def test_custom_metrics(get_consumer_record, topic, expected_broker_metrics): custom_metrics = [ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), - ] + ] + expected_broker_metrics @validate_transaction_metrics( f"Named/{topic}", @@ -64,7 +64,7 @@ def _test(): _test() -def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): +def test_custom_metrics_on_existing_transaction(get_consumer_record, topic, expected_broker_metrics): from confluent_kafka import __version__ as version @validate_transaction_metrics( @@ -73,7 +73,8 @@ def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), (f"Python/MessageBroker/Confluent-Kafka/{version}", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @validate_transaction_count(1) @@ -84,13 +85,15 @@ def _test(): _test() -def test_custom_metrics_inactive_transaction(get_consumer_record, topic): +def test_custom_metrics_inactive_transaction(get_consumer_record, topic, expected_missing_broker_metrics): + @validate_transaction_metrics( "test_consumer:test_custom_metrics_inactive_transaction.._test", custom_metrics=[ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", None), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", None), - ], + ] + + expected_missing_broker_metrics, background_task=True, ) @validate_transaction_count(1) @@ -139,7 +142,7 @@ def _test(): _test() -def test_distributed_tracing_headers(topic, producer, consumer, serialize): +def test_distributed_tracing_headers(topic, producer, consumer, serialize, expected_broker_metrics): # Produce the messages inside a transaction, making sure to close it. 
@validate_transaction_count(1) @background_task() @@ -153,7 +156,8 @@ def _produce(): rollup_metrics=[ ("Supportability/DistributedTrace/AcceptPayload/Success", None), ("Supportability/TraceContext/Accept/Success", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @validate_transaction_count(1) @@ -179,3 +183,13 @@ def _test(): _produce() _consume() + + +@pytest.fixture(scope="function") +def expected_broker_metrics(broker, topic): + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", 1) for server in broker.split(",")] + + +@pytest.fixture(scope="function") +def expected_missing_broker_metrics(broker, topic): + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", None) for server in broker.split(",")] diff --git a/tests/messagebroker_confluentkafka/test_producer.py b/tests/messagebroker_confluentkafka/test_producer.py index bafb556aee..675bb3acba 100644 --- a/tests/messagebroker_confluentkafka/test_producer.py +++ b/tests/messagebroker_confluentkafka/test_producer.py @@ -96,7 +96,7 @@ def producer_callback2(err, msg): assert callback2_called.wait(5), "Callback never called." -def test_trace_metrics(topic, send_producer_message): +def test_trace_metrics(topic, send_producer_message, expected_broker_metrics): from confluent_kafka import __version__ as version scoped_metrics = [(f"MessageBroker/Kafka/Topic/Produce/Named/{topic}", 1)] @@ -106,7 +106,7 @@ def test_trace_metrics(topic, send_producer_message): "test_producer:test_trace_metrics..test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, - custom_metrics=[(f"Python/MessageBroker/Confluent-Kafka/{version}", 1)], + custom_metrics=[(f"Python/MessageBroker/Confluent-Kafka/{version}", 1)] + expected_broker_metrics, background_task=True, ) @background_task() @@ -116,13 +116,14 @@ def test(): test() -def test_distributed_tracing_headers(topic, send_producer_message): +def test_distributed_tracing_headers(topic, send_producer_message, expected_broker_metrics): @validate_transaction_metrics( "test_producer:test_distributed_tracing_headers..test", rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @background_task() @@ -134,13 +135,14 @@ def test(): test() -def test_distributed_tracing_headers_under_terminal(topic, send_producer_message): +def test_distributed_tracing_headers_under_terminal(topic, send_producer_message, expected_broker_metrics): @validate_transaction_metrics( "test_distributed_tracing_headers_under_terminal", rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @background_task(name="test_distributed_tracing_headers_under_terminal") @@ -166,3 +168,8 @@ def test(): producer.flush() test() + + +@pytest.fixture(scope="function") +def expected_broker_metrics(broker, topic): + return [(f"MessageBroker/Kafka/Nodes/{server}/Produce/{topic}", 1) for server in broker.split(",")] diff --git a/tests/messagebroker_kafkapython/conftest.py b/tests/messagebroker_kafkapython/conftest.py index 63080d4d4e..3e1ff26965 100644 --- a/tests/messagebroker_kafkapython/conftest.py +++ b/tests/messagebroker_kafkapython/conftest.py @@ -28,8 +28,11 @@ DB_SETTINGS = kafka_settings()[0] -BOOTSTRAP_SERVER = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" -BROKER = [BOOTSTRAP_SERVER] + +@pytest.fixture(scope="session") +def 
broker(): + BOOTSTRAP_SERVER = f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}" + return [BOOTSTRAP_SERVER] _default_settings = { @@ -62,24 +65,24 @@ def skip_if_not_serializing(client_type): @pytest.fixture(scope="function") -def producer(client_type, json_serializer, json_callable_serializer): +def producer(client_type, json_serializer, json_callable_serializer, broker): if client_type == "no_serializer": - producer = kafka.KafkaProducer(bootstrap_servers=BROKER) + producer = kafka.KafkaProducer(bootstrap_servers=broker) elif client_type == "serializer_function": producer = kafka.KafkaProducer( - bootstrap_servers=BROKER, + bootstrap_servers=broker, value_serializer=lambda v: json.dumps(v).encode("utf-8") if v else None, key_serializer=lambda v: json.dumps(v).encode("utf-8") if v else None, ) elif client_type == "callable_object": producer = kafka.KafkaProducer( - bootstrap_servers=BROKER, + bootstrap_servers=broker, value_serializer=json_callable_serializer, key_serializer=json_callable_serializer, ) elif client_type == "serializer_object": producer = kafka.KafkaProducer( - bootstrap_servers=BROKER, + bootstrap_servers=broker, value_serializer=json_serializer, key_serializer=json_serializer, ) @@ -89,11 +92,11 @@ def producer(client_type, json_serializer, json_callable_serializer): @pytest.fixture(scope="function") -def consumer(group_id, topic, producer, client_type, json_deserializer, json_callable_deserializer): +def consumer(group_id, topic, producer, client_type, json_deserializer, json_callable_deserializer, broker): if client_type == "no_serializer": consumer = kafka.KafkaConsumer( topic, - bootstrap_servers=BROKER, + bootstrap_servers=broker, auto_offset_reset="earliest", consumer_timeout_ms=100, heartbeat_interval_ms=1000, @@ -102,7 +105,7 @@ def consumer(group_id, topic, producer, client_type, json_deserializer, json_cal elif client_type == "serializer_function": consumer = kafka.KafkaConsumer( topic, - bootstrap_servers=BROKER, + bootstrap_servers=broker, key_deserializer=lambda v: json.loads(v.decode("utf-8")) if v else None, value_deserializer=lambda v: json.loads(v.decode("utf-8")) if v else None, auto_offset_reset="earliest", @@ -113,7 +116,7 @@ def consumer(group_id, topic, producer, client_type, json_deserializer, json_cal elif client_type == "callable_object": consumer = kafka.KafkaConsumer( topic, - bootstrap_servers=BROKER, + bootstrap_servers=broker, key_deserializer=json_callable_deserializer, value_deserializer=json_callable_deserializer, auto_offset_reset="earliest", @@ -124,7 +127,7 @@ def consumer(group_id, topic, producer, client_type, json_deserializer, json_cal elif client_type == "serializer_object": consumer = kafka.KafkaConsumer( topic, - bootstrap_servers=BROKER, + bootstrap_servers=broker, key_deserializer=json_deserializer, value_deserializer=json_deserializer, auto_offset_reset="earliest", @@ -190,13 +193,13 @@ def __call__(self, obj): @pytest.fixture(scope="function") -def topic(): +def topic(broker): from kafka.admin.client import KafkaAdminClient from kafka.admin.new_topic import NewTopic topic = f"test-topic-{str(uuid.uuid4())}" - admin = KafkaAdminClient(bootstrap_servers=BROKER) + admin = KafkaAdminClient(bootstrap_servers=broker) new_topics = [NewTopic(topic, num_partitions=1, replication_factor=1)] admin.create_topics(new_topics) diff --git a/tests/messagebroker_kafkapython/test_consumer.py b/tests/messagebroker_kafkapython/test_consumer.py index 2ed3d7ae67..4199e71a6b 100644 --- a/tests/messagebroker_kafkapython/test_consumer.py +++ 
b/tests/messagebroker_kafkapython/test_consumer.py @@ -36,14 +36,15 @@ from newrelic.common.object_names import callable_name -def test_custom_metrics(get_consumer_record, topic): +def test_custom_metrics(get_consumer_record, topic, expected_broker_metrics): @validate_transaction_metrics( f"Named/{topic}", group="Message/Kafka/Topic", custom_metrics=[ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) def _test(): @@ -61,7 +62,7 @@ def _test(): _test() -def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): +def test_custom_metrics_on_existing_transaction(get_consumer_record, topic, expected_broker_metrics): from kafka.version import __version__ as version @validate_transaction_metrics( @@ -70,7 +71,8 @@ def test_custom_metrics_on_existing_transaction(get_consumer_record, topic): (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), (f"Python/MessageBroker/Kafka-Python/{version}", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @validate_transaction_count(1) @@ -81,13 +83,15 @@ def _test(): _test() -def test_custom_metrics_inactive_transaction(get_consumer_record, topic): +def test_custom_metrics_inactive_transaction(get_consumer_record, topic, expected_missing_broker_metrics): + @validate_transaction_metrics( "test_consumer:test_custom_metrics_inactive_transaction.._test", custom_metrics=[ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", None), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", None), - ], + ] + + expected_missing_broker_metrics, background_task=True, ) @validate_transaction_count(1) @@ -130,7 +134,7 @@ def _test(): _test() -def test_distributed_tracing_headers(topic, producer, consumer, serialize): +def test_distributed_tracing_headers(topic, producer, consumer, serialize, expected_broker_metrics): # Produce the messages inside a transaction, making sure to close it. 
@background_task() def _produce(): @@ -143,7 +147,8 @@ def _produce(): rollup_metrics=[ ("Supportability/DistributedTrace/AcceptPayload/Success", None), ("Supportability/TraceContext/Accept/Success", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @validate_transaction_count(1) @@ -180,3 +185,13 @@ def _poll(*args, **kwargs): consumer.poll = _poll return consumer + + +@pytest.fixture(scope="function") +def expected_broker_metrics(broker, topic): + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", 1) for server in broker] + + +@pytest.fixture(scope="function") +def expected_missing_broker_metrics(broker, topic): + return [(f"MessageBroker/Kafka/Nodes/{server}/Consume/{topic}", None) for server in broker] diff --git a/tests/messagebroker_kafkapython/test_producer.py b/tests/messagebroker_kafkapython/test_producer.py index 816d33ac3f..ba856d445d 100644 --- a/tests/messagebroker_kafkapython/test_producer.py +++ b/tests/messagebroker_kafkapython/test_producer.py @@ -29,7 +29,7 @@ from newrelic.common.object_names import callable_name -def test_trace_metrics(topic, send_producer_message): +def test_trace_metrics(topic, send_producer_message, expected_broker_metrics): from kafka.version import __version__ as version scoped_metrics = [(f"MessageBroker/Kafka/Topic/Produce/Named/{topic}", 1)] @@ -39,7 +39,7 @@ def test_trace_metrics(topic, send_producer_message): "test_producer:test_trace_metrics..test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, - custom_metrics=[(f"Python/MessageBroker/Kafka-Python/{version}", 1)], + custom_metrics=[(f"Python/MessageBroker/Kafka-Python/{version}", 1)] + expected_broker_metrics, background_task=True, ) @background_task() @@ -49,13 +49,14 @@ def test(): test() -def test_distributed_tracing_headers(topic, send_producer_message): +def test_distributed_tracing_headers(topic, send_producer_message, expected_broker_metrics): @validate_transaction_metrics( "test_producer:test_distributed_tracing_headers..test", rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @background_task() @@ -67,13 +68,14 @@ def test(): test() -def test_distributed_tracing_headers_under_terminal(topic, send_producer_message): +def test_distributed_tracing_headers_under_terminal(topic, send_producer_message, expected_broker_metrics): @validate_transaction_metrics( "test_distributed_tracing_headers_under_terminal", rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ], + ] + + expected_broker_metrics, background_task=True, ) @background_task(name="test_distributed_tracing_headers_under_terminal") @@ -98,3 +100,8 @@ def test(): producer.flush() test() + + +@pytest.fixture(scope="function") +def expected_broker_metrics(broker, topic): + return [(f"MessageBroker/Kafka/Nodes/{server}/Produce/{topic}", 1) for server in broker] diff --git a/tests/mlmodel_langchain/_mock_external_openai_server.py b/tests/mlmodel_langchain/_mock_external_openai_server.py index b956c9abce..8899a1412a 100644 --- a/tests/mlmodel_langchain/_mock_external_openai_server.py +++ b/tests/mlmodel_langchain/_mock_external_openai_server.py @@ -199,6 +199,100 @@ ], } RESPONSES_V1 = { + "3923": [ + { + "content-type": "application/json", + "openai-model": "text-embedding-ada-002", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "26", + 
"openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "3000", + "x-ratelimit-limit-tokens": "1000000", + "x-ratelimit-remaining-requests": "2999", + "x-ratelimit-remaining-tokens": "999992", + "x-ratelimit-reset-requests": "20ms", + "x-ratelimit-reset-tokens": "0s", + "x-request-id": "req_222ee158a955e783854f6e7cf52e6e5a", + }, + 200, + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": "0ylWOiHvhzsp+JM8/ZzpvC4vFL115AW8j02kvJIXjbvOdue8Trg4vGvz4ruG0Mw74dUKvcnKrLvBwSA8fWmAO3ZRnTzwXN265+7cPLqpvbvjM8u8rfVlPPjoi7puxe67gLBGuwWlBrmF2G87W+ltvIixrzx5Gwa9bGD6PJi7pLxoIde8DwzTOpQAE7yIsS88+OgLvN/shDs9vZo8n1Dlu51uEzxwMRe77oKuO3qQwLsSWs28bswiPF67+TxLYpu8mbMBvMWLiTweHus7KW3Oun5ShrvECGe89KpXO5N9cLwrVlS84sazOweH2LqrG7c8Kl53vMKjcrx3M2+8fktSPN54ubx6DK+6dsZXvD29Gj0Lza+8Q2GyPDF2Wjw3Es88VzUQvC27yLw5Agk9ihakvPrRkbwh6NO82yo/uqbktjzatvM8eZ8Xvefu3LtXqsq7wNgau+wlXbxmONG7X7qKPJ9XGbzIXRW8pPT8OmJ9vzyjh+U7UQazPMhOvrxKeRU9/C/Su1iiJz3tkvS8EeWSvCl0gryiKSU7jPeGvMjZA7x/O4w7LijguzeW4Dv5VSM8e3nGuqZgJbztknS8o44ZPV+rszxkV+68Rqj4uxnnarrP4g88IINfvBUkNrzneSK82FF/PJx2NjuzHKA8RFLbOjmGGj3/CBK8vf9aPKQKiLw1QMO87/fovGEQKD1NS6E8yrOyPKOOmTxABOG6fd66PPM1nbyqI9o8o4dlvF+z1ruG0Ew8eRTSPG1QNLwlLqu73BtoO0TOST30orQ8vnMmPZ3ypLxUXNC8j8kSvOb9M7s7WKY88Gu0PP4QtTzf9Cc8tf0CvM75iTwWkU28xnQPOxUzDb2WZYe84zPLPPWTXbzbrtA7nPrHPALMxjyalVM7NxJPvGt2hbzlFK680csVPOMk9LtB/L08dzPvO9MwijyV6Zi66kMLvNFHhLy0BaY7g/5AO1GKRDxeu3k7xQBEPEGHA71hEKg77RUXPKExyDt6BXu7hd8jPBtMXzy2apo6oiLxucwRczyoSSu/sqjUvD29GjzzuS69vmxyvJmzAT1agwo8LihgPF4+HLwiZMI8hPYdOnAxl7ydbpO6WYstOgYSHju30P28M9tOPHsEDLxhjJa88zUdunILRrwAbgY9mpwHvOBhv7viQqI8iR5Huv2URjwYgQe94zNLvBaYgTw/Io+8vA4yO8E+/jtl2hC8/ZzpPB+Stjz5Xca8WYutPAd/tbs/G9s8YYyWvE64uLsGC2o89ou6OnqQQLxD1my8ZU/LPO96izzTKdY8dzojPPJMl7wUsGq86ksuPEATODy7oZo65v0zvJXi5DwNI807IfB2O7T+cbwdLcK7eZhjO/8IkrxrdoW7ftaXvDkCCTxz9Eu96kuuPPO5rjzkHFG6wcGgO1geFj0B1Gk8vA6yPC8YGjy8kkM7/KtAPaBAn7zFdX48aCiLPEcrm7weHmu7TcDbvBWox7tKcuE8E86YvBHttbzpWoU8sqjUO614CDpXJjk9VFStPEALlbxPsJW8SJAPPD06+Dz3/4U7C0kePJ3ypLvBPQ88CHeSuil8JbwyZpQ8EHHHOvLJ9DvzNZ08e3nGvGIBUTqaIJk8ui3PvGa84rp3vjQ7wTbbuz29GjzObsS6llawvEJwCTw7YEk8JMGTu+fu3LxT4GE8BLwAvMbpSTt93jq8O2BJvIe5Ujwh7we84GG/vLVyvbzDm8+8f0MvO69hjjzRSHO6m4Z8vPYPzDx11a67juCMO8jZA7vatvM8+OiLvKC9fLy9e0m7DhOHPM/jfrtD1mw8/C9SvFiahDt3M285nIWNO5qV0zsbTN+76sccu8hdFby317E7w6KDPHkjKbyk+zC7sTMaO/u6Fzz5XUa7pPR8u2cwLjzNAS28PjmJvIEkkrzWAha8FhVfvPSxizxePhy8ucC3vGbDlryvYQ690cuVu/JMlzyt/Jm84yT0PJCymLvJRpu8zn2bvMWLCbxs45w7PEnPPKwEPTsOEwc8SInbuQ6XGDyDgtI8oxIrvLZjZrwz4gK7YCDuO9QS3DxPqWG8p828PBrXJDz60RE7yUabPMV1/rkeFsg8l9IePb17yTw/l8k8s5l9PGAYy7tV0Js7kDYqu27Fbjml8407sTs9PSnxX7pgJ6K6XcrQvOfuXLw+rsO8cDEXPMV8MjxAE7g71n4EvKDEsLvfcJY87RUXPMBN1bolNk48XdGEPIg1QTsfDiU8AG/1OzNXPT34ZWk8YZS5vBB56ryvWto6V6rKPOQjhTwzVz28rIjOOg4bKjwBX687fs/jPC4vFLm8DrI7Sv2mPFiT0Dzo3pa8cZf6Oy4vFLxYF+I8aoZLu7X9gjxwrYU7rfVlPKm2wrykf0K8VrLtO8Z0jzmJLR68oEAfO9q28zrWApa7bOu/PE64uDzAycM7+V3GushOPryUABM8ipo1POrHnLvY3MS8N50UPF67ebvxVLq8vJJDvCAGgrtVRdY6gLBGPOlT0TpgJ6I8UXvtu954OTtHp4m6xAhnvOb2/7zXb607JwjaPNZ30LsvGJo7lHyBO+6Rhbqe4807nuqBPIOJBrz2D0y5qDrUvKOOmTu317G7SYE4vM7y1TyXTg08ZFduOlGKRLo66447g3qvOzxJTzvcngq82UlcOyl0gjscPJk7iwfNvEPdoLvuitG8R6eJu6IhArxgGEs8QnH4u3P0yztVwUS7tA1JvI9FgbxYF2I8DDKku8b4oDtfugq8Tri4O9frm7xZhHk9EPXYPKuXpTsOFPY7Rq8svLG3qzvO+Qm94yT0O31iTDusgKs7q5DxuvYAdbs2pbc8/Zxpu+lahTwXgvY7+z4pvLWBlLtS/g87Nq1aPLKoVLqWZQc8+dm0PEPWbDzCqqa7hefGO4oWJLzNAa087/fou6QD1LzNEIQ89g/MOxLWOzz9Hww7leLkuqOH5bu2Y+Y6pllxPGa8YryG1wC6pH/Cu3Rh4zsgBgI7YCeiux0tQjzkHNE7EWmkvAtCajzuBsA8kDYqvP2c6bpuxW48/DaGvNwb6Ds687E5egX7O0GHA70pfKW8khcNvdyeirw+OQk86OY5vAFfLzzMnLi8flKGu59Xm
bwnkx+8xvigu2RmRbxgJyK8bVA0PLwHfry6Jay8Fw28uy6kzjxr+hY893RAPMovITxpERG9coe0PIx7mLxK/aa8gagjvC4vFLx0YWM8AG6Gu1tskDscRLw6+k0AvIuDuzxvObo8Y+rWu31izDsuq4K86c+/POfu3DyBoIC8Pxvbu+dqSzsQgJ68B381PIe50rwxdlq80ynWvJi0cLxFO2E8PM1gPNo5Frue6gG96N4WvWisHLxFxqa81vM+PEpy4Tp5I6k83oDcPMyUFTy1gZQ712eKvNlJ3Lwsyp88hGvYvOfu3Lor0kK8Q9ZsvLG3q7w+rkM8llYwvdMhs7wLSZ47+GXpu0enCTvzua68IAYCO37HwLxRe+07aCiLO2CjED3MlBU8zJQVPMjSz7uIsS87S2q+vHbNC7yf0we9JwjavABuBr3egNw7y6sPvL5zpjxdRr87oL18PPWaEbyiGs45oTFIOmkZtLyc+ke8k4SkvMQXvjw7WKY8BgvqPIINmDynzbw7eZ8XPJx2tjsuKGA74VLouRHe3ruMdGS8eCvMPCtOsTzatvM7agI6u8OiAz1ESjg8znZnu89moTu7oRq7FhXfu6T7sLx9Wqm7QngsvCplq7xr+pa8W+ltO+pDCzzzuS68yjfEuxnuHjySGPy8SJgyPV+6CrsI7My5NTggPN/0pzxd0QS9m42wvJMItrqpK/07pexZPDJfYDoY9sE8Pb0au7ORWjwZ7h68+tGROkL0Gr1GqHg8cCrjukzeibuHPWQ8BRpBvMjZA71sYHq84VkcujxJzzwnkx+90rSbPLG3K7xhlLm8cgMjOjPigrwR7TU9Uv6PPPwv0jzRy5U8MmaUPBJLdrxYF+K7Rq8sOlRN+bucdrY8AGfSPNdvLbwUv0G81ndQu+/+nDtGqHi8vmzyvERKuLvpzz+8TM+yuytOsTo3nRS7z2YhvSCKkzvP4o+8kaNBvLWBFLwgihO8YCBuvBagpDxiCAW9qL5lPOjeljzOfZu8x9ryu0tim7wZag08y6uPOwHbnbt1zvo8g/7AvNHLFTuFYzU8qbZCvGisHL1T7zg8LMNrPPWT3TyJoti8PxtbvKOH5by1/QI9tAUmO5dODTzivhC8uTymu+d5ory5uJQ8ucA3PE1LoTxmPwW84r//O/vCOruHudK8GIEHvRj2Qby9/9o85gwLvJN98LsIcF68t1tDu859mzw73Lc5lHwBu9o5FrvAycM81gKWvL7vlDy8B/67MXZaPVVMCr3O+Qm7IINfvNfk57y6qb08sTMavIixr7zA2Jo7Zyl6POhb9LrFhFW8UB2tvM7y1Txb8CE8Y/L5OhvPgTn2izq8wT0PPaMSKzuU8Ts8QAThPNwbaLop8V87yjfEPBD8DDyTfXA7Jw+Ou852Z7x6Bfu7oLXZO5EuBzzBNts79ZoRva5i/bsAZ1K8HLn2uqC8jTyteAg5Rr4DvA8MUzsJ3XU7RcYmPE1EbTqX0h69b7Uou1XQm7zZSVy8ZrzivP99TLzoW3Q8EmGBuwrchjsAdqm8CPujuqBAHzyd6/C83RNFPD+fbDufSMI8TccPPFXBxLzJP2c8vnMmvA0jzTyIsa+8uM8OumzjnLuPyRK8rmJ9uycXsTzzPUC81BLcu4z3BrxNRG284r4QvMnKrDteu/m5i4M7vCxGDrv44Vc8YggFPQh3kjyJHsc7U+cVvb17yTu9Bg+7n8xTPGGF4rqN6K+85BzROgYLarzqwGg7p808vIRyDLydbpM8YCDuvBUzjTyDiQY8TcePO13RhDpKcuE8LD/aPHAiQLxWsu06mLTwvAYSnrxs6788ybtVvAB2qbugvXy8ewQMO0H8vbzliei8JS4rPGXLubsbTF+8YggFPYzw0jsOlxi9DpcYvIZbErwI84C8mLsku9bzvrx7BIy7htcAPQBuBrut9WU7aY5uPEa3z7y9/1q7ux2JPEao+Lu317E7AdRpPhiBh7zXYNY8rXgIPVL+D7yTCLY8ElIqPfyrQLtQFvk367CiO+fuXDyteIg8f7+dvKR/QjwxfQ48Da4SvDmGGr0K1dK8r1pavGkRkTqrExQ8BgtqubZj5rtYoie8uFMgPYGggDxdVZa83oeQuz6uw7ocwKq6mbOBvOFS6Dv+jCO88y7pPCTJtrzA0ea70zAKPc0J0LwQeeo7iKr7PIc9ZDxIids6VNg+vK/WSLpwIkA7uTymOwBvdbotu8i8/aMdvDJf4DxQDta86OY5u8Kj8jvhWRw9JS4rvNQZkLp6iB08BaUGvOlaBbxOuLg6lHXNOdwbaDwduAe92FH/PKuQcbtsZ648C0kevRlqDbr0sQu7ig/wvOwlXbs2rVo7y6RbvDAJwzpIDW284VJouxCAHruHRBi8sMaCPB8OpTz5XUa85ZAcO5sRQrzX6xu8OfvUvCLgMLx2zYs8u6Gau9V/czy9e0k7Y3UcPJdHWbxCeKy8DDIkPGzjnDyO4Iw8HakwutFHBD1jdRy6tmoavJsJn7zmBdc8Tri4O7ZqGjt3M++7qEEIu0tiG7wybjc7DDrHPIGhb7wduAe7TUuhvIe5Ujw8xb27khDZvPf4UTw1Mey6cDEXu5i7pDt1WcC8IfeqOxFppLwPBDC8gCy1PCB7vDkkwZO8hedGOwS8gDsohEi8DafevAWlhrsyX2C7OAqsPNhRfzsqXve7zQEtPAtCarv3/wW8gaCAvGAgbjzsHbo5VyY5vDTE1LwvGJq5YBjLPJEuB73n9RA8tA3Ju7k8prxrdgW9PUGsvM/ijzvLqw89S2q+vIXY7ztc4cq8JxexvP8IEr2TjMc89ZoRvJ7qAbyuYv27hdjvO6C9fDx1WUC9G0zfuzPTK76hMcg7cDGXOjkCCbwWkU285Jg/POfuXDzlieg7yqx+vExTxDwUv8E8egyvvJyFDbytcVQ8NqU3vFa5Ibu8kkM8tYEUPHztkTy5NXK7Sv0mPejmObxWPbM6lHVNu1gXYjwj0Vm8LqROPJ9Q5TzMlBU8Q+XDvHGWC7w5Aok79hYAvB4WSDtdVRY8t9exvBaRzbvPZqG7PElPuz+XSTzy0Kg8MIUxPDa0jrteu/m7Omjsu37HwDzlieg7iS2eul1OYrttWNe8TUTtPKIazrq31zE8JTZOuAnd9TzA0eY7tP7xvEgUoTvatvO7n0jCODJuNzlNx4+7/R8MPYRyjLzhWZw82Uncu4x05Dut7UI8sTs9vC+Vdzv1k108GefqvF4+HDyWZQe9FhwTPHXOejx0aJc7pAoIvF8vxTtGr6w7qp9IuT8ijzzA0Wa8kKvkO5CyGLv1mpG7MX2Ou0avrDykCog8T6E+vH7HQDzP4/68lekYvKIpJbywx/E8jHRkPCn4Ez1JgTg8YCeiPJCr5LwxfQ48Trg4vCAGAr2UfAE8cDGXPI9NpDuVbao8BLwAPMMmFTylaEg8OvMxvHGX+rqWVjA8KXSCPDchpjxl2pA7BDG7vAYLartGr6y7KXQCOw8EMD2SFw27lW0qvAHU6TzIXZW7ZFfuu2GF4r0cuXa7yqz+O852Zz3W
8z48PyIPPSUnd7xNRG08BaWGPGN1HD3ukQW8Y+rWvHv9VzxRgqE8PM1gvLbuKzy+awO8AlDYvOFKxbzhWZw8bsVuPPaLurx4p7o87ZJ0vEYzPry317G8nevwvIfABjx13dE78tCouSAGAj3RQNC8NqW3umRmRbyLg7s7HqGNPKi+5bvVhqe8gaCAPLjPDr37upe7gaFvu8yUFbtwrQW9aCHXvJmszTrcIpy7JT2COxnnajyPRnC8c/TLvJ3rcLwh74e8qiqOPBj2QbxNwFu7AG/1O8BN1TwvlAi7J4zrubySQ7u31zE6xYTVvL0GDzy0Dck8Vj0zPEYzPrsvlIg8zBinPLE7vTrv/hy8PUEsO/u6l7s+MtU68GMRvSgANzwfkra8jPDSOxw8GT3zua670U8nvM7yVbxYmgQ7LqsCvNhRfzwYgQc8VdAbvIIGZLzsHbo89KrXu1tskLt8cSM9mLTwPDchprwz2068+7oXPMhdFbuMexi9UnPKPMKyybtntD+8YYyWvJZlh73sJV084rfcO9nFyrwbz4E8VdAbPCAGAjy3W8O85+5cO+2SdDzveou8BwPHPDeW4Lz/fUy873qLvEVCFTuQuju5lk98O6sTlDuJLR669hYAva9a2rtV0Js87pEFPCU9Ajzf5dA8fHGjvAWe0juLDgG9eKe6vN2I/zyn3BO8GIEHu638GT1UYwS8GefqvFgelrwxfY48L5V3Ol1OYrvm9v87cvzuvJ9Q5bunUU68Om+gPHIDo7yF2G+7ftaXPKC9/Dy+bPK84N0tPGxg+jvHZTi8bOu/vDzN4Lu317G8pXcfvIOJhrxEUts8uTwmvdK0mzy5wDc5FhXfPOYMizvqwOg8Da6Su/w2BrzGbVu5H5I2vMwR87zqx5y7+VWju0gN7TtD5cM88GORPOu4RbwjTUi7LqROPOQjBb24REm7ZjjRO+d5IrxmvOK7Om+gu3qIHT3v9+g0znZnOxS3njwY9kG8F4J2PEALFbwdLcI71Y7KvJ9Q5bxBh4M81Qq5u9fkZ7zivhC8x+GmPCnx3zwQeeq7Li8UOw6XGL2rExS88OcivJmzgbvMlJW8/ZTGvE3Hjzw81BQ9U+DhPPYHKT0LSZ68KAA3PPWT3buBoW+7flIGvRvITbwNKgG9+kZMPKDEsLxM3gk8j00kPVCZG7ySGPw7/33Mu3ZCRjwAb3W8EHnqOwnddTzA2Bq8rmkxvEJwCbw73De8cvxuus9mITxCadW8AleMPImiWDtPoT49AG91PCFzmbuteIg8dlEdvHsEjDy7mua69/hRO7KviLyoxRm9TyyEPE3A27xy/O48W+ltuHxqbzxVwcS8vJJDO5C6uzzyyIW8rIArPKMSq7qvUjc7+OHXPLX2zjwp8V+8NMuIO2Y40Tx7eca8htDMvIoP8Lztmai8MX0OPEH8Pb25PKa8ycqsPDPiAr0XgnY8ycosvGEQqDzFhNU65Jg/vCTBkzxPsBW9ucC3u7EzGjzTMIo5O1imvCD/zbtONCe9", + } + ], + "model": "text-embedding-ada-002", + "usage": {"prompt_tokens": 8, "total_tokens": 8}, + }, + ], + "10590": [ + { + "content-type": "application/json", + "openai-model": "text-embedding-ada-002", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "19", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "3000", + "x-ratelimit-limit-tokens": "1000000", + "x-ratelimit-remaining-requests": "2999", + "x-ratelimit-remaining-tokens": "999998", + "x-ratelimit-reset-requests": "20ms", + "x-ratelimit-reset-tokens": "0s", + "x-request-id": "req_f6e05c43962fe31877c469d6e15861ee", + }, + 200, + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": 
"kpYxPGKcETvy0vc77wK5vCbHl7z8bR+8/z3evOW4lbzlAJw7Q/2vvNETfTxppdg8Gew9vCi/wzwBp6e7bXZBPDPCCj03suI6vMe8PMRpDjt9QXM7xGmOPIjcGjzSPJS8R6YrvFbBtzvkR3g7WboNvGa1ALzKCja8FhvVPJfPubxOZ+y8odHWvGw2D732e/M7RPXbvPhUMLzqOSQ8ZYzpu1Og9DwFMIo8n5EkO9UsbLyUHmq7iNyaPAthvruiOfa8246LvNPsOTuHvAE81SzsPEBM4Ls3smK8ZJS9vAppkruMPZA8Ljh+PCd3PTsvYZW7iJSUu2ALW7xyr0m8ch89PH/Sqbvqgaq70YPwO/mctjvUNMC8VVkYPJP+UDsBz5Q8JjcLPJAFeztiVAs9OYsfvB0Frby6pnm7tW1xO0Rlzzylor88NXKwvK6sMLviT8w6HJ2NPPFCa7wftdK8NOIjPKKpabsY9JG75biVPGvuCLv/zeo6jPWJPKFBSrxzP1a8vA/Du5g32TzEsRQ8yMoDvauLbTvpYZE8bea0u1e5Y7wDn1O7KN/cup8BmDtzF+m78JJFPFV5sTuU1uO80xSnPNGDcDyk8hm9IUYJvKSCprxpzUU8DKnEvDuj5DuAgk+8LfjLO794jLumUuU8WiItu7FdAD1LLw48EAo6vCVWerxq7d67ddCMu60cJD15mHc81rx4PAUwijwDV028ZtRvPA96rbwcnY08saUGvatDZ7zxQuu7x8nZPPTzOrwYhJ65QiUdvdCr3TzJWhA9dsi4PEemK7v262a87Sl8u7dGrrwcVYc8vzCGPDVyMLwmx5e6YlSLvDvL0TwvGY+7vMc8PBIif7wKsZi7lmcaPNPMIDvKmsI7ORusPJ0A7jr2e/O69PO6vBarYbwWY1u8CrEYO0Y+DL2JtC0873IsvCAdcjz/PV47zNp0u8AIGbwlVvo7cxfpu1YJvjxHpqs84DcHPXAnkbyvFNA8eXmIPJwIQjwv0Yg8LRhlvJF2GDvcriQ9Kf91uz90zbs1cjC/gGK2vC2IWLuEe6W8RYXoPLunozyBEtw8bXZBPBxVh7wOyoe7nXDhOs/TSruoCwm86RmLvLsXFzxsfhW8Sp8BPI+d27xSONW8vedVOgw50bw5Qxk8DzInvHwhWrsRKlM8LLBFOw96rTydAG68E3MDuoZLZLyxXQA8Gey9OxBSwLy2JpW8T5CDPGKckTvUxMy7CCDiPIBiNjyuZCo9LRjlvI7FyLzcHhg9EXLZu1raprzUDFM8vS/cPD8sx7ukOiA8QNxsvMapQDxLv5q8t7YhPHegyzyri208UWBCvEvnB7skplS8h7wBPGSUPTtgw1S8tk6CvBIif7wddSC6j3VuvJF2mLv7BQC9EAq6PLgeQTxRQKm76akXPIoczTyN7TW8rmSqO4n8szwsaD88pMosPUlW0bzTXC281rz4uwI3tDuiOXa84A8avZoQFrwFMAo9EAo6PGml2Dtdgvg6UGgWPVtCxjxP2Ak9wmjkPMDgKzzY3bu8NpJJPEqfgTwYhB67qks7PCXvBLtcGtm84MeTvCu4mbyayA88SVbRO+nRBDyx7Yw8vH82u5h/3zzXvSI9Sk59vDwUgrxgU+E77lITvIIThjtsDiK6zUsSvVrapjy23o67pKqTPLgeQbwiPrU7L9GIPNET/brVnF+8KW/pO/KziLt07/s7nQDuOsdZZrrD2YG7rUQRPIcECDvZtc67lj+tvE1H0zyF40Q6GPSROyHWFby8x7w83n7jvN7G6byj0oA7z9PKO/SDR7xy98+7AIcOvGcdoLtLdxQ8UxDouyH+Aj3cPrE7gcpVPJswLzxHzpg69GOuPLO9y7y2log56dGEODkbrLz63Og7PVyIOxp8yjxQ+KK8vA/DO1HQtTncZp68LNiyvEEFhDzA4Ku7O6PkvKy0BDxyr8m8Q7UpO0NFNjxY2fy7T5ADPG8m57yM9Ym8P7xTvE1H07suqPG5KnATu0KVEL2oU487kS4SPThrBjt/GrA8EAq6PJCehbwvYRU7KiiNO8rCrzwt0N67RoaSPIPrGDwk7lo7iCQhO5tYHDxmtYA7PMP9PEq+8DwLiSs8+eS8PJSudrx1YJk8OCMAvYQLMjxOZ+y8ofnDPIz1ibrNSxI8QJRmvKTyGb0NEeS86xE3PC44fjxAlOY6IB1yO9mNYbyhGV28MaHHuzlDmbw/LMc87XoAu20uO7zmsEE7SO6xPIA6STykyqy6+wUAvR4lxjsLQaU7xCGIOiMWyDxHzpi5ssWfPMSxlDxw34q6BTAKPUyXrTyz5bi8D6KaPIe8gTxUyYu8pDqgPO5Sk7wuOP48mDdZvGALWzvoYGc8LYhYvNcFqTzcPjG93K6kPPNDlTwYhB678rOIPIr03zycmM48cN8KPVD4Ijzvcqw8lh+UPBg8mDuSJr4777oyu147HDsV+zu97pqZvH/SKbtb0tI701ytu5VHgTuS3je8eXkIPL94DDt7AcG6rvQ2vKrbxzrxQus7CmkSvLgewbx64ac8CmmSPO7CBrxFhei7TW/AvAHPFLxRYEK8qMMCPFzy6zvWnYk75ZAoPI3ttbtPkAO9BAfzuywgOTs0Uhc8mjiDvDIJZ7uPdW68NSqqvPVbWrwsaL+8XWMJPALvrbtWwTc8qmtUvCVW+rwYrIu85QAcvFgqgbzuUpO8lUcBPIOjkjyFK0s7K0gmvKgLCb3sCeM83RZEPHu5urwwga68cbedvAyBVzpcGlk9TmdsPIr0X7wcnQ09fxqwO5XXDbpe85W6TtffvAuJqzwmNws69IPHO5rID7yODc88XPJrPKTKLDxe8xU8PDNxO7A0abzShJo7QpWQvN/vgLsV00684DeHPLTd5Dsq4AY8gRLcuZxQSLyNpS89UGgWuYJ6+7wqKA08YlQLPSuQLLzlAJw8h0wOvIGi6Dyu9LY88rOIO4gkITyaOAO9If6CPIQLsjw2ksm7PBSCvHxpYLz55Dy7FUPCvH5qijwdLRq8BlAjvOuhQzsC7y08hHslvJb3JryaOAO7ygq2PBisCzyEe6U6lq+gvDlDmbwF6IO8ARebu5g3WTz9tSW8kk4rvMza9LxY2fy8xGmOOrZum7x3WMU7bFaouxY77rz55Ly8H7VSvLtfnTwZ7D279aPgPE/3+DzeNl28U8jhPKkrorzzGyi873IsuOtZvbyrs1q8JDbhPGKckTtl/Fy8npB6Ox0FLbtMJ7o7RK1VPNm1zjxsNo+75mg7uoNbDDxJLuQ8oqlpPN6m0DzUDFO8ZYxpO2+287xXKVc7oqlpu/SrtDtKn4G7dO/7OgCHDj2AYjY7kAV7vC1A0jrp0YS7qiNOPBBSwDtjBLE8Big2PINbjDycwDs8To/ZPBarYbzJMqO7OCOAvAnZhTyxXYA8kk6rvKibFTwYrIs8gDrJvCZXJLyJ/LM8lY+HuwyBVzzShBq8W4rMvEQdybylWrm8C9ExvA0RZLxA3Oy7t0auO5P+0Lu7z5A8dvAlPGZk/LxoPTk70+w5vTQKkbzb1hG896
SKunxp4DzQY9e6lj+tPHBvF7zS9A28sg0muTZq3LwnLze8DKlEPGbU7zwirqg83B4YPaSqE7lu3uA8au1ePLZOArzx+mQ7RRV1OrE1kzuSJr68Pgyuu1D4ojyboKI6OjvFPGEr9DtUyQs8fdF/PDrzPjr37BC8Ht2/O7E1E7yFK8s8Tf9Mu0P9rzvAUJ886amXvEku5DsHAEm75CgJOpTWY7ykqhO8xdGtPNKsh7rm2K48QNzsu/qUYjzbRgW9D+qgvKTKrLzswdw8q4ttOwtBpbspb+k8qXOovPtNhjxn1Zm71SxsO9KsB716mSG8jPWJO92Gt7zuwga8E7sJvLjWurvLKk+8NAoRuxisizufcQu8mH/fO+Vwj7wDV028bZ6uPE5n7Lv1o+A8zrOxPKTymTyk8pk8dO/7OkKVkLyomxU8mhAWPJWPhzpcGlk8hMMrPM5rKzxLd5S7AF53vOMn37lr7oi8lK72vKficTzvKia7WEnwumJUizkW82c8Y+QXvagLiTw/5MC7ZtTvupeHs7zeNl28dzBYu4CqvDzAmKU6uGZHPMCYJTyVRwE8+SzDuuf4x7uk8hm9fUHzun3RfzvDiP08ZtTvvBvkaTziB0Y8n0kePM27hbxG9oU7WXKHuuzB3DyboKK8X6M7OQ6CAbtIDsu7iNwaPAohjLvhnya7AaenvGnNxbykOiC8GjREPK3UnTuYx+W8cte2O0wHobrEIQi8Sp8BOvMbKLzYTS88i8xyvOHnrLxrDXi77FHpO4QLMjra1Wc8pDoguu2Zb7yIJKE8ZtTvu1QREj1sfpW7BpgpuN7u1rydAO47+7T7O3CXhLwYzKQ8Z42TvEFsebyqSzs5I15OuscRYLtIxsS7lY+Hu6hTj7s1Kqo8/UUyvANXzbzf74C82mV0PGldUrx3MNi6TN8zu7X9/bsLiSu6tAXSOwQHczs68747LfjLvF7LqLyWZxq8NAoROlI41bwxEbu8XYL4vDQKkTolVno7k27EPFoirTvzQxW6Z40TPL5PdbyC6m67DzKnPPZ787yt/Aq9tAVSvERlT7wzwgo73578O9FkgTsaNMQ8MlHtOww50bwUszW8VIGFOhfL+rw+xCe8q4ttvAaYKT1sfpU8jjW8O+kZC7tmtYA8M3qEuybHlzyMhZa8n5Gku+APGrzc9iq7Ac8UO71XSbx+aoq8xCEIvNpl9Dw5i5+7qbuuPFrapjyfSR68ch89vA96rbyIlBQ8i1z/PEcWH7x8aWA8rqywvNadibzOkxg8H/1YO5WPh7x2gLK70RP9u+dATrxGPow8eHhevPUTVLyHvAG82JW1vK+EwzwvYRU8hMOrOxWLyDtsNg88IdaVurqHijg50yW7gKo8va2Ml7ydKNs8D1qUvP39q7tk3EO7m1gcPCFGiTv55Ly7ORssvPekirs7E1i8KwCgO/gMqrrV5OW8t440OyZXpDwhjo+8BwDJvO6aGbz/zeq7rUSRvLaWiLuU1mM7eXmIuWumAr3BcDi8HFWHu6wb+rsd5ZO7VlFEPkg2uLwpJ2M6o9IAPX4ihDpwJ5E73578PC6o8btFhWi7AIeOOlVZmDm2lgi8EMIzvGfVmTtGPoy7j1VVvJ4phbyHTA694Z8mvLO9S7qr+2A8QiUdPHVgmbzZ/VS8qmtUOtWc3zvQq928zbsFOqlzqDyAqry8FJMcvF/rQbvqgao84A8auzN6hLyUjt07lUeBuyrghjxbisw8jc2cPGWMaTzoYGc7LGi/PFgqgbx2yDg7BeiDPM/TSrwwOSi8mjgDvA9alDxf68G8kJ4FvBAKOrsJsO482tVnuiGODzvRE/08nXDhumg9ubsBzxQ9RvaFvERlTzxQsJy7jK2DusEAxbyomxW7DsoHvfAi0rv4xKO7ssWfutAbUbz7TQY7vZ9PvHCXhDwf/di8mKfMvCHWlTtTEGg8vk/1Oyu4GTtjvCq8dWCZu9KEGjzlcI+8cJeEvEku5Lwkfmc8cz/WPClvaTtHzhg8/dU+vL2fT7zvciy7E7uJvLe2ITtnjRM8tN1kvJ1w4TwCNzS83B4YO65kKr03+mg8p+LxPOdATjxN/8w8D1oUPJavILxGhhK8uxeXu+WQKLwe3b85pepFvEjusbincn47rLSEPOK/PzxJ5l08KwCgO9PsuTwHcDw8ImaivGxWqLxphb87t460PHRAALyAgk+8EgLmvAnZBbtsxhs6mzAvvVzyazxPkAO86WGROxSTnDuvXFa7jc0cO50oW7tGPoy8rYwXvK3UHTyow4I71rz4uyJmIjyDoxK8T5ADPHrhp7wMOVE8m3i1O7YmFTxTyOG8M+H5vI2lr7tUEZK8D3qtvDHpzTvFGbS8/mVLvR9tzLxJLmQ8D+qgPKdyfrzY3Tu8IR4cPSefqrwFMAq7BcCWvNjdO77KCra7sDTpOQzxSryqI0677poZPN7GaTxakqA5MPGhvODHE7teqw88A1dNu47FyLxo9bK6wJilPPyVDLwyUe05vedVvAUwijzYlbU8x8lZPdyupLytjJc81VTZugD3gbpUyYu7aaVYPJjH5Ts8M3E87gqNvH6yELxdgni7FLM1PIhsJ7svGY+8DcndOXOHXLzkKAm8To/ZvEku5Dt90X+8r6TcPJo4gzvdFsQ7D+ogvNiVNTvlcA88jsVIvFI41TwM8cq8+pRiPEKVkDsIkNU8BJf/u+Mn3zz/heS7b7bzvOxRaTv+9de6WElwOuRH+LtsVii9i1z/O//NajzXLRa74gfGvOsxULtTyGE8EgLmvJQeajxdgng8Kf/1u+kZCz1/0im8nJjOOyqYAD0yUe27lvemO3IfvborSCa8OLOMuy8ZDz11GBO7+00GvZF2GDwiPjW8g1uMuxMDEDzk4II8w9kBvcjKAzzKeqm8IY4PPEemq7xsVig9nXDhPPMbKDp5wQ69YpwRPaI59ru1bfG7JVb6OoZLZLzD+PA74Xe5PJrIj7yyDaY7f9KpOtGD8DxVMas5m+iovBO7ibzluJU7APcBPCgHyjwBzxS7H7VSOfjEI7xTyGG7HFUHOph/Xz0Tu4k7yVoQvHRfbzwruBk8D3qtvN5+473ShBq9zbuFuyH+Aj0bVF28w4j9PEJto7z1E9S7ZtRvuwZQIz2FK8u8GPSRvNflj7yboCI8neDUvPTzurpasjk8s71LOtI8lLyfcQs9uNY6PIpkU7z9/Ss7r6RcPPdchDs8M/E7GKwLvX/SKT0BX6E8OCOAPOIHRjxIxsS7zbuFPNd1HL29V8m7aRXMu4ITBr2lWjk7qpNBPAdwvLxTyOE7k25EPJcXwLu/eIy8M+H5OzlDGby2bpu5B0jPPJjvUroL0bG86dGEvHWIhjm7F5e6RK3Vuzey4jz6lOK7rRwkPCywxTxb+r86wUjLO9KEmrx7Scc7/G2fu2Zk/DxmtYA76skwPKuLbbwKaZK7Q7WpPMP48LuJRDo8AsdAPFXpJL3vujI9BXgQvUnmXTyJRDq9wFCfOlfh0DweJca8ssUfvH6yELtLLw481lUDOqHR1jzsUek6MIEuv
MVBobswOSg80KtdO+bYrroqmAA6/N0SPe4KDbzyswi7sV0AvHOHXLxvtnO8UPgiu4PrmDuUrva7TW9AvIcEiL3x+mQ84A+auXFHKrzdzj07m1icvNtGhbcOyge9vVdJO8OIfTsLGbi84A8aunCXhLyurDC78tL3vGumAr2kOiA8LCC5PEqfATzlkCg8yVqQPEq+cDvvcqw8qmvUO1e54zpdYwk8z0O+O8+LRDwEB/O60vSNu2bUbzwF6AM7XWOJPA3J3TyWP627oUFKvHyRzbtLLw48fGlgvCL2Lj1eqw+75CgJvYTDq7utjJc7G3R2PD7EJzx+shC7XYL4O2j1sjpb0tK74ydfPIr03zuncn68vMe8N1m6jbx5wQ69dxA/Oxo0RDx6URs8PsSnvJ4phTxQsJw8KE9QO7HtDL2XX8a7CbDuO2ALW7wqmIA708wgOpIGJb3xst68H41lvEsvDjsR4sw75tiuPNTETLwNyd27VemkO2bUb7yNzZw8ZCTKPKprVDzZbUi84A8aPYZL5Dw+VLS6Q0W2vI2lL7y6h4o7KW9pOnegSzzXLRa6G3R2vA+iGjzuwoY8gaJoPJTWYzhtLju8rLQEPLZOAj3Z/VS7B3A8vA4SDr2LXP+7Hk2zug8ypzxOZ+y67poZvcY5zTujYg09sg2mPInURjrJ6pw62Y1hPFjZ/LsVQ8I8j1VVvLe2obzi31i8ivTfPKTKrLsfjeU8PlQ0PJQeajvP+zc9keYLvPu0+zuXF0C8lUcBPcWJJzxP2Ak7bMYbvPBKPztl/Nw8I87Bu8P4cLwMqcS4CJBVPAFfIby7pyM9fvqWO4m0LbxrDfg60WQBvODHkzvrWb06DVlqPJJOq7wU26K88muCPEpOfbyWZ5q8je01vPekCrxVoZ474H+NvL1XyTwyCee6jRUjvHrhJzs+fKE850DOPKAhsTssILm8hAuyvC+pmzxn/Qa87poZvEsvDrzEIYg8PewUPAUwiryd4NS8H21MPFOg9Lyri+27ztuevCCt/jV07/s8ZrWAvOJPzDzM2vS8cUcqvF1jCbw3inW7pKoTvf21pbzvciy9", + } + ], + "model": "text-embedding-ada-002", + "usage": {"prompt_tokens": 1, "total_tokens": 1}, + }, + ], + "You are a generator of quiz questions for a seminar. Use the following pieces of retrieved context to generate 5 multiple choice questions (A,B,C,D) on the subject matter. Use a three sentence maximum and keep the answer concise. Render the output as HTML\n\nWhat is 2 + 4?": [ + { + "content-type": "application/json", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "4977", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "10000", + "x-ratelimit-limit-tokens": "200000", + "x-ratelimit-remaining-requests": "9999", + "x-ratelimit-remaining-tokens": "199912", + "x-ratelimit-reset-requests": "8.64s", + "x-ratelimit-reset-tokens": "26ms", + "x-request-id": "req_942efbd5ead41ff093d2f8bfb7833fcb", + }, + 200, + { + "id": "chatcmpl-A0tPUPHiRvco7ONEyOMrW88Qk95vl", + "object": "chat.completion", + "created": 1724776360, + "model": "gpt-3.5-turbo-0125", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "```html\n\n\n\n Math Quiz\n\n\n

<h2>Math Quiz Questions</h2>\n<ol>\n  <li>What is the result of 5 + 3?</li>\n  <li>\n    <ul>\n      <li>A) 7</li>\n      <li>B) 8</li>\n      <li>C) 9</li>\n      <li>D) 10</li>\n    </ul>\n  </li>\n  <li>What is the product of 6 x 7?</li>\n  <li>\n    <ul>\n      <li>A) 36</li>\n      <li>B) 42</li>\n      <li>C) 48</li>\n      <li>D) 56</li>\n    </ul>\n  </li>\n  <li>What is the square root of 64?</li>\n  <li>\n    <ul>\n      <li>A) 6</li>\n      <li>B) 7</li>\n      <li>C) 8</li>\n      <li>D) 9</li>\n    </ul>\n  </li>\n  <li>What is the result of 12 / 4?</li>\n  <li>\n    <ul>\n      <li>A) 2</li>\n      <li>B) 3</li>\n      <li>C) 4</li>\n      <li>D) 5</li>\n    </ul>\n  </li>\n  <li>What is the sum of 15 + 9?</li>\n  <li>\n    <ul>\n      <li>A) 22</li>\n      <li>B) 23</li>\n      <li>C) 24</li>\n      <li>D) 25</li>\n    </ul>\n  </li>\n</ol>
\n\n\n```", + "refusal": None, + }, + "logprobs": None, + "finish_reason": "stop", + } + ], + "usage": {"prompt_tokens": 73, "completion_tokens": 375, "total_tokens": 448}, + "system_fingerprint": None, + }, + ], "You are a world class algorithm for extracting information in structured formats.": [ { "content-type": "application/json", diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py index 5226e60e6d..6d8b2943d5 100644 --- a/tests/mlmodel_langchain/test_chain.py +++ b/tests/mlmodel_langchain/test_chain.py @@ -16,14 +16,15 @@ import uuid import langchain +import langchain_core import openai import pytest +from langchain.chains.combine_documents import create_stuff_documents_chain from langchain.chains.openai_functions import ( create_structured_output_chain, create_structured_output_runnable, ) -from langchain.prompts import ChatPromptTemplate -from langchain.schema import BaseOutputParser +from langchain_community.vectorstores.faiss import FAISS from mock import patch from testing_support.fixtures import reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( # noqa: F401 @@ -346,6 +347,219 @@ ), ] +recorded_events_retrieval_chain_response = [ + [ + {"type": "LlmEmbedding"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "request.model": "text-embedding-ada-002", + "request_id": None, + "duration": None, + "response.model": "text-embedding-ada-002", + "response.organization": "new-relic-nkmd8b", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 3000, + "response.headers.ratelimitLimitTokens": 1000000, + "response.headers.ratelimitResetTokens": "0s", + "response.headers.ratelimitResetRequests": "20ms", + "response.headers.ratelimitRemainingTokens": 999992, + "response.headers.ratelimitRemainingRequests": 2999, + "vendor": "openai", + "ingest_source": "Python", + "input": "[[3923, 374, 220, 17, 489, 220, 19, 30]]", + }, + ], + [ + {"type": "LlmEmbedding"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "request.model": "text-embedding-ada-002", + "request_id": None, + "duration": None, + "response.model": "text-embedding-ada-002", + "response.organization": "new-relic-nkmd8b", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 3000, + "response.headers.ratelimitLimitTokens": 1000000, + "response.headers.ratelimitResetTokens": "0s", + "response.headers.ratelimitResetRequests": "20ms", + "response.headers.ratelimitRemainingTokens": 999998, + "response.headers.ratelimitRemainingRequests": 2999, + "vendor": "openai", + "ingest_source": "Python", + "input": "[[10590]]", + }, + ], + [ + {"type": "LlmVectorSearch"}, + { + "request.k": 4, + "duration": None, + "response.number_of_documents": 1, + "span_id": None, + "trace_id": "trace-id", + "id": None, + "vendor": "langchain", + "ingest_source": "Python", + "request.query": "math", + }, + ], + [ + {"type": "LlmVectorSearchResult"}, + { + "id": None, + "search_id": None, + "sequence": 0, + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "page_content": "What is 2 + 4?", + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + "content": "page_content='What is 2 + 4?'", + }, + ], + [ + 
{"type": "LlmChatCompletionSummary"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "vendor": "openai", + "ingest_source": "Python", + "request_id": None, + "duration": None, + "response.model": "gpt-3.5-turbo-0125", + "response.organization": "new-relic-nkmd8b", + "response.choices.finish_reason": "stop", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 10000, + "response.headers.ratelimitLimitTokens": 200000, + "response.headers.ratelimitResetTokens": "26ms", + "response.headers.ratelimitResetRequests": "8.64s", + "response.headers.ratelimitRemainingTokens": 199912, + "response.headers.ratelimitRemainingRequests": 9999, + "response.number_of_messages": 3, + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "gpt-3.5-turbo-0125", + "vendor": "openai", + "ingest_source": "Python", + "content": "You are a generator of quiz questions for a seminar. Use the following pieces of retrieved context to generate 5 multiple choice questions (A,B,C,D) on the subject matter. Use a three sentence maximum and keep the answer concise. Render the output as HTML\n\nWhat is 2 + 4?", + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "gpt-3.5-turbo-0125", + "vendor": "openai", + "ingest_source": "Python", + "content": "math", + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "gpt-3.5-turbo-0125", + "vendor": "openai", + "ingest_source": "Python", + "is_response": True, + "content": "```html\n\n\n\n Math Quiz\n\n\n

<h2>Math Quiz Questions</h2>\n<ol>\n  <li>What is the result of 5 + 3?</li>\n  <li>\n    <ul>\n      <li>A) 7</li>\n      <li>B) 8</li>\n      <li>C) 9</li>\n      <li>D) 10</li>\n    </ul>\n  </li>\n  <li>What is the product of 6 x 7?</li>\n  <li>\n    <ul>\n      <li>A) 36</li>\n      <li>B) 42</li>\n      <li>C) 48</li>\n      <li>D) 56</li>\n    </ul>\n  </li>\n  <li>What is the square root of 64?</li>\n  <li>\n    <ul>\n      <li>A) 6</li>\n      <li>B) 7</li>\n      <li>C) 8</li>\n      <li>D) 9</li>\n    </ul>\n  </li>\n  <li>What is the result of 12 / 4?</li>\n  <li>\n    <ul>\n      <li>A) 2</li>\n      <li>B) 3</li>\n      <li>C) 4</li>\n      <li>D) 5</li>\n    </ul>\n  </li>\n  <li>What is the sum of 15 + 9?</li>\n  <li>\n    <ul>\n      <li>A) 22</li>\n      <li>B) 23</li>\n      <li>C) 24</li>\n      <li>D) 25</li>\n    </ul>\n  </li>\n</ol>
\n\n\n```", + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "content": "{'input': 'math', 'context': [Document(metadata={}, page_content='What is 2 + 4?')]}", + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + "content": "`", + }, + ], + [ + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + "content": "{'input': 'math', 'context': [Document(metadata={}, page_content='What is 2 + 4?')], 'answer': '```html\\n\\n\\n\\n Math Quiz\\n\\n\\n

<h2>Math Quiz Questions</h2>\\n<ol>\\n  <li>What is the result of 5 + 3?</li>\\n  <li>\\n    <ul>\\n      <li>A) 7</li>\\n      <li>B) 8</li>\\n      <li>C) 9</li>\\n      <li>D) 10</li>\\n    </ul>\\n  </li>\\n  <li>What is the product of 6 x 7?</li>\\n  <li>\\n    <ul>\\n      <li>A) 36</li>\\n      <li>B) 42</li>\\n      <li>C) 48</li>\\n      <li>D) 56</li>\\n    </ul>\\n  </li>\\n  <li>What is the square root of 64?</li>\\n  <li>\\n    <ul>\\n      <li>A) 6</li>\\n      <li>B) 7</li>\\n      <li>C) 8</li>\\n      <li>D) 9</li>\\n    </ul>\\n  </li>\\n  <li>What is the result of 12 / 4?</li>\\n  <li>\\n    <ul>\\n      <li>A) 2</li>\\n      <li>B) 3</li>\\n      <li>C) 4</li>\\n      <li>D) 5</li>\\n    </ul>\\n  </li>\\n  <li>What is the sum of 15 + 9?</li>\\n  <li>\\n    <ul>\\n      <li>A) 22</li>\\n      <li>B) 23</li>\\n      <li>C) 24</li>\\n      <li>D) 25</li>\\n    </ul>\\n  </li>\\n</ol>
\\n\\n\\n```'}", + }, + ], +] + chat_completion_recorded_events_list_response = [ ( {"type": "LlmChatCompletionSummary"}, @@ -499,7 +713,7 @@ def test_langchain_chain_list_response(set_trace_info, comma_separated_list_outp ONLY return a comma separated list, and nothing more.""" human_template = "{text}" - chat_prompt = ChatPromptTemplate.from_messages( + chat_prompt = langchain_core.prompts.ChatPromptTemplate.from_messages( [ ("system", template), ("human", human_template), @@ -1026,7 +1240,7 @@ def test_async_langchain_chain_list_response( ONLY return a comma separated list, and nothing more.""" human_template = "{text}" - chat_prompt = ChatPromptTemplate.from_messages( + chat_prompt = langchain_core.prompts.ChatPromptTemplate.from_messages( [ ("system", template), ("human", human_template), @@ -1071,7 +1285,7 @@ def test_async_langchain_chain_list_response_no_content( ONLY return a comma separated list, and nothing more.""" human_template = "{text}" - chat_prompt = ChatPromptTemplate.from_messages( + chat_prompt = langchain_core.prompts.ChatPromptTemplate.from_messages( [ ("system", template), ("human", human_template), @@ -1579,6 +1793,35 @@ def _test(): _test() +@reset_core_stats_engine() +@validate_custom_events(recorded_events_retrieval_chain_response) +@validate_custom_event_count(count=17) +@validate_transaction_metrics( + name="test_chain:test_retrieval_chains", + scoped_metrics=[("Llm/chain/LangChain/invoke", 3)], + rollup_metrics=[("Llm/chain/LangChain/invoke", 3)], + custom_metrics=[ + (f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1), + ], + background_task=True, +) +@background_task() +def test_retrieval_chains(set_trace_info, retrieval_chain_prompt, embedding_openai_client, chat_openai_client): + set_trace_info() + documents = [langchain_core.documents.Document(page_content="What is 2 + 4?")] + vectordb = FAISS.from_documents(documents=documents, embedding=embedding_openai_client) + retriever = vectordb.as_retriever() + question_answer_chain = create_stuff_documents_chain( + llm=chat_openai_client, + prompt=retrieval_chain_prompt, + ) + + rag_chain = langchain.chains.create_retrieval_chain(retriever, question_answer_chain) + response = rag_chain.invoke({"input": "math"}) + + assert response + + @pytest.fixture def json_schema(): return { @@ -1598,9 +1841,29 @@ def json_schema(): } +@pytest.fixture +def retrieval_chain_prompt(): + return langchain_core.prompts.ChatPromptTemplate.from_messages( + [ + ( + "system", + ( + "You are a generator of quiz questions for a seminar. " + "Use the following pieces of retrieved context to generate " + "5 multiple choice questions (A,B,C,D) on the subject matter. Use a three sentence " + "maximum and keep the answer concise. 
Render the output as HTML" + "\n\n" + "{context}" + ), + ), + ("human", "{input}"), + ] + ) + + @pytest.fixture def prompt(): - return ChatPromptTemplate.from_messages( + return langchain_core.prompts.ChatPromptTemplate.from_messages( [ ( "system", @@ -1617,7 +1880,7 @@ def prompt(): @pytest.fixture def prompt_openai_error(): - return ChatPromptTemplate.from_messages( + return langchain_core.prompts.ChatPromptTemplate.from_messages( [ ( "system", @@ -1634,7 +1897,7 @@ def prompt_openai_error(): @pytest.fixture def comma_separated_list_output_parser(): - class _CommaSeparatedListOutputParser(BaseOutputParser): + class _CommaSeparatedListOutputParser(langchain.schema.BaseOutputParser): """Parse the output of an LLM call to a comma-separated list.""" def parse(self, text): diff --git a/tests/mlmodel_langchain/test_tool.py b/tests/mlmodel_langchain/test_tool.py index ba187784e1..a153c8200c 100644 --- a/tests/mlmodel_langchain/test_tool.py +++ b/tests/mlmodel_langchain/test_tool.py @@ -17,7 +17,7 @@ import uuid import langchain -import pydantic +import pydantic_core import pytest from langchain.tools import tool from mock import patch @@ -80,7 +80,7 @@ def events_sans_content(event): "run_id": None, "output": "Python Agent", "name": "_single_arg_tool", - "description": "_single_arg_tool(query: str) - A test tool that returns query string", + "description": "A test tool that returns query string", "span_id": None, "trace_id": "trace-id", "input": "{'query': 'Python Agent'}", @@ -178,7 +178,7 @@ def test_langchain_single_arg_tool_async_no_content(set_trace_info, single_arg_t "run_id": None, "output": "81", "name": "_multi_arg_tool", - "description": "_multi_arg_tool(first_num: int, second_num: int) - A test tool that adds two integers together", + "description": "A test tool that adds two integers together", "span_id": None, "trace_id": "trace-id", "input": "{'first_num': 53, 'second_num': 28}", @@ -250,7 +250,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop): "id": None, # UUID that varies with each run "run_id": None, # No run ID created on error "name": "_multi_arg_tool", - "description": "_multi_arg_tool(first_num: int, second_num: int) - A test tool that adds two integers together", + "description": "A test tool that adds two integers together", "span_id": None, "trace_id": "trace-id", "input": "{'first_num': 53}", @@ -269,7 +269,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop): @reset_core_stats_engine() @validate_transaction_error_event_count(1) @validate_error_trace_attributes( - callable_name(pydantic.v1.error_wrappers.ValidationError), + callable_name(pydantic_core._pydantic_core.ValidationError), exact_attrs={ "agent": {}, "intrinsic": {}, @@ -289,7 +289,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop): ) @background_task() def test_langchain_error_in_run(set_trace_info, multi_arg_tool): - with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + with pytest.raises(pydantic_core._pydantic_core.ValidationError): set_trace_info() # Only one argument is provided while the tool expects two to create an error multi_arg_tool.run( @@ -301,7 +301,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool): @disabled_ai_monitoring_record_content_settings @validate_transaction_error_event_count(1) @validate_error_trace_attributes( - callable_name(pydantic.v1.error_wrappers.ValidationError), + callable_name(pydantic_core._pydantic_core.ValidationError), exact_attrs={ "agent": {}, 
"intrinsic": {}, @@ -321,7 +321,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool): ) @background_task() def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): - with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + with pytest.raises(pydantic_core._pydantic_core.ValidationError): set_trace_info() # Only one argument is provided while the tool expects two to create an error multi_arg_tool.run( @@ -332,7 +332,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): @reset_core_stats_engine() @validate_transaction_error_event_count(1) @validate_error_trace_attributes( - callable_name(pydantic.v1.error_wrappers.ValidationError), + callable_name(pydantic_core._pydantic_core.ValidationError), exact_attrs={ "agent": {}, "intrinsic": {}, @@ -352,7 +352,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): ) @background_task() def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): - with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + with pytest.raises(pydantic_core._pydantic_core.ValidationError): set_trace_info() # Only one argument is provided while the tool expects two to create an error loop.run_until_complete( @@ -366,7 +366,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): @disabled_ai_monitoring_record_content_settings @validate_transaction_error_event_count(1) @validate_error_trace_attributes( - callable_name(pydantic.v1.error_wrappers.ValidationError), + callable_name(pydantic_core._pydantic_core.ValidationError), exact_attrs={ "agent": {}, "intrinsic": {}, @@ -386,7 +386,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): ) @background_task() def test_langchain_error_in_run_async_no_content(set_trace_info, multi_arg_tool, loop): - with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + with pytest.raises(pydantic_core._pydantic_core.ValidationError): set_trace_info() # Only one argument is provided while the tool expects two to create an error loop.run_until_complete( diff --git a/tox.ini b/tox.ini index 98f528d051..3ab7c0562f 100644 --- a/tox.ini +++ b/tox.ini @@ -53,6 +53,7 @@ envlist = kafka-messagebroker_confluentkafka-py39-confluentkafka{0108}, kafka-messagebroker_kafkapython-py38-kafkapython{020001,020000}, kafka-messagebroker_kafkapython-{py37,py38,pypy310}-kafkapythonlatest, + kafka-messagebroker_kafkapython-{py38,py39,py310,py311,py312,pypy310}-kafkapythonnglatest, memcached-datastore_bmemcached-{py37,py38,py39,py310,py311,py312}-memcached030, memcached-datastore_aiomcache-{py38,py39,py310,py311,py312}-memcached030, memcached-datastore_memcache-{py37,py38,py39,py310,py311,py312,pypy310}-memcached01, @@ -111,7 +112,7 @@ envlist = python-external_httpx-{py37,py38,py39,py310,py311,py312}, python-external_requests-{py37,py38,py39,py310,py311,py312,pypy310}, python-external_urllib3-{py37,py38,py39,py310,py311,py312,pypy310}-urllib3latest, - python-external_urllib3-py37-urllib3{0109}, + python-external_urllib3-{py37,py312,pypy310}-urllib30126, python-framework_aiohttp-{py37,py38,py39,py310,py311,py312,pypy310}-aiohttp03, python-framework_ariadne-{py37,py38,py39,py310,py311,py312}-ariadnelatest, python-framework_ariadne-py37-ariadne{0011,0012,0013}, @@ -127,7 +128,8 @@ envlist = python-framework_flask-{py38,py39,py310,py311,py312,pypy310}-flask{020205,latest,master}, python-framework_graphene-{py37,py38,py39,py310,py311,py312}-graphenelatest, 
python-framework_graphql-{py37,py38,py39,py310,py311,py312,pypy310}-graphql03, - python-framework_graphql-{py37,py38,py39,py310,py311,py312,pypy310}-graphql{latest,master}, + ; Remove graphqlmaster tests. + python-framework_graphql-{py37,py38,py39,py310,py311,py312,pypy310}-graphql{latest}, python-framework_graphql-py37-graphql{0301,0302}, python-framework_pyramid-{py37,py38,py39,py310,py311,py312,pypy310}-Pyramidlatest, python-framework_pyramid-{py37,py38,py39,py310,py311,py312,pypy310}-Pyramid0110-cornice, @@ -138,12 +140,13 @@ envlist = python-framework_starlette-{py37,py38}-starlette002001, python-framework_strawberry-{py38,py39,py310,py311,py312}-strawberry02352, python-framework_strawberry-{py37,py38,py39,py310,py311,py312}-strawberrylatest, - python-framework_tornado-{py38,py39,py310,py311,py312}-tornado{latest,master}, + python-framework_tornado-{py38,py39,py310,py311,py312}-tornadolatest, + python-framework_tornado-{py39,py310,py311,py312}-tornadomaster, python-logger_logging-{py37,py38,py39,py310,py311,py312,pypy310}, python-logger_loguru-{py37,py38,py39,py310,py311,py312,pypy310}-logurulatest, python-logger_loguru-py39-loguru{06,05}, python-logger_structlog-{py37,py38,py39,py310,py311,py312,pypy310}-structloglatest, - python-mlmodel_langchain-{py38,py39,py310,py311,py312}, + python-mlmodel_langchain-{py39,py310,py311,py312}, python-mlmodel_openai-openai0-{py37,py38,py39,py310,py311,py312}, python-mlmodel_openai-openai107-py312, python-mlmodel_openai-openailatest-{py37,py38,py39,py310,py311,py312}, @@ -277,7 +280,7 @@ deps = external_httpx: httpx<0.17 external_requests: urllib3 external_requests: requests - external_urllib3-urllib30109: urllib3<1.10 + external_urllib3-urllib30126: urllib3<1.27 external_urllib3-urllib3latest: urllib3 framework_aiohttp-aiohttp03: aiohttp<4 framework_aiohttp-aiohttp030900rc0: aiohttp==3.9.0rc0 @@ -361,16 +364,16 @@ deps = ; Required for openai testing mlmodel_openai: protobuf ; Pinning to 0.1.16 while adding support for with_structured_output in chain tests - mlmodel_langchain: langchain<0.1.17 + mlmodel_langchain: langchain mlmodel_langchain: langchain-community - mlmodel_langchain: openai[datalib] + mlmodel_langchain: langchain-core + mlmodel_langchain: langchain-openai ; Required for langchain testing mlmodel_langchain: pypdf mlmodel_langchain: tiktoken mlmodel_langchain: faiss-cpu mlmodel_langchain: mock mlmodel_langchain: asyncio - mlmodel_langchain: langchain-openai logger_loguru-logurulatest: loguru logger_loguru-loguru06: loguru<0.7 logger_loguru-loguru05: loguru<0.6 @@ -381,6 +384,7 @@ deps = messagebroker_confluentkafka-confluentkafka0108: confluent-kafka<1.9 messagebroker_confluentkafka-confluentkafka0107: confluent-kafka<1.8 messagebroker_confluentkafka-confluentkafka0106: confluent-kafka<1.7 + messagebroker_kafkapython-kafkapythonnglatest: kafka-python-ng messagebroker_kafkapython-kafkapythonlatest: kafka-python messagebroker_kafkapython-kafkapython020001: kafka-python<2.0.2 messagebroker_kafkapython-kafkapython020000: kafka-python<2.0.1
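
---

The recurring pattern in the Kafka conftest changes above is to replace a module-level BROKER constant with a session-scoped pytest fixture, so the bootstrap address is computed once per session and injected into every fixture and test that needs it, and to derive the per-node MessageBroker/Kafka/Nodes/... metrics from that same fixture. The following is a minimal, self-contained sketch of that pattern, not part of the patch: it hardcodes a DB_SETTINGS stand-in for the suite's kafka_settings() helper and uses a static topic name in place of the per-test UUID topic the real conftest creates.

    import pytest

    # Hypothetical stand-in for kafka_settings()[0]; the real suite reads
    # these values from its testing_support helpers.
    DB_SETTINGS = {"host": "localhost", "port": 8080}


    @pytest.fixture(scope="session")
    def broker():
        # Resolved once per session and passed as an argument to any fixture
        # or test that needs it, instead of a module-level constant.
        return f"{DB_SETTINGS['host']}:{DB_SETTINGS['port']}"


    @pytest.fixture(scope="function")
    def topic():
        # Static name for this sketch; the real conftest creates a unique
        # per-test topic through the broker's admin client.
        return "test-topic"


    @pytest.fixture(scope="function")
    def expected_broker_metrics(broker, topic):
        # One metric tuple per bootstrap server. confluent-kafka configures
        # brokers as a comma-separated string (hence split(",")); kafka-python
        # takes a list, so its test modules iterate the fixture value directly.
        return [(f"MessageBroker/Kafka/Nodes/{server}/Produce/{topic}", 1) for server in broker.split(",")]

The consumer-side test modules use the same shape with Consume in place of Produce, and an expected_missing_broker_metrics variant that pairs each metric name with None to assert the metrics are not recorded when no transaction is active.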