fix(typing): update types to be compatible with latest mypy (backport #4234) #4265

Closed
wants to merge 1 commit
615 changes: 615 additions & 0 deletions ddtrace/debugging/_debugger.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion ddtrace/internal/atexit.py
@@ -45,7 +45,7 @@ def register(
     return func

 def unregister(func):
-    # type: (typing.Callable[..., None]) -> None
+    # type: (typing.Callable[..., typing.Any]) -> None
     """
     Unregister an exit function which was previously registered using
     atexit.register.
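The relaxed annotation matters because mypy only accepts a callable returning None where Callable[..., None] is expected; a hook that returns a value is rejected. A minimal sketch of the failure the old annotation caused (the hook function here is hypothetical):

import typing


def unregister(func):
    # type: (typing.Callable[..., None]) -> None
    """Old annotation: only callables returning None are accepted."""


def hook():
    # type: () -> int
    return 42


# error: Argument 1 to "unregister" has incompatible type
# "Callable[[], int]"; expected "Callable[..., None]"
unregister(hook)

# With typing.Callable[..., typing.Any], as in the new annotation,
# the same call type-checks fine.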
2 changes: 1 addition & 1 deletion ddtrace/internal/runtime/runtime_metrics.py
@@ -148,7 +148,7 @@ def flush(self):
             log.debug("Writing metric %s:%s", key, value)
             self._dogstatsd_client.distribution(key, value)

-    def _stop_service(self):  # type: ignore[override]
+    def _stop_service(self):
         # type: (...) -> None
         # De-register span hook
         super(RuntimeWorker, self)._stop_service()
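The many "# type: ignore[override]" removals in this PR suggest that, after the signature updates, these _start_service/_stop_service overrides now match their base classes, so the suppressions are no longer needed. A minimal sketch of the check those comments were silencing (class names hypothetical, not from the source):

class Service:
    def _stop_service(self, join=True):
        # type: (bool) -> None
        pass


class Worker(Service):
    # Dropping a parameter the base class accepts violates the Liskov rule
    # mypy enforces, and would need "# type: ignore[override]":
    # error: Signature of "_stop_service" incompatible with supertype
    # "Service"  [override]
    def _stop_service(self):
        # type: () -> None
        pass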
48 changes: 45 additions & 3 deletions ddtrace/internal/utils/cache.py
@@ -11,9 +11,8 @@
 miss = object()

 T = TypeVar("T")
-S = TypeVar("S")
-F = Callable[[T], S]
-M = Callable[[Any, T], S]
+F = Callable[[T], Any]
+M = Callable[[Any, T], Any]


The remaining hunks in this file replace the inline dictionary cache inside cached() with an LFUCache class. As committed, this region still contained unresolved merge-conflict markers (<<<<<<< HEAD ... >>>>>>> 5140de46); resolved to the incoming side of the conflict, the new code reads:

class LFUCache(dict):
    """Simple LFU cache implementation.

    The amortized cost of shrinking the cache when it grows beyond
    the requested size is O(log(size)).
    """

    def __init__(self, maxsize=256):
        # type: (int) -> None
        self.maxsize = maxsize
        self.lock = RLock()

    def get(self, key, f):  # type: ignore[override]
        # type: (T, F) -> Any
        """Get a value from the cache.

        If the value with the given key is not in the cache, the expensive
        function ``f`` is called on the key to generate it. The return value is
        then stored in the cache and returned to the caller.
        """
        # Evict the least frequently used half of the entries once the cache
        # is full.
        if len(self) >= self.maxsize:
            for _, h in zip(range(self.maxsize >> 1), sorted(self, key=lambda h: self[h][1])):
                del self[h]

        # Fast path: the value is already cached, so bump its use count.
        _ = super(LFUCache, self).get(key, miss)
        if _ is not miss:
            value, count = _
            self[key] = (value, count + 1)
            return value

        with self.lock:
            # Check again under the lock in case another thread stored the
            # value in the meantime.
            _ = super(LFUCache, self).get(key, miss)
            if _ is not miss:
                value, count = _
                self[key] = (value, count + 1)
                return value

            result = f(key)
            self[key] = (result, 1)
            return result


def cached(maxsize=256):
    # type: (int) -> Callable[[F], F]
    """Decorator for memoizing functions of a single argument (LFU policy)."""

    def cached_wrapper(f):
        # type: (F) -> F
        cache = LFUCache(maxsize)

        def cached_f(key):
            # type: (T) -> Any
            return cache.get(key, f)

        cached_f.invalidate = cache.clear  # type: ignore[attr-defined]

        return cached_f

    return cached_wrapper
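For reference, a minimal usage sketch of the decorator (the decorated function is hypothetical; the module path is taken from the diff header above):

from ddtrace.internal.utils.cache import cached


@cached(maxsize=128)
def normalize_tag(raw):
    # type: (str) -> str
    # Deterministic work on a single hashable argument; results are memoized
    # with LFU eviction once the cache holds maxsize entries.
    return raw.strip().lower().replace(" ", "_")


normalize_tag("Web Server")  # computed, stored with use count 1
normalize_tag("Web Server")  # cache hit, use count bumped to 2
normalize_tag.invalidate()   # clears every memoized entry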
2 changes: 1 addition & 1 deletion ddtrace/internal/writer.py
@@ -549,7 +549,7 @@ def flush_queue(self, raise_exc=False):
     def periodic(self):
         self.flush_queue(raise_exc=False)

-    def _stop_service(  # type: ignore[override]
+    def _stop_service(
         self,
         timeout=None,  # type: Optional[float]
     ):
4 changes: 4 additions & 0 deletions ddtrace/profiling/collector/__init__.py
This hunk was also committed with unresolved conflict markers: HEAD carried a 2.0 default, while the incoming commit carried a 1.0 default plus the type ignore. A typing-only backport keeps the branch's default and adds only the suppression:

@@ -77,5 +77,9 @@ def _create_capture_sampler(collector):

 @attr.s
 class CaptureSamplerCollector(Collector):
-    capture_pct = attr.ib(factory=attr_utils.from_env("DD_PROFILING_CAPTURE_PCT", 2.0, float))
+    capture_pct = attr.ib(factory=attr_utils.from_env("DD_PROFILING_CAPTURE_PCT", 2.0, float))  # type: ignore[arg-type]
     _capture_sampler = attr.ib(default=attr.Factory(_create_capture_sampler, takes_self=True), init=False, repr=False)
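Most of the "# type: ignore[arg-type]" additions in this PR sit on attr_utils.from_env(...) calls, so newer mypy is evidently rejecting builtin constructors such as float or int as the converter argument. The helper's signature is not shown in this diff, so the sketch below assumes one plausible shape:

import os
import typing

T = typing.TypeVar("T")


def from_env(
    name,  # type: str
    default,  # type: T
    value_type,  # type: typing.Callable[[typing.Union[str, T]], T]
):
    # type: (...) -> typing.Callable[[], T]
    """Return a zero-argument attrs factory that reads ``name`` from the
    environment, falls back to ``default``, and coerces via ``value_type``."""

    def _():
        # type: () -> T
        return value_type(os.environ.get(name, default))

    return _


# Mirrors the attrs usage in the hunk above (assumed signature, not the
# library's actual one):
# capture_pct = attr.ib(factory=from_env("DD_PROFILING_CAPTURE_PCT", 2.0, float))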
6 changes: 3 additions & 3 deletions ddtrace/profiling/collector/_lock.py
@@ -177,7 +177,7 @@ def __get__(self, instance, owner=None):
 class LockCollector(collector.CaptureSamplerCollector):
     """Record lock usage."""

-    nframes = attr.ib(factory=attr_utils.from_env("DD_PROFILING_MAX_FRAMES", 64, int))
+    nframes = attr.ib(factory=attr_utils.from_env("DD_PROFILING_MAX_FRAMES", 64, int))  # type: ignore[arg-type]
     endpoint_collection_enabled = attr.ib(
         factory=attr_utils.from_env("DD_PROFILING_ENDPOINT_COLLECTION_ENABLED", True, formats.asbool)
     )
@@ -197,13 +197,13 @@ def _set_original(
         # type: (...) -> None
         pass

-    def _start_service(self):  # type: ignore[override]
+    def _start_service(self):
         # type: (...) -> None
         """Start collecting lock usage."""
         self.patch()
         super(LockCollector, self)._start_service()

-    def _stop_service(self):  # type: ignore[override]
+    def _stop_service(self):
         # type: (...) -> None
         """Stop collecting lock usage."""
         super(LockCollector, self)._stop_service()
50 changes: 50 additions & 0 deletions ddtrace/profiling/collector/asyncio.py
@@ -0,0 +1,50 @@
import typing

import attr

from . import _lock
from .. import collector
from .. import event


@event.event_class
class AsyncioLockAcquireEvent(_lock.LockAcquireEvent):
"""An asyncio.Lock has been acquired."""


@event.event_class
class AsyncioLockReleaseEvent(_lock.LockReleaseEvent):
"""An asyncio.Lock has been released."""


class _ProfiledAsyncioLock(_lock._ProfiledLock):

ACQUIRE_EVENT_CLASS = AsyncioLockAcquireEvent
RELEASE_EVENT_CLASS = AsyncioLockReleaseEvent


@attr.s
class AsyncioLockCollector(_lock.LockCollector):
"""Record asyncio.Lock usage."""

PROFILED_LOCK_CLASS = _ProfiledAsyncioLock

def _start_service(self):
# type: (...) -> None
"""Start collecting lock usage."""
try:
import asyncio
except ImportError as e:
raise collector.CollectorUnavailable(e)
self._asyncio_module = asyncio
return super(AsyncioLockCollector, self)._start_service()

def _get_original(self):
# type: (...) -> typing.Any
return self._asyncio_module.Lock

def _set_original(
self, value # type: typing.Any
):
# type: (...) -> None
self._asyncio_module.Lock = value # type: ignore[misc]
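For context, a rough sketch of how this new collector ends up exercised, assuming it is registered among the profiler's default collectors (the workload function is illustrative, not from the PR):

import asyncio

from ddtrace.profiling import Profiler


async def main():
    # While the profiler runs, AsyncioLockCollector swaps asyncio.Lock for
    # _ProfiledAsyncioLock, so acquires and releases emit profiling events.
    lock = asyncio.Lock()
    async with lock:
        await asyncio.sleep(0.1)


prof = Profiler()
prof.start()
asyncio.run(main())
prof.stop()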
14 changes: 10 additions & 4 deletions ddtrace/profiling/collector/memalloc.py
@@ -88,12 +88,18 @@ class MemoryCollector(collector.PeriodicCollector):
     _interval = attr.ib(default=_DEFAULT_INTERVAL, repr=False)

     # TODO make this dynamic based on the 1. interval and 2. the max number of events allowed in the Recorder
-    _max_events = attr.ib(factory=attr_utils.from_env("_DD_PROFILING_MEMORY_EVENTS_BUFFER", _DEFAULT_MAX_EVENTS, int))
-    max_nframe = attr.ib(factory=attr_utils.from_env("DD_PROFILING_MAX_FRAMES", 64, int))
+    _max_events = attr.ib(
+        factory=attr_utils.from_env(
+            "_DD_PROFILING_MEMORY_EVENTS_BUFFER",
+            _DEFAULT_MAX_EVENTS,
+            int,  # type: ignore[arg-type]
+        )
+    )
+    max_nframe = attr.ib(factory=attr_utils.from_env("DD_PROFILING_MAX_FRAMES", 64, int))  # type: ignore[arg-type]
     heap_sample_size = attr.ib(type=int, factory=_get_default_heap_sample_size)
     ignore_profiler = attr.ib(factory=attr_utils.from_env("DD_PROFILING_IGNORE_PROFILER", False, formats.asbool))

-    def _start_service(self):  # type: ignore[override]
+    def _start_service(self):
         # type: (...) -> None
         """Start collecting memory profiles."""
         if _memalloc is None:
@@ -103,7 +109,7 @@ def _start_service(self):

         super(MemoryCollector, self)._start_service()

-    def _stop_service(self):  # type: ignore[override]
+    def _stop_service(self):
         # type: (...) -> None
         super(MemoryCollector, self)._stop_service()
5 changes: 4 additions & 1 deletion ddtrace/profiling/exporter/http.py
@@ -43,7 +43,10 @@ class PprofHTTPExporter(pprof.PprofExporter):
     api_key = attr.ib(default=None, type=typing.Optional[str])
     # Do not use the default agent timeout: it is too short; the agent is just an unbuffered proxy and the profiling
     # backend is not as fast as the tracer's.
-    timeout = attr.ib(factory=attr_utils.from_env("DD_PROFILING_API_TIMEOUT", 10.0, float), type=float)
+    timeout = attr.ib(
+        factory=attr_utils.from_env("DD_PROFILING_API_TIMEOUT", 10.0, float),  # type: ignore[arg-type]
+        type=float,
+    )
     service = attr.ib(default=None, type=typing.Optional[str])
     env = attr.ib(default=None, type=typing.Optional[str])
     version = attr.ib(default=None, type=typing.Optional[str])
4 changes: 2 additions & 2 deletions ddtrace/profiling/profiler.py
@@ -225,7 +225,7 @@ def copy(self):
             service=self.service, env=self.env, version=self.version, tracer=self.tracer, tags=self.tags
         )

-    def _start_service(self):  # type: ignore[override]
+    def _start_service(self):
         # type: (...) -> None
         """Start the profiler."""
         collectors = []
@@ -243,7 +243,7 @@ def _start_service(self):
         if self._scheduler is not None:
             self._scheduler.start()

-    def _stop_service(  # type: ignore[override]
+    def _stop_service(
         self, flush=True  # type: bool
     ):
         # type: (...) -> None
6 changes: 4 additions & 2 deletions ddtrace/profiling/scheduler.py
@@ -20,15 +20,17 @@ class Scheduler(periodic.PeriodicService):
     recorder = attr.ib()
     exporters = attr.ib()
     before_flush = attr.ib(default=None, eq=False)
-    _interval = attr.ib(factory=attr_utils.from_env("DD_PROFILING_UPLOAD_INTERVAL", 60.0, float))
+    _interval = attr.ib(
+        factory=attr_utils.from_env("DD_PROFILING_UPLOAD_INTERVAL", 60.0, float)  # type: ignore[arg-type]
+    )
     _configured_interval = attr.ib(init=False)
     _last_export = attr.ib(init=False, default=None, eq=False)

     def __attrs_post_init__(self):
         # Copy the value to use it later since we're going to adjust the real interval
         self._configured_interval = self.interval

-    def _start_service(self):  # type: ignore[override]
+    def _start_service(self):
         # type: (...) -> None
         """Start the scheduler."""
         LOG.debug("Starting scheduler")