[Py] Default to unnamed (#1024)
Also use cached client for defaults in testing to avoid creating new
threads
hinthornw committed Sep 20, 2024
1 parent ef092de commit 6b3ea0b
Showing 8 changed files with 29 additions and 21 deletions.
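
The thrust of the change: call sites that previously fell back to constructing a fresh langsmith.Client() now call run_trees.get_cached_client(), so testing and evaluation code reuses one shared client (and its background threads) instead of spawning new ones. Separately, RunTree.infer_defaults now names otherwise-nameless runs "Unnamed". Below is a minimal sketch of the caching pattern, not the library's exact code; the real implementation is the run_trees.py hunk further down.

    import threading
    from typing import Optional

    from langsmith import Client

    _CLIENT: Optional[Client] = None
    _LOCK = threading.Lock()


    def get_cached_client() -> Client:
        """Return one lazily created, process-wide Client (sketch)."""
        global _CLIENT
        if _CLIENT is None:
            with _LOCK:  # double-checked locking: build the Client at most once
                if _CLIENT is None:
                    _CLIENT = Client()
        return _CLIENT
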
5 changes: 3 additions & 2 deletions python/langsmith/_expect.py
@@ -59,6 +59,7 @@ def test_output_semantically_close():

from langsmith import client as ls_client
from langsmith import run_helpers as rh
+from langsmith import run_trees as rt
from langsmith import utils as ls_utils

if TYPE_CHECKING:
@@ -103,7 +104,7 @@ def __init__(
def _submit_feedback(self, score: int, message: Optional[str] = None) -> None:
if not ls_utils.test_tracking_is_disabled():
if not self._client:
-                self._client = ls_client.Client()
+                self._client = rt.get_cached_client()
self._executor.submit(
self._client.create_feedback,
run_id=self._run_id,
@@ -431,7 +432,7 @@ def _submit_feedback(self, key: str, results: dict):
run_id = current_run.trace_id if current_run else None
if not ls_utils.test_tracking_is_disabled():
if not self._client:
-                self._client = ls_client.Client()
+                self._client = rt.get_cached_client()
self.executor.submit(
self._client.create_feedback, run_id=run_id, key=key, **results
)
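
The two _submit_feedback hunks swap the same idiom: the feedback path only resolves a client when it actually needs to send something, and now resolves the shared cached client rather than building a new one. A hedged sketch of that idiom (class and method names here are illustrative, not the module's real ones):

    from concurrent.futures import ThreadPoolExecutor
    from typing import Optional
    from uuid import UUID

    from langsmith import Client
    from langsmith import run_trees as rt


    class _FeedbackSender:
        """Illustrative stand-in for the feedback helpers in langsmith/_expect.py."""

        def __init__(self, client: Optional[Client] = None):
            self._client = client  # may stay None until feedback is actually sent
            self._executor = ThreadPoolExecutor(max_workers=1)

        def submit(self, run_id: UUID, key: str, score: float) -> None:
            if not self._client:
                self._client = rt.get_cached_client()  # shared client, not a new Client()
            self._executor.submit(
                self._client.create_feedback, run_id=run_id, key=key, score=score
            )
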
4 changes: 2 additions & 2 deletions python/langsmith/_internal/_embedding_distance.py
@@ -63,7 +63,7 @@ def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
def _get_openai_encoder() -> Callable[[Sequence[str]], Sequence[Sequence[float]]]:
"""Get the OpenAI GPT-3 encoder."""
try:
-        from openai import Client
+        from openai import Client as OpenAIClient
except ImportError:
raise ImportError(
"THe default encoder for the EmbeddingDistance class uses the OpenAI API. "
@@ -72,7 +72,7 @@ def _get_openai_encoder() -> Callable[[Sequence[str]], Sequence[Sequence[float]]]:
)

def encode_text(texts: Sequence[str]) -> Sequence[Sequence[float]]:
-        client = Client()
+        client = OpenAIClient()
response = client.embeddings.create(
input=list(texts), model="text-embedding-3-small"
)
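
The only change here is an import alias: the OpenAI SDK's Client is bound as OpenAIClient so it cannot be mistaken for (or shadow) langsmith's own Client in this module. Roughly the resulting encoder, sketched end to end; the list-of-embeddings return is my reading of the truncated hunk, and error handling plus the install hint are omitted:

    from typing import Sequence

    from openai import Client as OpenAIClient  # aliased to avoid clashing with langsmith.Client


    def encode_text(texts: Sequence[str]) -> Sequence[Sequence[float]]:
        client = OpenAIClient()  # reads OPENAI_API_KEY from the environment
        response = client.embeddings.create(
            input=list(texts), model="text-embedding-3-small"
        )
        return [d.embedding for d in response.data]
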
7 changes: 4 additions & 3 deletions python/langsmith/_testing.py
@@ -18,6 +18,7 @@
from langsmith import client as ls_client
from langsmith import env as ls_env
from langsmith import run_helpers as rh
+from langsmith import run_trees as rt
from langsmith import schemas as ls_schemas
from langsmith import utils as ls_utils

@@ -387,7 +388,7 @@ def __init__(
experiment: ls_schemas.TracerSession,
dataset: ls_schemas.Dataset,
):
-        self.client = client or ls_client.Client()
+        self.client = client or rt.get_cached_client()
self._experiment = experiment
self._dataset = dataset
self._version: Optional[datetime.datetime] = None
@@ -413,7 +414,7 @@ def from_test(
func: Callable,
test_suite_name: Optional[str] = None,
) -> _LangSmithTestSuite:
-        client = client or ls_client.Client()
+        client = client or rt.get_cached_client()
test_suite_name = test_suite_name or _get_test_suite_name(func)
with cls._lock:
if not cls._instances:
@@ -526,7 +527,7 @@ def _get_test_repr(func: Callable, sig: inspect.Signature) -> str:
def _ensure_example(
func: Callable, *args: Any, langtest_extra: _UTExtra, **kwargs: Any
) -> Tuple[_LangSmithTestSuite, uuid.UUID]:
-    client = langtest_extra["client"] or ls_client.Client()
+    client = langtest_extra["client"] or rt.get_cached_client()
output_keys = langtest_extra["output_keys"]
signature = inspect.signature(func)
inputs: dict = rh._get_inputs_safe(signature, *args, **kwargs)
5 changes: 3 additions & 2 deletions python/langsmith/beta/_evals.py
@@ -9,6 +9,7 @@
import uuid
from typing import DefaultDict, List, Optional, Sequence, Tuple, TypeVar

+import langsmith.run_trees as rt
import langsmith.schemas as ls_schemas
from langsmith import evaluation as ls_eval
from langsmith._internal._beta_decorator import warn_beta
@@ -121,7 +122,7 @@ def convert_runs_to_test(
"""
if not runs:
raise ValueError(f"""Expected a non-empty sequence of runs. Received: {runs}""")
-    client = client or Client()
+    client = client or rt.get_cached_client()
ds = client.create_dataset(dataset_name=dataset_name)
outputs = [r.outputs for r in runs] if include_outputs else None
client.create_examples(
@@ -229,7 +230,7 @@ def compute_test_metrics(
raise NotImplementedError(
f"Evaluation not yet implemented for evaluator of type {type(func)}"
)
-    client = client or Client()
+    client = client or rt.get_cached_client()
traces = _load_nested_traces(project_name, client)
with ContextThreadPoolExecutor(max_workers=max_concurrency) as executor:
results = executor.map(
5 changes: 3 additions & 2 deletions python/langsmith/evaluation/_arunner.py
@@ -26,6 +26,7 @@
import langsmith
from langsmith import run_helpers as rh
from langsmith import run_trees, schemas
+from langsmith import run_trees as rt
from langsmith import utils as ls_utils
from langsmith._internal import _aiter as aitertools
from langsmith.evaluation._runner import (
@@ -324,7 +325,7 @@ async def aevaluate_existing(
""" # noqa: E501
-    client = client or langsmith.Client()
+    client = client or run_trees.get_cached_client()
project = (
experiment
if isinstance(experiment, schemas.TracerSession)
@@ -366,7 +367,7 @@ async def _aevaluate(
is_async_target = asyncio.iscoroutinefunction(target) or (
hasattr(target, "__aiter__") and asyncio.iscoroutine(target.__aiter__())
)
-    client = client or langsmith.Client()
+    client = client or rt.get_cached_client()
runs = None if is_async_target else cast(Iterable[schemas.Run], target)
experiment_, runs = await aitertools.aio_to_thread(
_resolve_experiment,
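
Practically, the async entry points no longer need an explicit client to avoid building one per call. A hedged usage sketch (the dataset name, target, and evaluator below are placeholders, not part of the diff):

    import asyncio

    from langsmith.evaluation import aevaluate


    async def target(inputs: dict) -> dict:
        # Toy system under test: echo the question back.
        return {"answer": inputs["question"]}


    def exact_match(run, example) -> dict:
        return {"key": "exact_match", "score": run.outputs == example.outputs}


    async def main() -> None:
        await aevaluate(
            target,
            data="my-dataset",         # placeholder dataset name
            evaluators=[exact_match],  # client= omitted: falls back to the cached client
        )


    asyncio.run(main())
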
13 changes: 7 additions & 6 deletions python/langsmith/evaluation/_runner.py
@@ -36,7 +36,8 @@
import langsmith
from langsmith import env as ls_env
from langsmith import run_helpers as rh
-from langsmith import run_trees, schemas
+from langsmith import run_trees as rt
+from langsmith import schemas
from langsmith import utils as ls_utils
from langsmith.evaluation.evaluator import (
ComparisonEvaluationResult,
@@ -346,7 +347,7 @@ def evaluate_existing(
... ) # doctest: +ELLIPSIS
View the evaluation results for experiment:...
""" # noqa: E501
-    client = client or langsmith.Client()
+    client = client or rt.get_cached_client()
project = (
experiment
if isinstance(experiment, schemas.TracerSession)
@@ -660,7 +661,7 @@ def evaluate_comparative(
)
if max_concurrency < 0:
raise ValueError("max_concurrency must be a positive integer.")
-    client = client or langsmith.Client()
+    client = client or rt.get_cached_client()

# TODO: Add information about comparison experiments
projects = [_load_experiment(experiment, client) for experiment in experiments]
@@ -859,7 +860,7 @@ def _evaluate(
experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
) -> ExperimentResults:
# Initialize the experiment manager.
-    client = client or langsmith.Client()
+    client = client or rt.get_cached_client()
runs = None if _is_callable(target) else cast(Iterable[schemas.Run], target)
experiment_, runs = _resolve_experiment(
experiment,
@@ -982,7 +983,7 @@ def __init__(
client: Optional[langsmith.Client] = None,
description: Optional[str] = None,
):
-        self.client = client or langsmith.Client()
+        self.client = client or rt.get_cached_client()
self._experiment: Optional[schemas.TracerSession] = None
if experiment is None:
self._experiment_name = _get_random_name()
@@ -1556,7 +1557,7 @@ def _forward(
) -> _ForwardResults:
run: Optional[schemas.RunBase] = None

-    def _get_run(r: run_trees.RunTree) -> None:
+    def _get_run(r: rt.RunTree) -> None:
nonlocal run
run = r

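
Same story on the sync side: evaluate, evaluate_existing, evaluate_comparative, and the experiment manager all default to the cached client. A hedged sketch of re-scoring an existing experiment without passing a client (the experiment name and evaluator are placeholders, and the exact keyword set is an assumption about the public signature):

    from langsmith.evaluation import evaluate_existing


    def has_answer(run, example) -> dict:
        return {"key": "has_answer", "score": bool((run.outputs or {}).get("answer"))}


    results = evaluate_existing(
        "my-experiment",          # placeholder: name or ID of an existing experiment
        evaluators=[has_answer],  # client= omitted: the shared cached client is used
    )
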
9 changes: 6 additions & 3 deletions python/langsmith/run_trees.py
@@ -31,7 +31,8 @@
_LOCK = threading.Lock()


-def _get_client() -> Client:
+# Note, this is called directly by langchain. Do not remove.
+def get_cached_client() -> Client:
global _CLIENT
if _CLIENT is None:
with _LOCK:
@@ -78,11 +79,13 @@ class Config:
@root_validator(pre=True)
def infer_defaults(cls, values: dict) -> dict:
"""Assign name to the run."""
if values.get("name") is None and "serialized" in values:
if values.get("name") is None and values.get("serialized") is not None:
if "name" in values["serialized"]:
values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]:
values["name"] = values["serialized"]["id"][-1]
if values.get("name") is None:
values["name"] = "Unnamed"
if "client" in values: # Handle user-constructed clients
values["_client"] = values["client"]
if values.get("parent_run") is not None:
@@ -126,7 +129,7 @@ def client(self) -> Client:
# Lazily load the client
# If you never use this for API calls, it will never be loaded
if not self._client:
-            self._client = _get_client()
+            self._client = get_cached_client()
return self._client

def add_tags(self, tags: Union[Sequence[str], str]) -> None:
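
This file carries the two behavioral changes the commit title and message describe: _get_client becomes the public, memoized get_cached_client (with a note that langchain calls it directly), and infer_defaults now names otherwise-nameless runs "Unnamed". A small hedged illustration (RunTree constructor arguments beyond name/run_type/inputs are assumptions about the schema):

    from langsmith import run_trees as rt

    # Memoized: repeated calls hand back the same Client instance.
    assert rt.get_cached_client() is rt.get_cached_client()

    # No explicit name and no "serialized" payload to infer one from,
    # so the pre-validator falls back to "Unnamed".
    run = rt.RunTree(run_type="chain", inputs={"question": "hi"})
    print(run.name)  # -> "Unnamed"
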
2 changes: 1 addition & 1 deletion python/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langsmith"
version = "0.1.124"
version = "0.1.125"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
authors = ["LangChain <support@langchain.dev>"]
license = "MIT"
