x/fix task execution #482

Merged 4 commits on Sep 2, 2024
@@ -1,7 +1,9 @@
from uuid import uuid4

from beartype import beartype
from temporalio import activity

from ...autogen.openapi_model import CreateTransitionRequest
from ...autogen.openapi_model import CreateTransitionRequest, Transition
from ...common.protocol.tasks import StepContext
from ...env import testing
from ...models.execution.create_execution_transition import (
@@ -13,7 +15,7 @@
async def transition_step(
context: StepContext,
transition_info: CreateTransitionRequest,
) -> None:
) -> Transition:
need_to_wait = transition_info.type == "wait"

# Get task token if it's a waiting step
@@ -22,14 +24,15 @@ async def transition_step(
transition_info.task_token = task_token

# Create transition
create_execution_transition_query(
transition = create_execution_transition_query(
developer_id=context.execution_input.developer_id,
execution_id=context.execution_input.execution.id,
task_id=context.execution_input.task.id,
data=transition_info,
update_execution_status=True,
)

return transition

async def mock_transition_step(
context: StepContext,
Contributor comment:
The mock_transition_step function should return a Transition object to match the return type of transition_step.
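
One possible shape for that fix, sketched on the assumption that `Transition` is a Pydantic v2 model like the rest of the autogen package (it reuses the imports already added at the top of this file):

```python
async def mock_transition_step(
    context: StepContext,
    transition_info: CreateTransitionRequest,
) -> Transition:
    # Build a stand-in Transition without touching the database.
    # model_construct() skips validation, which suits a mock that has no real
    # execution or transition IDs to supply.
    return Transition.model_construct(**transition_info.model_dump())
```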

10 changes: 5 additions & 5 deletions agents-api/agents_api/autogen/Agents.py
@@ -44,7 +44,7 @@ class Agent(BaseModel):
"""
Model name to use (gpt-4-turbo, gemini-nano etc)
"""
instructions: str | list[str] = ""
instructions: str | list[str] = []
"""
Instructions for the agent
"""
@@ -82,7 +82,7 @@ class CreateAgentRequest(BaseModel):
"""
Model name to use (gpt-4-turbo, gemini-nano etc)
"""
instructions: str | list[str] = ""
instructions: str | list[str] = []
"""
Instructions for the agent
"""
@@ -117,7 +117,7 @@ class CreateOrUpdateAgentRequest(CreateAgentRequest):
"""
Model name to use (gpt-4-turbo, gemini-nano etc)
"""
instructions: str | list[str] = ""
instructions: str | list[str] = []
"""
Instructions for the agent
"""
@@ -155,7 +155,7 @@ class PatchAgentRequest(BaseModel):
"""
Model name to use (gpt-4-turbo, gemini-nano etc)
"""
instructions: str | list[str] = ""
instructions: str | list[str] = []
"""
Instructions for the agent
"""
@@ -193,7 +193,7 @@ class UpdateAgentRequest(BaseModel):
"""
Model name to use (gpt-4-turbo, gemini-nano etc)
"""
instructions: str | list[str] = ""
instructions: str | list[str] = []
"""
Instructions for the agent
"""
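
Changing the `instructions` default from `""` to `[]` is safe here because Pydantic copies mutable defaults per instance. A standalone sketch of that behaviour (a demo model, not the generated `Agent` class):

```python
from __future__ import annotations

from pydantic import BaseModel


class AgentDemo(BaseModel):
    # Same shape as the generated field: a string or a list of instruction strings.
    instructions: str | list[str] = []


a, b = AgentDemo(), AgentDemo()
a.instructions.append("be concise")
print(a.instructions)  # ['be concise']
print(b.instructions)  # [] -- the default list is not shared between instances
```
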
66 changes: 46 additions & 20 deletions agents-api/agents_api/autogen/Chat.py
@@ -3,7 +3,7 @@

from __future__ import annotations

from typing import Annotated, Literal
from typing import Annotated, Any, Literal
from uuid import UUID

from pydantic import AwareDatetime, BaseModel, ConfigDict, Field, StrictBool
@@ -36,11 +36,11 @@ class BaseChatResponse(BaseModel):
"""
Usage statistics for the completion request
"""
jobs: list[UUID]
jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})]
"""
Background job IDs that may have been spawned from this interaction.
"""
docs: list[DocReference]
docs: Annotated[list[DocReference], Field([], json_schema_extra={"readOnly": True})]
"""
Documents referenced for this request (for citation purposes).
"""
@@ -71,7 +71,7 @@ class ChatInputData(BaseModel):
"""
A list of new input messages comprising the conversation so far.
"""
tools: Annotated[list[FunctionTool] | None, Field(None, min_length=1)]
tools: list[FunctionTool] = []
"""
(Advanced) List of tools that are provided in addition to agent's default set of tools.
"""
@@ -133,16 +133,6 @@ class CompetionUsage(BaseModel):
"""


class CompletionResponseFormat(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
type: Literal["text", "json_object"] = "text"
"""
The format of the response
"""


class Content(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
@@ -278,7 +268,9 @@ class MultipleChatOutput(BaseChatOutput):
model_config = ConfigDict(
populate_by_name=True,
)
messages: list[Message]
messages: Annotated[
list[Message], Field(json_schema_extra={"readOnly": True}, min_length=1)
]


class OpenAISettings(BaseModel):
@@ -303,6 +295,30 @@ class OpenAISettings(BaseModel):
"""


class SchemaCompletionResponseFormat(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
type: Literal["json_schema"] = "json_schema"
"""
The format of the response
"""
json_schema: dict[str, Any]
"""
The schema of the response
"""


class SimpleCompletionResponseFormat(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
type: Literal["text", "json_object"] = "text"
"""
The format of the response
"""


class SingleChatOutput(BaseChatOutput):
"""
The output returned by the model. Note that, depending on the model provider, they might return more than one message.
@@ -318,7 +334,13 @@ class TokenLogProb(BaseTokenLogProb):
model_config = ConfigDict(
populate_by_name=True,
)
top_logprobs: list[BaseTokenLogProb]
top_logprobs: Annotated[
list[BaseTokenLogProb],
Field(json_schema_extra={"readOnly": True}, min_length=1),
]
"""
The log probabilities of the tokens
"""


class ChatInput(ChatInputData):
@@ -352,7 +374,7 @@ class ChatInput(ChatInputData):
"""
Indicates if the server should stream the response as it's generated
"""
stop: Annotated[list[str] | None, Field(None, max_length=4, min_length=1)]
stop: Annotated[list[str], Field([], max_length=4)]
"""
Up to 4 sequences where the API will stop generating further tokens.
"""
@@ -368,7 +390,9 @@ class ChatInput(ChatInputData):
"""
Modify the likelihood of specified tokens appearing in the completion
"""
response_format: CompletionResponseFormat | None = None
response_format: (
SimpleCompletionResponseFormat | SchemaCompletionResponseFormat | None
) = None
"""
Response format (set to `json_object` to restrict output to JSON)
"""
@@ -447,7 +471,7 @@ class ChatSettings(DefaultChatSettings):
"""
Indicates if the server should stream the response as it's generated
"""
stop: Annotated[list[str] | None, Field(None, max_length=4, min_length=1)]
stop: Annotated[list[str], Field([], max_length=4)]
"""
Up to 4 sequences where the API will stop generating further tokens.
"""
@@ -463,7 +487,9 @@ class ChatSettings(DefaultChatSettings):
"""
Modify the likelihood of specified tokens appearing in the completion
"""
response_format: CompletionResponseFormat | None = None
response_format: (
SimpleCompletionResponseFormat | SchemaCompletionResponseFormat | None
) = None
"""
Response format (set to `json_object` to restrict output to JSON)
"""
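
With the old `CompletionResponseFormat` split into the two variants above, callers choose whichever member of the new union they need. A brief sketch; the import path is inferred from the file path shown in this diff:

```python
from agents_api.autogen.Chat import (
    SchemaCompletionResponseFormat,
    SimpleCompletionResponseFormat,
)

# Plain JSON-mode output, as before.
simple = SimpleCompletionResponseFormat(type="json_object")

# Structured output constrained by an explicit JSON schema.
structured = SchemaCompletionResponseFormat(
    json_schema={
        "type": "object",
        "properties": {"answer": {"type": "string"}},
        "required": ["answer"],
    }
)

# Either value can be assigned to the new `response_format` union on
# ChatInput / ChatSettings shown above.
```
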
6 changes: 3 additions & 3 deletions agents-api/agents_api/autogen/Common.py
@@ -58,7 +58,7 @@ class ResourceCreatedResponse(BaseModel):
"""
When this resource was created as UTC date-time
"""
jobs: list[UUID]
jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})]
"""
IDs (if any) of jobs created as part of this request
"""
@@ -76,7 +76,7 @@ class ResourceDeletedResponse(BaseModel):
"""
When this resource was deleted as UTC date-time
"""
jobs: list[UUID]
jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})]
"""
IDs (if any) of jobs created as part of this request
"""
@@ -94,7 +94,7 @@ class ResourceUpdatedResponse(BaseModel):
"""
When this resource was updated as UTC date-time
"""
jobs: list[UUID]
jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})]
"""
IDs (if any) of jobs created as part of this request
"""
26 changes: 25 additions & 1 deletion agents-api/agents_api/autogen/Executions.py
@@ -21,6 +21,14 @@ class CreateExecutionRequest(BaseModel):
"""
The input to the execution
"""
output: Any | None = None
"""
The output of the execution if it succeeded
"""
error: str | None = None
"""
The error of the execution if it failed
"""
metadata: dict[str, Any] | None = None


@@ -51,6 +59,14 @@ class Execution(BaseModel):
"""
The input to the execution
"""
output: Any | None = None
"""
The output of the execution if it succeeded
"""
error: str | None = None
"""
The error of the execution if it failed
"""
created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})]
"""
When this resource was created as UTC date-time
@@ -80,7 +96,15 @@ class TransitionEvent(BaseModel):
)
type: Annotated[
Literal[
"finish", "branch_finish", "wait", "resume", "error", "step", "cancelled"
"init",
"init_branch",
"finish",
"finish_branch",
"wait",
"resume",
"error",
"step",
"cancelled",
],
Field(json_schema_extra={"readOnly": True}),
]
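
A minimal standalone sketch of how the new optional `output` and `error` fields distinguish a successful run from a failed one (a demo model, not the real `Execution`, which has more required fields than this hunk shows):

```python
from __future__ import annotations

from typing import Any

from pydantic import BaseModel


class ExecutionResultDemo(BaseModel):
    # Mirrors only the two fields added in this diff.
    output: Any | None = None  # set when the execution succeeded
    error: str | None = None   # set when the execution failed


succeeded = ExecutionResultDemo(output={"summary": "task completed"})
failed = ExecutionResultDemo(error="activity timed out")
```
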
10 changes: 5 additions & 5 deletions agents-api/agents_api/autogen/Sessions.py
@@ -118,7 +118,7 @@ class SingleAgentMultiUserSession(Session):
populate_by_name=True,
)
agent: UUID
users: list[UUID]
users: Annotated[list[UUID], Field(min_length=2)]


class SingleAgentNoUserSession(Session):
@@ -201,20 +201,20 @@ class MultiAgentMultiUserSession(Session):
model_config = ConfigDict(
populate_by_name=True,
)
agents: list[UUID]
users: list[UUID]
agents: Annotated[list[UUID], Field(min_length=2)]
users: Annotated[list[UUID], Field(min_length=2)]


class MultiAgentNoUserSession(Session):
model_config = ConfigDict(
populate_by_name=True,
)
agents: list[UUID]
agents: Annotated[list[UUID], Field(min_length=2)]


class MultiAgentSingleUserSession(Session):
model_config = ConfigDict(
populate_by_name=True,
)
agents: list[UUID]
agents: Annotated[list[UUID], Field(min_length=2)]
user: UUID
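
The multi-agent session variants now require at least two agent IDs. A standalone sketch of just that constraint (again a demo model, since the real `Session` subclasses carry additional fields):

```python
from typing import Annotated
from uuid import UUID, uuid4

from pydantic import BaseModel, Field, ValidationError


class MultiAgentDemo(BaseModel):
    # Same constraint as the generated classes: "multi" means two or more.
    agents: Annotated[list[UUID], Field(min_length=2)]


MultiAgentDemo(agents=[uuid4(), uuid4()])  # accepted

try:
    MultiAgentDemo(agents=[uuid4()])       # rejected: only one agent
except ValidationError as exc:
    print(exc.errors()[0]["type"])         # "too_short"
```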