
Commit

Merge remote-tracking branch 'origin/main'
bboynton97 committed Mar 18, 2024
2 parents ddcbd26 + 1b7500c commit d423e12
Showing 8 changed files with 134 additions and 123 deletions.
2 changes: 1 addition & 1 deletion agentops/__init__.py
@@ -3,5 +3,5 @@
from .client import Client
from .event import ActionEvent, LLMEvent, ToolEvent, ErrorEvent
from .logger import AgentOpsLogger
from .enums import Models
from .enums import Models, LLMMessageFormat
from .decorators import record_function
4 changes: 2 additions & 2 deletions agentops/client.py
@@ -142,7 +142,7 @@ def _record_event_sync(self, func, event_name, *args, **kwargs):

except Exception as e:
# TODO: add the stack trace
self.record(ErrorEvent(event=event, details={f"{type(e).__name__}": str(e)}))
self.record(ErrorEvent(trigger_event=event, details={f"{type(e).__name__}": str(e)}))

# Re-raise the exception
raise
@@ -182,7 +182,7 @@ async def _record_event_async(self, func, event_name, *args, **kwargs):

except Exception as e:
# TODO: add the stack trace
self.record(ErrorEvent(event=event, details={f"{type(e).__name__}": str(e)}))
self.record(ErrorEvent(trigger_event=event, details={f"{type(e).__name__}": str(e)}))

# Re-raise the exception
raise
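The client change above swaps ErrorEvent's old event= keyword for trigger_event=, matching the reworked dataclass in event.py further down. A minimal, hedged sketch of the new call pattern outside the decorator machinery (the ActionEvent construction and the failing operation are assumed for illustration):

from agentops import ActionEvent, ErrorEvent

event = ActionEvent()  # the event that was in flight; assumed constructible with no arguments
try:
    raise RuntimeError("boom")  # stand-in for the wrapped function failing
except Exception as e:
    error = ErrorEvent(trigger_event=event, details={type(e).__name__: str(e)})
    # error.trigger_event_id / error.trigger_event_type are filled in by __post_init__;
    # client.record(error) would ship it, just as _record_event_sync/_record_event_async do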
5 changes: 5 additions & 0 deletions agentops/enums.py
@@ -27,3 +27,8 @@ class Models(Enum):
GPT_4_32K_0314 = "gpt-4-32k-0314"
GPT_4_0613 = "gpt-4-0613"
TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"


class LLMMessageFormat(Enum):
STRING = "string"
CHATML = "chatml"
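LLMMessageFormat tells the SDK whether prompt and completion payloads are plain strings or ChatML-style message lists; it is also re-exported from the package root per the __init__.py change above. A hedged sketch of constructing an LLMEvent with the new fields added in event.py below (message contents are illustrative):

from agentops import LLMEvent, LLMMessageFormat

event = LLMEvent(
    prompt_messages=[{"role": "user", "content": "What is 2 + 2?"}],
    prompt_messages_format=LLMMessageFormat.CHATML,
    completion_message={"role": "assistant", "content": "4"},
    completion_message_format=LLMMessageFormat.CHATML,
)
# __post_init__ calls format_messages(), so the internal _formatted_* fields
# ({"type": "chatml", ...}) are populated as soon as the event is created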
51 changes: 38 additions & 13 deletions agentops/event.py
@@ -8,8 +8,9 @@
from dataclasses import dataclass, field
from typing import List, Optional
from .helpers import get_ISO_time
from .enums import EventType, Models
from .enums import EventType, Models, LLMMessageFormat
from uuid import UUID, uuid4
import logging


@dataclass
@@ -20,6 +21,7 @@ class Event:
init_timestamp: Optional[str] = field(default_factory=get_ISO_time)
end_timestamp: str = field(default_factory=get_ISO_time)
id: UUID = field(default_factory=uuid4)
# TODO: has_been_recorded: bool = False


@dataclass
@@ -42,12 +44,36 @@ class LLMEvent(Event):
event_type: str = EventType.LLM.value
agent_id: Optional[UUID] = None
thread_id: Optional[UUID] = None
prompt: Optional[str] = None
completion: Optional[str] = None
prompt_messages: str | List = None # TODO: remove from serialization
prompt_messages_format: LLMMessageFormat = LLMMessageFormat.STRING # TODO: remove from serialization
# TODO: remove and just create it in __post_init__ so it can never be set by user?
_formatted_prompt_messages: object = field(init=False, default=None)
completion_message: str | object = None # TODO: remove from serialization
completion_message_format: LLMMessageFormat = LLMMessageFormat.STRING # TODO: remove from serialization
# TODO: remove and just create it in __post_init__ so it can never be set by user?
_formatted_completion_message: object = field(init=False, default=None)
model: Optional[Models] = None
prompt_tokens: Optional[int] = None
completion_tokens: Optional[int] = None

def format_messages(self):
if self.prompt_messages:
# TODO should we just figure out if it's chatml so user doesn't have to pass anything?
if self.prompt_messages_format == LLMMessageFormat.STRING:
self._formatted_prompt_messages = {"type": "string", "string": self.prompt_messages}
elif self.prompt_messages_format == LLMMessageFormat.CHATML:
self._formatted_prompt_messages = {"type": "chatml", "messages": self.prompt_messages}

if self.completion_message:
if self.completion_message_format == LLMMessageFormat.STRING:
self._formatted_completion_message = {"type": "string", "string": self.completion_message}
elif self.completion_message_format == LLMMessageFormat.CHATML:
self._formatted_completion_message = {"type": "chatml", "message": self.completion_message}

def __post_init__(self):
# format if prompt/completion messages were passed when LLMEvent was created
self.format_messages()


@dataclass
class ToolEvent(Event):
@@ -60,18 +86,17 @@ class ToolEvent(Event):
# Does not inherit from Event because error will (optionally) be linked to an ActionEvent, LLMEvent, etc that will have the details
@dataclass
class ErrorEvent():
trigger_event_id: Optional[UUID] = None
trigger_event_type: Optional[EventType] = None
trigger_event: Optional[Event] = None # TODO: remove from serialization?
error_type: Optional[str] = None
code: Optional[str] = None
details: Optional[str] = None
logs: Optional[str] = None
timestamp: str = field(default_factory=get_ISO_time)

def __init__(self, event: Event = None, **kwargs):
self.event_type: str = EventType.ERROR.value
self.timestamp = get_ISO_time()
if event:
self.trigger_event_id = event.id
self.trigger_event_type = event.event_type
for key, value in kwargs.items():
setattr(self, key, value)
def __post_init__(self):
self.event_type = EventType.ERROR.value
if self.trigger_event:
self.trigger_event_id = self.trigger_event.id
self.trigger_event_type = self.trigger_event.event_type
# TODO: remove trigger_event from serialization
# e.g. field(repr=False, compare=False, hash=False, metadata={'serialize': False})
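ErrorEvent is now a regular dataclass: the hand-written __init__ that accepted event= is gone, and __post_init__ derives trigger_event_id and trigger_event_type from the linked trigger_event. A short, hedged sketch of the resulting behavior (error values are illustrative):

from agentops import LLMEvent, ErrorEvent

llm_event = LLMEvent(prompt_messages="Summarize this document")
error = ErrorEvent(
    trigger_event=llm_event,
    error_type="RateLimitError",        # illustrative
    details="429 returned by provider",
)
# __post_init__ copied these from the trigger event:
assert error.trigger_event_id == llm_event.id
assert error.trigger_event_type == llm_event.event_type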
185 changes: 83 additions & 102 deletions agentops/llm_tracker.py
@@ -3,9 +3,10 @@
from importlib import import_module
from packaging.version import parse
import logging
from .event import LLMEvent
from .event import LLMEvent, ErrorEvent
from .helpers import get_ISO_time, check_call_stack_for_agent_id
import inspect
from .enums import LLMMessageFormat


class LlmTracker:
@@ -24,44 +25,43 @@ class LlmTracker:

def __init__(self, client):
self.client = client
self.event_stream = None
self.completion = ""
self.llm_event: LLMEvent = None

def _handle_response_v0_openai(self, response, kwargs, init_timestamp):
"""Handle responses for OpenAI versions <v1.0.0"""

prompt = kwargs.pop("messages", None) # pull prompt out for visibility
self.completion = ""
self.llm_event = None

def handle_stream_chunk(chunk):
self.llm_event = LLMEvent(
init_timestamp=init_timestamp,
params=kwargs
)

try:
model = chunk['model']
choices = chunk['choices']
token = choices[0]['delta'].get('content', '')
finish_reason = choices[0]['finish_reason']

if self.event_stream == None:
self.event_stream = LLMEvent(
init_timestamp=init_timestamp,
params=kwargs,
returns={"finish_reason": None,
"content": token},
agent_id=check_call_stack_for_agent_id(),
prompt=prompt,
completion=token,
model=model
)
else:
self.event_stream.returns['content'] += token
self.event_stream.completion += token
if token:
self.completion += token

if finish_reason:
if not self.event_stream.returns:
self.event_stream.returns = {}
self.event_stream.returns['finish_reason'] = finish_reason
# Update end_timestamp
self.event_stream.end_timestamp = get_ISO_time()
self.client.record(self.event_stream)
self.event_stream = None
self.llm_event.agent_id = check_call_stack_for_agent_id()
self.llm_event.prompt_messages = kwargs["messages"]
self.llm_event.prompt_messages_format = LLMMessageFormat.CHATML
self.llm_event.completion_message = {"role": "assistant", "content": self.completion}
self.llm_event.completion_message_format = LLMMessageFormat.CHATML
self.llm_event.returns = {"finish_reason": finish_reason, "content": self.completion}
self.llm_event.model = model
self.llm_event.end_timestamp = get_ISO_time()
self.llm_event.format_messages()

self.client.record(self.llm_event)
except Exception as e:
self.client.record(ErrorEvent(trigger_event=self.llm_event, details={f"{type(e).__name__}": str(e)}))
# TODO: This error is specific to only one path of failure. Should be more generic or have different logging for different paths
logging.warning(
f"AgentOps: Unable to parse a chunk for LLM call {kwargs} - skipping upload to AgentOps")
@@ -83,45 +83,28 @@ def generator():
yield chunk
return generator()

self.llm_event = LLMEvent(
init_timestamp=init_timestamp,
params=kwargs
)
# v0.0.0 responses are dicts
try:
self.client.record(LLMEvent(
init_timestamp=init_timestamp,
params=kwargs,
returns={"content":
response['choices'][0]['message']['content']},
agent_id=check_call_stack_for_agent_id(),
prompt=prompt,
completion=response['choices'][0]['message']['content'],
model=response['model'],
prompt_tokens=response.get('usage',
{}).get('prompt_tokens'),
completion_tokens=response.get('usage',
{}).get('completion_tokens')
))

except: # NOTE: Execution should never reach here. Should remove if does not break anything

# v1.0.0+ responses are objects
try:
self.client.record(LLMEvent(
init_timestamp=init_timestamp,
params=kwargs,
returns={
"content": response.choices[0].message.model_dump()},
agent_id=check_call_stack_for_agent_id(),
# TODO: Will need to make the completion the key for content, splat out the model dump
prompt=prompt,
completion=response.choices[0].message.model_dump(),
model=response.model,
prompt_tokens=response.usage.prompt_tokens,
completion_tokens=response.usage.completion_tokens
))
# Standard response
except Exception as e:
# TODO: This error is specific to only one path of failure. Should be more generic or have different logging for different paths
logging.warning(
f"AgentOps: Unable to parse a chunk for LLM call {kwargs} - skipping upload to AgentOps")
self.llm_event.agent_id = check_call_stack_for_agent_id()
self.llm_event.prompt_messages = kwargs["messages"]
self.llm_event.prompt_messages_format = LLMMessageFormat.CHATML
self.llm_event.completion_message = response['choices'][0]['message']
self.llm_event.completion_message_format = LLMMessageFormat.CHATML
self.llm_event.returns = {"content": response['choices'][0]['message']['content']}
self.llm_event.model = response["model"]
self.llm_event.end_timestamp = get_ISO_time()
self.llm_event.format_messages()

self.client.record(self.llm_event)
except Exception as e:
self.client.record(ErrorEvent(trigger_event=self.llm_event, details={f"{type(e).__name__}": str(e)}))
# TODO: This error is specific to only one path of failure. Should be more generic or have different logging for different paths
logging.warning(
f"AgentOps: Unable to parse a chunk for LLM call {kwargs} - skipping upload to AgentOps")

return response

@@ -131,9 +114,16 @@ def _handle_response_v1_openai(self, response, kwargs, init_timestamp):
from openai.types.chat import ChatCompletionChunk
from openai.resources import AsyncCompletions

prompt = kwargs.pop("messages", None) # pull prompt out for visibility
self.completion = ""
self.llm_event = None

def handle_stream_chunk(chunk: ChatCompletionChunk):

self.llm_event = LLMEvent(
init_timestamp=init_timestamp,
params=kwargs
)

try:
model = chunk.model
choices = chunk.choices
@@ -142,34 +132,24 @@ def handle_stream_chunk(chunk: ChatCompletionChunk):
function_call = choices[0].delta.function_call
tool_calls = choices[0].delta.tool_calls
role = choices[0].delta.role

if self.event_stream == None:
self.event_stream = LLMEvent(
init_timestamp=init_timestamp,
params=kwargs,
agent_id=check_call_stack_for_agent_id(),
prompt=prompt,
returns={"finish_reason": None,
"content": token},
model=model
)
else:
if token == None:
token = ''
self.event_stream.returns['content'] += token
if token:
self.completion += token

if finish_reason:
if not self.event_stream.returns:
self.event_stream.returns = {}
self.event_stream.returns['finish_reason'] = finish_reason
self.event_stream.returns['function_call'] = function_call
self.event_stream.returns['tool_calls'] = tool_calls
self.event_stream.returns['role'] = role
# Update end_timestamp
self.event_stream.end_timestamp = get_ISO_time()
self.client.record(self.event_stream)
self.event_stream = None
self.llm_event.agent_id = check_call_stack_for_agent_id()
self.llm_event.prompt_messages = kwargs["messages"]
self.llm_event.prompt_messages_format = LLMMessageFormat.CHATML
self.llm_event.completion_message = {"role": "assistant", "content": self.completion}
self.llm_event.completion_message_format = LLMMessageFormat.CHATML
self.llm_event.returns = {"finish_reason": finish_reason, "content": self.completion,
"function_call": function_call, "tool_calls": tool_calls, "role": role}
self.llm_event.model = model
self.llm_event.end_timestamp = get_ISO_time()
self.llm_event.format_messages()

self.client.record(self.llm_event)
except Exception as e:
self.client.record(ErrorEvent(trigger_event=self.llm_event, details={f"{type(e).__name__}": str(e)}))
# TODO: This error is specific to only one path of failure. Should be more generic or have different logging for different paths
logging.warning(
f"AgentOps: Unable to parse a chunk for LLM call {kwargs} - skipping upload to AgentOps")
@@ -198,23 +178,24 @@ async def async_generator():
yield chunk
return async_generator()

self.llm_event = LLMEvent(
init_timestamp=init_timestamp,
params=kwargs
)
# v1.0.0+ responses are objects
try:
self.client.record(LLMEvent(
init_timestamp=init_timestamp,
params=kwargs,
returns={
# TODO: Will need to make the completion the key for content, splat out the model dump
"content": response.choices[0].message.model_dump()},
agent_id=check_call_stack_for_agent_id(),
prompt=prompt,
completion=response.choices[0].message.model_dump(),
model=response.model,
prompt_tokens=response.usage.prompt_tokens,
completion_tokens=response.usage.completion_tokens
))
# Standard response
self.llm_event.agent_id = check_call_stack_for_agent_id()
self.llm_event.prompt_messages = kwargs["messages"]
self.llm_event.prompt_messages_format = LLMMessageFormat.CHATML
self.llm_event.completion_message = response.choices[0].message.model_dump()
self.llm_event.completion_message_format = LLMMessageFormat.CHATML
self.llm_event.returns = response.model_dump()
self.llm_event.model = response.model
self.llm_event.format_messages()

self.client.record(self.llm_event)
except Exception as e:
self.client.record(ErrorEvent(trigger_event=self.llm_event, details={f"{type(e).__name__}": str(e)}))
# TODO: This error is specific to only one path of failure. Should be more generic or have different logging for different paths
logging.warning(
f"AgentOps: Unable to parse a chunk for LLM call {kwargs} - skipping upload to AgentOps")
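Across all three handlers the refactor follows the same shape: create self.llm_event before parsing, fill in prompt/completion/model on success, and record an ErrorEvent linked to the partially built event on failure. A condensed, hedged sketch of that pattern for the non-streaming v0-style (dict) response path, with the parsing simplified relative to the diff above:

import logging

from agentops.event import LLMEvent, ErrorEvent
from agentops.enums import LLMMessageFormat
from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id

def _handle_response(self, response, kwargs, init_timestamp):
    # Create the event up front so a parsing failure can still be tied to it
    self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
    try:
        self.llm_event.agent_id = check_call_stack_for_agent_id()
        self.llm_event.prompt_messages = kwargs["messages"]
        self.llm_event.prompt_messages_format = LLMMessageFormat.CHATML
        self.llm_event.completion_message = response["choices"][0]["message"]
        self.llm_event.completion_message_format = LLMMessageFormat.CHATML
        self.llm_event.returns = {"content": response["choices"][0]["message"]["content"]}
        self.llm_event.model = response["model"]
        self.llm_event.end_timestamp = get_ISO_time()
        self.llm_event.format_messages()
        self.client.record(self.llm_event)
    except Exception as e:
        # Link the failure to the event that was being built
        self.client.record(ErrorEvent(trigger_event=self.llm_event, details={type(e).__name__: str(e)}))
        logging.warning(
            f"AgentOps: Unable to parse a chunk for LLM call {kwargs} - skipping upload to AgentOps")
    return response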
2 changes: 1 addition & 1 deletion tests/openai_handlers/test_llm_tracker_ge_1_async.py
@@ -28,7 +28,7 @@ async def call_openai_async():
)

print(response)
raise ValueError("This is an intentional error for testing.")
# raise ValueError("This is an intentional error for testing.")


async def main():