
Add tracing to OpenAI autologging #12267

Merged: 5 commits, Jun 27, 2024
3 changes: 3 additions & 0 deletions mlflow/openai/__init__.py
@@ -857,6 +857,7 @@ def autolog(
silent=False,
registered_model_name=None,
extra_tags=None,
log_traces=True,
):
"""
Enables (or disables) and configures autologging from OpenAI to MLflow.
@@ -897,6 +898,8 @@ def autolog(
new model version of the registered model with this name.
The registered model is created if it does not already exist.
extra_tags: A dictionary of extra tags to set on each managed run created by autologging.
log_traces: If ``True``, traces are logged for OpenAI models. If ``False``, no traces are
collected during inference. Defaults to ``True``.
"""

if Version(_get_openai_package_version()).major < 1:
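
For reference, a minimal usage sketch of the new flag (assuming a configured MLflow tracking environment and a valid OpenAI API key; the model name is illustrative):

import openai

import mlflow

# log_traces=True is the default and is shown explicitly here; each OpenAI
# call made while autologging is enabled is also recorded as a trace.
mlflow.openai.autolog(log_traces=True)

client = openai.OpenAI()
with mlflow.start_run():
    client.chat.completions.create(
        model="gpt-3.5-turbo",  # illustrative model name
        messages=[{"role": "user", "content": "Hello"}],
    )

# Inspect the trace recorded for the call above.
trace = mlflow.get_last_active_trace()
print(trace.info.status)

# Pass log_traces=False to keep autologging but skip trace collection.
mlflow.openai.autolog(log_traces=False)
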
29 changes: 27 additions & 2 deletions mlflow/openai/_openai_autolog.py
Member: Sorry for the late review, thanks for the PR!

Collaborator Author: no worries :)

@@ -11,7 +11,7 @@

import mlflow
from mlflow import MlflowException
from mlflow.entities import RunTag
from mlflow.entities import RunTag, SpanType
from mlflow.ml_package_versions import _ML_PACKAGE_VERSIONS
from mlflow.tracking.context import registry as context_registry
from mlflow.tracking.fluent import _get_experiment_id
@@ -75,6 +75,19 @@ def default(self, o):
return str(o)


def _get_span_type(task) -> str:
from openai.resources.chat.completions import Completions as ChatCompletions
from openai.resources.completions import Completions
from openai.resources.embeddings import Embeddings

span_type_mapping = {
ChatCompletions: SpanType.CHAT_MODEL,
Completions: SpanType.LLM,
Embeddings: SpanType.EMBEDDING,
}
return span_type_mapping.get(task, SpanType.UNKNOWN)


def patched_call(original, self, *args, **kwargs):
from openai import Stream

@@ -100,8 +113,20 @@ def patched_call(original, self, *args, **kwargs):
)
run_id = run.info.run_id

log_traces = get_autologging_config(mlflow.openai.FLAVOR_NAME, "log_traces", False)
with disable_autologging():
result = original(self, *args, **kwargs)
if log_traces:
with mlflow.start_span(
name=self.__class__.__name__,
span_type=_get_span_type(self.__class__),
) as span:
# openai does not accept positional arguments
# so we do not need to worry about it for now
span.set_inputs(kwargs)
result = original(self, *args, **kwargs)
span.set_outputs(result)
else:
result = original(self, *args, **kwargs)

# Use session_id-inference_id as artifact directory where mlflow
# callback logs artifacts into, to avoid overriding artifacts
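
To summarize the control flow above, here is a standalone sketch of the wrapping pattern (not the actual implementation; the span type is hard-coded where patched_call derives it via _get_span_type):

import mlflow
from mlflow.entities import SpanType


def traced_call(original, self, **kwargs):
    # Inputs are attached before the underlying call and outputs only on
    # success; if original() raises, mlflow.start_span closes the span with
    # an ERROR status and records an exception event, which the error test
    # below relies on.
    with mlflow.start_span(
        name=self.__class__.__name__,
        span_type=SpanType.LLM,  # simplified; see _get_span_type above
    ) as span:
        span.set_inputs(kwargs)
        result = original(self, **kwargs)
        span.set_outputs(result)
    return result
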
6 changes: 6 additions & 0 deletions tests/openai/mock_openai.py
@@ -20,6 +20,7 @@ class Message(BaseModel):

class ChatPayload(BaseModel):
messages: List[Message]
temperature: float = 0
stream: bool = False


@@ -79,6 +80,11 @@ async def chat_response_stream():

@app.post("/chat/completions")
async def chat(payload: ChatPayload):
if not 0.0 <= payload.temperature <= 2.0:
return fastapi.Response(
content="Temperature must be between 0.0 and 2.0",
status_code=400,
)
Collaborator Author (on lines +83 to +87):
This lets us test error behaviours: openai will throw an openai.BadRequestError when a 400 response is returned.

if payload.stream:
# SSE stream
return StreamingResponse(
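
As a usage sketch of the new validation (the base_url below is illustrative; in the test suite it comes from the mock_openai fixture):

import openai

client = openai.OpenAI(api_key="test", base_url="http://127.0.0.1:8000")

try:
    client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "test"}],
        temperature=5.0,  # outside the 0.0-2.0 range the mock accepts
    )
except openai.BadRequestError as err:
    # The SDK surfaces the mock's 400 response as BadRequestError.
    print(err)
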
59 changes: 59 additions & 0 deletions tests/openai/test_openai_tracing.py
@@ -0,0 +1,59 @@
import openai
import pytest

import mlflow
from mlflow.tracing.constant import SpanAttributeKey

from tests.openai.conftest import is_v1


@pytest.fixture
def client(mock_openai):
return openai.OpenAI(api_key="test", base_url=mock_openai)


@pytest.mark.skipif(not is_v1, reason="Requires OpenAI SDK v1")
def test_chat_completions_autolog_tracing_success(client, monkeypatch):
mlflow.openai.autolog()
messages = [{"role": "user", "content": "test"}]
with mlflow.start_run():
client.chat.completions.create(
messages=messages,
model="gpt-3.5-turbo",
temperature=0,
)

trace = mlflow.get_last_active_trace()
assert trace.info.status == "OK"

assert len(trace.data.spans) == 1
span = trace.data.spans[0]
assert span.name == "Completions"
assert span.attributes[SpanAttributeKey.INPUTS]["messages"][0]["content"] == "test"
assert span.attributes[SpanAttributeKey.OUTPUTS]["id"] == "chatcmpl-123"


@pytest.mark.skipif(not is_v1, reason="Requires OpenAI SDK v1")
def test_chat_completions_autolog_tracing_error(client, monkeypatch):
mlflow.openai.autolog()
messages = [{"role": "user", "content": "test"}]
with mlflow.start_run(), pytest.raises(
openai.BadRequestError, match="Temperature must be between 0.0 and 2.0"
):
client.chat.completions.create(
messages=messages,
model="gpt-3.5-turbo",
temperature=5.0,
)

trace = mlflow.get_last_active_trace()
assert trace.info.status == "ERROR"

assert len(trace.data.spans) == 1
span = trace.data.spans[0]
assert span.name == "Completions"
assert span.attributes[SpanAttributeKey.INPUTS]["messages"][0]["content"] == "test"
assert span.attributes.get(SpanAttributeKey.OUTPUTS) is None

assert span.events[0].name == "exception"
assert span.events[0].attributes["exception.type"] == "openai.BadRequestError"