diff --git a/docs.json b/docs.json index a0fe000b..6159354c 100644 --- a/docs.json +++ b/docs.json @@ -29,6 +29,13 @@ "group": "Observability", "pages": [ "product/observability", + { + "group": "OpenTelemetry", + "pages": [ + "product/observability/opentelemetry", + "product/observability/opentelemetry/list-of-supported-otel-instrumenters" + ] + }, "product/observability/logs", "product/observability/traces", "product/observability/analytics", @@ -415,7 +422,12 @@ "group": "Tracing Providers", "pages": [ "integrations/tracing-providers/arize", - "integrations/tracing-providers/logfire" + "integrations/tracing-providers/phoenix", + "integrations/tracing-providers/logfire", + "integrations/tracing-providers/ml-flow", + "integrations/tracing-providers/openlit", + "integrations/tracing-providers/opentelemetry-python-sdk", + "integrations/tracing-providers/traceloop" ] } ] @@ -2319,6 +2331,10 @@ { "source": "/product/guardrails/bring-your-own-guardrails", "destination": "/integrations/guardrails/bring-your-own-guardrails" + }, + { + "source": "/integrations/tracing-providers", + "destination": "/product/observability/opentelemetry/list-of-supported-otel-instrumenters" } ], "seo": { diff --git a/integrations/ecosystem.mdx b/integrations/ecosystem.mdx index a16c366b..447d086b 100644 --- a/integrations/ecosystem.mdx +++ b/integrations/ecosystem.mdx @@ -9,7 +9,7 @@ title: "Integrations" - + diff --git a/integrations/tracing-providers.mdx b/integrations/tracing-providers.mdx deleted file mode 100644 index abfcfbed..00000000 --- a/integrations/tracing-providers.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: "Overview" ---- - - - - - - diff --git a/integrations/tracing-providers/logfire.mdx b/integrations/tracing-providers/logfire.mdx index cd24e861..03f16a0b 100644 --- a/integrations/tracing-providers/logfire.mdx +++ b/integrations/tracing-providers/logfire.mdx @@ -1,17 +1,149 @@ --- title: "Pydantic Logfire" -description: "Logfire is a tool for comprehensive observability of your LLM applications with OpenTelemetry." +description: "Modern Python observability with automatic OpenAI instrumentation and intelligent gateway routing" --- +[Pydantic Logfire](https://pydantic.dev/logfire) is a modern observability platform from the creators of Pydantic, designed specifically for Python applications. It provides automatic instrumentation for popular libraries including OpenAI, Anthropic, and other LLM providers, making it an excellent choice for AI application monitoring. -[Logfire](https://pydantic.dev/logfire) and any opentelemetry compatible tracing library works out of the box with Portkey. + +Logfire's automatic instrumentation combined with Portkey's intelligent gateway creates a powerful observability stack where every trace is enriched with routing decisions, cache performance, and cost optimization data. + -All you need to do is set the following environment variables in your application: +## Why Logfire + Portkey? 
-```sh -OTEL_EXPORTER_OTLP_ENDPOINT = "https://api.portkey.com/v1/otel" -OTEL_EXPORTER_OTLP_HEADERS = "x-portkey-api-key={YOUR_PORTKEY_API_KEY}" + + +Logfire automatically instruments OpenAI SDK calls without any code changes + + +Portkey adds routing context, fallback decisions, and cache performance to every trace + + +Built by the Pydantic team specifically for Python developers + + +See traces immediately with actionable optimization opportunities + + + +## Quick Start + +### Prerequisites + +- Python +- Portkey account with API key +- OpenAI API key (or use Portkey's virtual keys) + +### Step 1: Install Dependencies + +Install the required packages for Logfire and Portkey integration: + +```bash +pip install logfire openai portkey-ai ``` +### Step 2: Basic Setup - Send Traces to Portkey + +First, let's configure Logfire to send traces to Portkey's OpenTelemetry endpoint: + +```python +import os +import logfire + +# Configure OpenTelemetry export to Portkey +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" + +# Initialize Logfire +logfire.configure( + service_name='my-llm-app', + send_to_logfire=False, # Disable sending to Logfire cloud +) + +# Instrument OpenAI globally +logfire.instrument_openai() +``` + +### Step 3: Complete Setup - Use Portkey's Gateway + +For the best experience, route your LLM calls through Portkey's gateway to get automatic optimizations: + +```python +import logfire +import os +from portkey_ai import createHeaders +from openai import OpenAI + +# Configure OpenTelemetry export +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" + +# Initialize Logfire +logfire.configure( + service_name='my-llm-app', + send_to_logfire=False, +) + +# Create OpenAI client with Portkey's gateway +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", # Or use a dummy value with virtual keys + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" # Optional: Use Portkey's secure key management + ) +) + +# Instrument the Portkey-configured client +logfire.instrument_openai(client) +``` + +### Step 4: Make Instrumented LLM Calls + +Now your LLM calls are automatically traced by Logfire and enhanced by Portkey: + +```python +# Simple chat completion - automatically traced +response = client.chat.completions.create( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "Explain the benefits of observability in LLM applications" + } + ], + temperature=0.7 +) + +print(response.choices[0].message.content) +``` + + + + +## Next Steps + + + +Set up intelligent routing, fallbacks, and caching + + +Secure your API keys with Portkey's vault + + +Analyze costs, performance, and usage patterns + + + Set Rate and Budget Limits per model/user/api-key + + + +--- + +## See Your Traces in Action +Once configured, navigate to the [Portkey dashboard](https://app.portkey.ai/logs) to see your Logfire instrumentation combined with gateway intelligence: + + OpenTelemetry traces in Portkey + diff --git a/integrations/tracing-providers/ml-flow.mdx b/integrations/tracing-providers/ml-flow.mdx new file mode 100644 index 00000000..efdfb131 --- /dev/null +++ b/integrations/tracing-providers/ml-flow.mdx @@ -0,0 +1,203 @@ +--- +title: "MLflow Tracing" +description: "Enhance LLM observability with 
automatic tracing and intelligent gateway routing" +--- + +[MLflow Tracing](https://mlflow.org/docs/latest/llms/tracing/index.html) is a feature that enhances LLM observability in your Generative AI (GenAI) applications by capturing detailed information about the execution of your application's services. Tracing provides a way to record the inputs, outputs, and metadata associated with each intermediate step of a request, enabling you to easily pinpoint the source of bugs and unexpected behaviors. + + +MLflow offers automatic, no-code-added integrations with over 20 popular GenAI libraries, providing immediate observability with just a single line of code. Combined with Portkey's intelligent gateway, you get comprehensive tracing enriched with routing decisions and performance optimizations. + + +## Why MLflow + Portkey? + + + +Automatic instrumentation for 20+ GenAI libraries with one line of code + + +Capture inputs, outputs, and metadata for every step + + +Portkey adds routing context, fallback decisions, and cache performance + + +Easily pinpoint issues with comprehensive trace data + + + +## Quick Start + +### Prerequisites + +- Python +- Portkey account with API key +- OpenAI API key (or use Portkey's virtual keys) + +### Step 1: Install Dependencies + +Install the required packages for MLflow and Portkey integration: + +```bash +pip install mlflow openai opentelemetry-exporter-otlp-proto-http portkey-ai +``` + +### Step 2: Configure OpenTelemetry Export + +Set up the environment variables to send traces to Portkey's OpenTelemetry endpoint: + +```python +import os + +# Configure Portkey endpoint and authentication +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" +os.environ["OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"] = "http/protobuf" +``` + +### Step 3: Enable MLflow Instrumentation + +Enable automatic tracing for OpenAI with just one line: + +```python +import mlflow + +# Enable the MLflow instrumentation for tracing OpenAI +mlflow.openai.autolog() +``` + +### Step 4: Configure Portkey Gateway + +Set up the OpenAI client to use Portkey's intelligent gateway: + +```python +from openai import OpenAI +from portkey_ai import createHeaders + +# Use Portkey's gateway for intelligent routing +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", # Or use a dummy value with virtual keys + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" # Optional: Use Portkey's secure key management + ) +) +``` + +### Step 5: Make Instrumented LLM Calls + +Now your LLM calls are automatically traced by MLflow and enhanced by Portkey: + +```python +# Make calls through Portkey's gateway +# MLflow instruments the call, Portkey adds gateway intelligence +response = client.chat.completions.create( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "Explain the importance of tracing in LLM applications" + } + ], + temperature=0.7 +) + +print(response.choices[0].message.content) + +# You now get: +# 1. Automatic tracing from MLflow +# 2. Gateway features from Portkey (caching, fallbacks, routing) +# 3. 
Combined insights in Portkey's dashboard +``` + +## Complete Example + +Here's a full example bringing everything together: + +```python +import os +import mlflow +from openai import OpenAI +from portkey_ai import createHeaders + +# Step 1: Configure Portkey endpoint +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" +os.environ["OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"] = "http/protobuf" + +# Step 2: Enable MLflow instrumentation +mlflow.openai.autolog() + +# Step 3: Configure Portkey Gateway +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" + ) +) + +# Step 4: Make instrumented calls +response = client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a helpful AI assistant."}, + {"role": "user", "content": "What are the benefits of observability in production AI systems?"} + ] +) + +print(response.choices[0].message.content) +``` + +## Supported Integrations + +MLflow automatically instruments many popular GenAI libraries: + +### LLM Providers +- OpenAI +- Anthropic +- Cohere +- Google Generative AI +- Azure OpenAI + +### Vector Databases +- Pinecone +- ChromaDB +- Weaviate +- Qdrant + +### Frameworks +- LangChain +- LlamaIndex +- Haystack +- And 10+ more! + + +## Next Steps + + + +Set up intelligent routing, fallbacks, and caching + + +Secure your API keys with Portkey's vault + + +Analyze costs, performance, and usage patterns + + +Control costs with budget and rate limiting + + + +--- + +## See Your Traces in Action + +Once configured, navigate to the [Portkey dashboard](https://app.portkey.ai/logs) to see your MLflow instrumentation combined with gateway intelligence: + + + OpenTelemetry traces in Portkey + diff --git a/integrations/tracing-providers/openlit.mdx b/integrations/tracing-providers/openlit.mdx new file mode 100644 index 00000000..1305d03b --- /dev/null +++ b/integrations/tracing-providers/openlit.mdx @@ -0,0 +1,202 @@ +--- +title: "OpenLIT" +description: "Simplify AI development with OpenTelemetry-native observability and intelligent gateway routing" +--- + +[OpenLIT](https://openlit.io/) allows you to simplify your AI development workflow, especially for Generative AI and LLMs. It streamlines essential tasks like experimenting with LLMs, organizing and versioning prompts, and securely handling API keys. With just one line of code, you can enable OpenTelemetry-native observability, offering full-stack monitoring that includes LLMs, vector databases, and GPUs. + + +OpenLIT's automatic instrumentation combined with Portkey's intelligent gateway creates a comprehensive observability solution where every trace captures model performance, prompt versioning, and cost optimization data in real-time. + + +## Why OpenLIT + Portkey? 
+ + + +Enable complete observability with a single line of code for all AI components + + +Monitor LLMs, vector databases, and GPUs in a unified view + + +Built on OpenTelemetry standards for seamless integration + + +Smooth transition from experimentation to production deployment + + + +## Quick Start + +### Prerequisites + +- Python +- Portkey account with API key +- OpenAI API key (or use Portkey's virtual keys) + +### Step 1: Install Dependencies + +Install the required packages for OpenLIT and Portkey integration: + +```bash +pip install openlit openai portkey-ai +``` + +### Step 2: Configure OpenTelemetry Export + +Set up the environment variables to send traces to Portkey's OpenTelemetry endpoint: + +```python +import os + +# Configure Portkey endpoint and authentication +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" +``` + +### Step 3: Initialize OpenLIT with Custom Tracer + +Set up OpenTelemetry tracer and initialize OpenLIT: + +```python +import openlit +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry import trace + +# Create and configure the tracer provider +trace_provider = TracerProvider() +trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter())) + +# Set the global default tracer provider +trace.set_tracer_provider(trace_provider) + +# Create a tracer from the global tracer provider +tracer = trace.get_tracer(__name__) + +# Initialize OpenLIT with the custom tracer +# disable_batch=True ensures traces are processed immediately +openlit.init(tracer=tracer, disable_batch=True) +``` + +### Step 4: Configure Portkey Gateway + +Set up the OpenAI client to use Portkey's intelligent gateway: + +```python +from openai import OpenAI +from portkey_ai import createHeaders + +# Create OpenAI client with Portkey's gateway +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", # Or use a dummy value with virtual keys + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" # Optional: Use Portkey's secure key management + ) +) +``` + +### Step 5: Make Instrumented LLM Calls + +Now your LLM calls are automatically traced by OpenLIT and enhanced by Portkey: + +```python +# Make calls through Portkey's gateway +# OpenLIT instruments the call, Portkey adds gateway intelligence +response = client.chat.completions.create( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "Explain the benefits of OpenTelemetry in AI applications" + } + ], + temperature=0.7 +) + +print(response.choices[0].message.content) + +# You now get: +# 1. Automatic tracing from OpenLIT +# 2. Gateway features from Portkey (caching, fallbacks, routing) +# 3. 
Combined insights in Portkey's dashboard +``` + +## Complete Example + +Here's a full example bringing everything together: + +```python +import os +import openlit +from openai import OpenAI +from portkey_ai import createHeaders +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry import trace + +# Step 1: Configure Portkey endpoint +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" + +# Step 2: Set up OpenTelemetry +trace_provider = TracerProvider() +trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter())) +trace.set_tracer_provider(trace_provider) +tracer = trace.get_tracer(__name__) + +# Step 3: Initialize OpenLIT +openlit.init(tracer=tracer, disable_batch=True) + +# Step 4: Configure Portkey Gateway +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" + ) +) + +# Step 5: Make instrumented calls +response = client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What are the key benefits of observability in AI?"} + ] +) + +print(response.choices[0].message.content) +``` + +## Next Steps + + + +Set up intelligent routing, fallbacks, and caching + + +Secure your API keys with Portkey's vault + + +Analyze costs, performance, and usage patterns + + +Configure alerts for anomalies and performance issues + + + +--- + +## See Your Traces in Action + +Once configured, navigate to the [Portkey dashboard](https://app.portkey.ai/logs) to see your OpenLIT instrumentation combined with gateway intelligence: + + + OpenTelemetry traces in Portkey + diff --git a/integrations/tracing-providers/opentelemetry-python-sdk.mdx b/integrations/tracing-providers/opentelemetry-python-sdk.mdx new file mode 100644 index 00000000..88225c56 --- /dev/null +++ b/integrations/tracing-providers/opentelemetry-python-sdk.mdx @@ -0,0 +1,213 @@ +--- +title: "OpenTelemetry Python SDK" +description: "Direct OpenTelemetry instrumentation with full control over traces and intelligent gateway routing" +--- + +The [OpenTelemetry SDK](https://opentelemetry.io/docs/languages/python/) provides direct, fine-grained control over instrumentation in your LLM applications. Unlike automatic instrumentation libraries, the SDK allows you to manually create spans and set attributes exactly where and how you need them. + + +Using the OpenTelemetry SDK directly with Portkey gives you complete control over what gets traced while benefiting from Portkey's intelligent gateway features like caching, fallbacks, and load balancing. + + +## Why OpenTelemetry SDK + Portkey? 
+ + + +Manually instrument exactly what you need with custom spans and attributes + + +Battle-tested OpenTelemetry standard used by enterprises worldwide + + +Add any metadata you need to traces for debugging and analysis + + +Portkey adds routing optimization and resilience to your LLM calls + + + +## Quick Start + +### Prerequisites + +- Python +- Portkey account with API key +- OpenAI API key (or use Portkey's virtual keys) + +### Step 1: Install Dependencies + +Install the required packages: + +```bash +pip install opentelemetry-api opentelemetry-sdk opentelemetry-exporter-otlp-proto-http openai portkey-ai +``` + +### Step 2: Configure OpenTelemetry + +Set up the tracer provider and OTLP exporter: + +```python +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter + +# Setup tracer provider +provider = TracerProvider() +trace.set_tracer_provider(provider) + +# Configure OTLP exporter to send to Portkey +otlp_exporter = OTLPSpanExporter( + endpoint="https://api.portkey.ai/v1/logs/otel/v1/traces", + headers={ + "x-portkey-api-key": "YOUR_PORTKEY_API_KEY", + } +) + +# Add batch span processor (recommended for production) +span_processor = BatchSpanProcessor(otlp_exporter) +provider.add_span_processor(span_processor) + +# Get tracer +tracer = trace.get_tracer(__name__) +``` + +### Step 3: Configure Portkey Gateway + +Set up the OpenAI client with Portkey's gateway: + +```python +from openai import OpenAI +from portkey_ai import createHeaders + +# Use Portkey's gateway for intelligent routing +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" # Optional: Use Portkey's secure key management + ) +) +``` + +### Step 4: Create Instrumented Functions + +Manually instrument your LLM calls with custom spans: + +```python +def generate_ai_response(input_text): + with tracer.start_as_current_span("OpenAI-Chat-Completion") as span: + # Add input attributes + span.set_attribute("input.value", input_text) + span.set_attribute("model.name", "gpt-4") + span.set_attribute("temperature", 0.7) + span.set_attribute("gen_ai.prompt.0.role", "user") + span.set_attribute("gen_ai.prompt.0.content", input_text) + + # Make the API call + response = client.chat.completions.create( + messages=[{"role": "user", "content": input_text}], + model="gpt-4", + temperature=0.7, + ) + + # Add response attributes + output_content = response.choices[0].message.content + span.set_attribute("output.value", output_content) + span.set_attribute("gen_ai.completion.0.role", "assistant") + span.set_attribute("gen_ai.completion.0.content", output_content) + + return output_content +``` + +## Complete Example + +Here's a full working example: + +```python +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from openai import OpenAI +from portkey_ai import createHeaders + +# Step 1: Setup OpenTelemetry +provider = TracerProvider() +trace.set_tracer_provider(provider) + +otlp_exporter = OTLPSpanExporter( + endpoint="https://api.portkey.ai/v1/logs/otel/v1/traces", + headers={"x-portkey-api-key": "YOUR_PORTKEY_API_KEY"} +) + +span_processor = 
BatchSpanProcessor(otlp_exporter)
+provider.add_span_processor(span_processor)
+
+tracer = trace.get_tracer(__name__)
+
+# Step 2: Configure Portkey Gateway
+client = OpenAI(
+    api_key="YOUR_OPENAI_API_KEY",
+    base_url="https://api.portkey.ai/v1",
+    default_headers=createHeaders(
+        api_key="YOUR_PORTKEY_API_KEY",
+        virtual_key="YOUR_VIRTUAL_KEY"
+    )
+)
+
+# Step 3: Create instrumented function
+def generate_ai_response(input_text):
+    with tracer.start_as_current_span("OpenAI-Chat-Completion") as span:
+        # Set input attributes
+        span.set_attribute("input.value", input_text)
+        span.set_attribute("model.name", "gpt-4")
+        span.set_attribute("temperature", 0.7)
+
+        # Make API call
+        response = client.chat.completions.create(
+            messages=[{"role": "user", "content": input_text}],
+            model="gpt-4",
+            temperature=0.7,
+        )
+
+        # Set output attributes
+        output_content = response.choices[0].message.content
+        span.set_attribute("output.value", output_content)
+
+        return output_content
+
+# Step 4: Use the instrumented function
+if __name__ == "__main__":
+    input_text = "Explain the concept of AI in 50 words"
+    response = generate_ai_response(input_text)
+    print(response)
+```
+
+## Next Steps
+
+
+
+Set up intelligent routing, fallbacks, and caching
+
+
+Secure your API keys with Portkey's vault
+
+
+Analyze costs, performance, and usage patterns
+
+
+Configure alerts for anomalies and performance issues
+
+
+
+---
+
+## See Your Traces in Action
+
+Once configured, navigate to the [Portkey dashboard](https://app.portkey.ai/logs) to see your custom OpenTelemetry traces enhanced with gateway intelligence:
+
+
+ OpenTelemetry traces in Portkey
+
diff --git a/integrations/tracing-providers/phoenix.mdx b/integrations/tracing-providers/phoenix.mdx
new file mode 100644
index 00000000..ebb3b981
--- /dev/null
+++ b/integrations/tracing-providers/phoenix.mdx
@@ -0,0 +1,258 @@
+---
+title: "Phoenix (Arize) OpenTelemetry"
+description: "AI observability and debugging platform with OpenInference instrumentation and intelligent gateway routing"
+---
+
+[Arize Phoenix](https://phoenix.arize.com/) is an open-source AI observability platform designed to help developers debug, monitor, and evaluate LLM applications. Phoenix provides powerful visualization tools and uses OpenInference instrumentation to automatically capture detailed traces of your AI system's behavior.
+
+
+Phoenix's OpenInference instrumentation combined with Portkey's intelligent gateway provides comprehensive debugging capabilities with automatic trace collection, while adding routing optimization and resilience features to your LLM calls.
+
+
+## Why Arize Phoenix + Portkey?
+ + + +Powerful UI for exploring traces, spans, and debugging LLM behavior + + +Industry-standard semantic conventions for AI/LLM observability + + +Built-in tools for evaluating model performance and behavior + + +Portkey adds caching, fallbacks, and load balancing to every request + + + +## Quick Start + +### Prerequisites + +- Python +- Portkey account with API key +- OpenAI API key (or use Portkey's virtual keys) + +### Step 1: Install Dependencies + +Install the required packages for Phoenix and Portkey integration: + +```bash +pip install arize-phoenix-otel openai openinference-instrumentation-openai portkey-ai +``` + +### Step 2: Configure OpenTelemetry Export + +Set up the environment variables to send traces to Portkey: + +```python +import os + +# Configure Portkey endpoint and authentication +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" +``` + +### Step 3: Register Phoenix and Instrument OpenAI + +Initialize Phoenix and enable OpenAI instrumentation: + +```python +from phoenix.otel import register +from openinference.instrumentation.openai import OpenAIInstrumentor + +# Configure Phoenix tracer +register(set_global_tracer_provider=False) + +# Instrument OpenAI +OpenAIInstrumentor().instrument() +``` + +### Step 4: Configure Portkey Gateway + +Set up the OpenAI client with Portkey's gateway: + +```python +from openai import OpenAI +from portkey_ai import createHeaders + +# Use Portkey's gateway for intelligent routing +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", # Or use a dummy value with virtual keys + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" # Optional: Use Portkey's secure key management + ) +) +``` + +### Step 5: Make Instrumented LLM Calls + +Your LLM calls are now automatically traced by Phoenix and enhanced by Portkey: + +```python +# Make calls with automatic tracing +response = client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "How does Phoenix help with AI debugging?", + } + ], + model="gpt-4", + temperature=0.7 +) + +print(response.choices[0].message.content) + +# Phoenix captures: +# - Input/output pairs +# - Token usage +# - Latency metrics +# - Model parameters +# +# Portkey adds: +# - Gateway routing decisions +# - Cache hit/miss data +# - Fallback information +``` + +## Complete Example + +Here's a full working example: + +```python +from phoenix.otel import register +from openinference.instrumentation.openai import OpenAIInstrumentor +import os +from openai import OpenAI +from portkey_ai import createHeaders + +# Step 1: Configure Portkey endpoint +os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel" +os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY" + +# Step 2: Register Phoenix and instrument OpenAI +register(set_global_tracer_provider=False) +OpenAIInstrumentor().instrument() + +# Step 3: Configure Portkey Gateway +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" + ) +) + +# Step 4: Make instrumented calls +response = client.chat.completions.create( + messages=[ + {"role": "system", "content": "You are a helpful AI assistant."}, + {"role": "user", "content": "Explain how observability helps in production AI 
systems"} + ], + model="gpt-4", + temperature=0.7 +) + +print(response.choices[0].message.content) +``` + +## OpenInference Instrumentation + +Phoenix uses OpenInference semantic conventions for AI observability: + +### Automatic Capture +- **Messages**: Full conversation history with roles and content +- **Model Info**: Model name, temperature, and other parameters +- **Token Usage**: Input/output token counts for cost tracking +- **Errors**: Detailed error information when requests fail +- **Latency**: End-to-end request timing + +### Supported Providers +Phoenix can instrument multiple LLM providers: +- OpenAI +- Anthropic +- Bedrock +- Vertex AI +- Azure OpenAI +- And more through OpenInference instrumentors + +## Configuration Options + +### Custom Span Attributes + +Add custom attributes to your traces: + +```python +from opentelemetry import trace + +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span("custom_operation") as span: + span.set_attribute("user.id", "user123") + span.set_attribute("session.id", "session456") + + # Your LLM call here + response = client.chat.completions.create(...) +``` + +### Sampling Configuration + +Control trace sampling for production environments: + +```python +from opentelemetry.sdk.trace.sampling import TraceIdRatioBased + +# Sample 10% of traces +register( + set_global_tracer_provider=False, + sampler=TraceIdRatioBased(0.1) +) +``` + +## Troubleshooting + +### Common Issues + + + +Ensure both OTEL_EXPORTER_OTLP_ENDPOINT and OTEL_EXPORTER_OTLP_HEADERS are correctly set + + +Make sure to call OpenAIInstrumentor().instrument() before creating your OpenAI client + + +If using Phoenix UI locally, ensure Phoenix is running and properly configured + + + +## Next Steps + + + +Set up intelligent routing, fallbacks, and caching + + +Secure your API keys with Portkey's vault + + +Analyze costs, performance, and usage patterns + + +Create custom evaluations for your AI system + + + +--- + +## See Your Traces in Action + +Once configured, navigate to the [Portkey dashboard](https://app.portkey.ai/logs) to see your Phoenix instrumentation combined with gateway intelligence: + + + OpenTelemetry traces in Portkey + diff --git a/integrations/tracing-providers/traceloop.mdx b/integrations/tracing-providers/traceloop.mdx new file mode 100644 index 00000000..ebf40ff1 --- /dev/null +++ b/integrations/tracing-providers/traceloop.mdx @@ -0,0 +1,169 @@ +--- +title: "Traceloop (OpenLLMetry)" +--- + +[Traceloop's OpenLLMetry](https://www.traceloop.com/docs/openllmetry/introduction) is an open source project that allows you to easily start monitoring and debugging the execution of your LLM app. + + +Traceloop's non-intrusive instrumentation combined with Portkey's intelligent gateway provides comprehensive observability without modifying your application code, while adding routing intelligence, caching, and failover capabilities. + + +## Why Traceloop + Portkey? 
+ + + +Automatic instrumentation without changing your application code + + +Built on industry-standard OpenTelemetry for maximum compatibility + + +Send traces to Portkey or any OpenTelemetry-compatible backend + + +Portkey adds gateway features like caching, fallbacks, and load balancing + + + +## Quick Start + +### Prerequisites + +- Python +- Portkey account with API key +- OpenAI API key (or use Portkey's virtual keys) + +### Step 1: Install Dependencies + +Install the required packages for Traceloop and Portkey integration: + +```bash +pip install openai traceloop-sdk portkey-ai +``` + +### Step 2: Initialize Traceloop + +Configure Traceloop to send traces to Portkey's OpenTelemetry endpoint: + +```python +from traceloop.sdk import Traceloop + +# Initialize Traceloop with Portkey's endpoint +Traceloop.init( + disable_batch=True, # Process traces immediately + api_endpoint="https://api.portkey.ai/v1/logs/otel", + headers="x-portkey-api-key=YOUR_PORTKEY_API_KEY", + telemetry_enabled=False # Disable Traceloop's own telemetry +) +``` + +### Step 3: Configure Portkey Gateway + +Set up the OpenAI client to use Portkey's intelligent gateway: + +```python +from openai import OpenAI +from portkey_ai import createHeaders + +# Use Portkey's gateway for intelligent routing +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", # Or use a dummy value with virtual keys + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" # Optional: Use Portkey's secure key management + ) +) +``` + +### Step 4: Make Instrumented LLM Calls + +Your LLM calls are now automatically traced by Traceloop and enhanced by Portkey: + +```python +# Make calls through Portkey's gateway +# Traceloop automatically instruments the call +response = client.chat.completions.create( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "Explain the benefits of OpenTelemetry for LLM applications" + } + ], + temperature=0.7 +) + +print(response.choices[0].message.content) + +# You now get: +# 1. Automatic, non-intrusive tracing from Traceloop +# 2. Gateway features from Portkey (caching, fallbacks, routing) +# 3. 
Combined insights in Portkey's dashboard +``` + +## Complete Example + +Here's a full example bringing everything together: + +```python +from traceloop.sdk import Traceloop +from openai import OpenAI +from portkey_ai import createHeaders + +# Step 1: Initialize Traceloop with Portkey endpoint +Traceloop.init( + disable_batch=True, + api_endpoint="https://api.portkey.ai/v1/logs/otel", + headers="x-portkey-api-key=YOUR_PORTKEY_API_KEY", + telemetry_enabled=False +) + +# Step 2: Configure Portkey Gateway +client = OpenAI( + api_key="YOUR_OPENAI_API_KEY", + base_url="https://api.portkey.ai/v1", + default_headers=createHeaders( + api_key="YOUR_PORTKEY_API_KEY", + virtual_key="YOUR_VIRTUAL_KEY" + ) +) + +# Step 3: Make instrumented calls +response = client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What makes observability important for production AI?"} + ] +) + +print(response.choices[0].message.content) +``` + +## Next Steps + + + +Set up intelligent routing, fallbacks, and caching + + +Secure your API keys with Portkey's vault + + +Analyze costs, performance, and usage patterns + + +Configure alerts for anomalies and performance issues + + + +--- + +## See Your Traces in Action + +Once configured, navigate to the [Portkey dashboard](https://app.portkey.ai/logs) to see your Traceloop instrumentation combined with gateway intelligence: + + + OpenTelemetry traces in Portkey + diff --git a/product/observability.mdx b/product/observability.mdx index d7884765..450e2a3f 100644 --- a/product/observability.mdx +++ b/product/observability.mdx @@ -1,5 +1,5 @@ --- -title: "Observability (OpenTelemetry)" +title: "Observability" description: "Gain real-time insights, track key metrics, and streamline debugging with our comprehensive observability suite." --- diff --git a/product/observability/opentelemetry.mdx b/product/observability/opentelemetry.mdx new file mode 100644 index 00000000..3d37d707 --- /dev/null +++ b/product/observability/opentelemetry.mdx @@ -0,0 +1,148 @@ +--- +title: OpenTelemetry for LLM Observability +description: Leverage OpenTelemetry with Portkey for comprehensive LLM application observability, combining gateway insights with full-stack telemetry. +--- + +[OpenTelemetry (OTel)](https://opentelemetry.io/) is a Cloud Native Computing Foundation (CNCF) open-source framework. It provides a standardized way to collect, process, and export telemetry data (traces, metrics, and logs) from your applications. This is vital for monitoring performance, debugging issues, and understanding complex system behavior. + +Many popular AI development tools and SDKs, like the Vercel AI SDK, LlamaIndex, OpenLLMetry, and Logfire, utilize OpenTelemetry for observability. Portkey now embraces OTel, allowing you to send telemetry data from any OTel-compatible source directly into Portkey's observability platform. + +## The Portkey Advantage: Gateway Intelligence Meets Full-Stack Observability + +Portkey's strength lies in its unique combination of an intelligent **LLM Gateway** and a powerful **Observability** backend. + +- **Enriched Data from the Gateway:** Your LLM calls routed through the Portkey Gateway are automatically enriched with deep contextual information—virtual keys, caching status, retry attempts, prompt versions, and more. This data flows seamlessly into Portkey Observability. 
+- **Holistic View with OpenTelemetry:** By adding an OTel endpoint, Portkey now ingests traces and logs from your *entire* application stack, not just the LLM calls. Instrument your frontend, backend services, databases, and any other component with OTel, and send that data to Portkey.
+
+This combination provides an unparalleled, end-to-end view of your LLM application's performance, cost, and behavior. You can correlate application-level events with specific LLM interactions managed by the Portkey Gateway.
+
+## How OpenTelemetry Data Flows to Portkey
+
+The following diagram illustrates how telemetry data from your instrumented applications and the Portkey Gateway itself is consolidated within Portkey Observability:
+
+```mermaid
+graph LR
+    subgraph Your Application Stack
+        A[Application Code]
+        LIB[OTel Libraries e.g. Logfire]
+        A -- Instruments with --> LIB
+    end
+
+    PG[Portkey Gateway]
+    PK_OTEL[Portkey OTel Endpoint api.portkey.ai/v1/logs/otel]
+    PK_OBS[Portkey Observability Backend]
+
+    LIB -- Direct Export --> PK_OTEL
+    A -- LLM Calls via --> PG
+    PG -- Rich Telemetry & Logs --> PK_OBS
+    PK_OTEL -- Ingests Data --> PK_OBS
+```
+
+**Explanation:**
+
+1. Your **Application Code** is instrumented using **OTel Instrumentation Libraries**.
+2. This telemetry data (traces, logs) can be sent to the **Portkey OTel Endpoint**.
+3. Simultaneously, LLM calls made via the **Portkey Gateway** generate their own rich, structured telemetry.
+4. All this data is consolidated in the **Portkey Observability Stack**, giving you a unified view.
+
+## Setting Up Portkey as an OpenTelemetry Backend
+
+To send your OpenTelemetry data to Portkey, configure your OTel exporter to point to Portkey's OTLP endpoint and provide your Portkey API Key for authentication.
+
+**Key Environment Variables:**
+
+```bash
+# Portkey's OTLP HTTP Endpoint for traces and logs
+OTEL_EXPORTER_OTLP_ENDPOINT="https://api.portkey.ai/v1/logs/otel"
+# Your Portkey API Key (ensure it's a Server Key)
+OTEL_EXPORTER_OTLP_HEADERS="x-portkey-api-key=YOUR_PORTKEY_API_KEY"
+```
+
+
+ Replace `YOUR_PORTKEY_API_KEY` with your actual Portkey API Key found in your Portkey Dashboard.
+
+
+**Signal-Specific Endpoints:**
+ If your OTel collector or SDK strictly requires signal-specific endpoints:
+
+**For Traces:**
+`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="https://api.portkey.ai/v1/logs/otel/v1/traces"`
+
+**For Logs:**
+`OTEL_EXPORTER_OTLP_LOGS_ENDPOINT="https://api.portkey.ai/v1/logs/otel/v1/logs"`
+
+
+
+Remember to include the `OTEL_EXPORTER_OTLP_HEADERS` with your API key for these as well.
+
+
+## Viewing Traces
+
+Once configured, your OpenTelemetry traces appear in the Portkey dashboard, giving you full visibility into your AI application:
+ OpenTelemetry traces in Portkey
+
+
+
+## Why Use OpenTelemetry with Portkey?
+
+Portkey's OTel backend is compatible with any OTel-compliant instrumentation library, which gives you:
+
+
+
+ Works with any programming language that supports OpenTelemetry - Python, JavaScript, Java, Go, and more
+
+
+ Compatible with all major LLM frameworks through their OTel instrumentation
+
+
+ Many libraries offer auto-instrumentation that requires no changes to your application code
+
+
+ Built on industry-standard protocols ensuring long-term compatibility
+
+
+
+
+Navigate to the [Logs page](https://app.portkey.ai/logs) to view your traces, filter by various attributes, and drill down into specific requests.
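+If you want to confirm the connection before wiring up a full instrumentation library, here is a minimal sketch that sends a single hand-written span to Portkey (assuming only the `opentelemetry-sdk` and `opentelemetry-exporter-otlp-proto-http` packages; the span name and attribute are illustrative):
+
+```python
+from opentelemetry import trace
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+
+# Point the OTLP HTTP exporter at Portkey's signal-specific traces endpoint
+provider = TracerProvider()
+provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter(
+    endpoint="https://api.portkey.ai/v1/logs/otel/v1/traces",
+    headers={"x-portkey-api-key": "YOUR_PORTKEY_API_KEY"},
+)))
+trace.set_tracer_provider(provider)
+
+# Emit one test span, then flush so it is exported before the script exits
+tracer = trace.get_tracer("portkey-setup-check")
+with tracer.start_as_current_span("connection-test") as span:
+    span.set_attribute("test.note", "hello from my app")
+
+provider.force_flush()
+```
+
+If the test span shows up on the Logs page, any OTel-compatible library pointed at the same endpoint will work as well.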
+
+
+
+
+
+## Getting Started
+
+
+
+ Sign up for [Portkey](https://app.portkey.ai) and grab your API key from the settings page
+
+
+ Pick from our [supported integrations](/product/observability/opentelemetry/list-of-supported-otel-instrumenters) based on your stack
+
+
+ Point your OTel exporter to `https://api.portkey.ai/v1/logs/otel` with your API key
+
+
+ Run your application and view traces in the Portkey dashboard
+
+
+
+## Next Steps
+
+
+
+ Browse all available OpenTelemetry integrations
+
+
+ Learn how to analyze traces in Portkey
+
+
+ Discover Portkey's native auto-instrumentation features
+
+
+ Join our Discord community for support
+
+
diff --git a/product/observability/opentelemetry/list-of-supported-otel-instrumenters.mdx b/product/observability/opentelemetry/list-of-supported-otel-instrumenters.mdx
new file mode 100644
index 00000000..81bee552
--- /dev/null
+++ b/product/observability/opentelemetry/list-of-supported-otel-instrumenters.mdx
@@ -0,0 +1,16 @@
+---
+title: "Supported OTel Libraries"
+---
+
+Portkey works with any OpenTelemetry-compatible instrumentation. Here are some popular options:
+
+
+
+
+
+
+
+
+
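+Whichever library you choose, pointing it at Portkey comes down to the same two settings. Here is a sketch of the common pattern (the initializer calls in the comments are illustrative; use the exact call from your library's integration page):
+
+```python
+import os
+
+# Standard OTel environment variables read by most SDKs and instrumenters
+os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://api.portkey.ai/v1/logs/otel"
+os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "x-portkey-api-key=YOUR_PORTKEY_API_KEY"
+
+# ...then enable the instrumenter of your choice, for example:
+# logfire.instrument_openai()   # Pydantic Logfire
+# mlflow.openai.autolog()       # MLflow Tracing
+# openlit.init(tracer=tracer)   # OpenLIT
+# Traceloop.init(...)           # Traceloop (OpenLLMetry)
+```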