Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
15 commits
Select commit Hold shift + click to select a range
83b8e35
[NET-447] feat: Add root instrument filter processor and default inst…
akash-vijay-kv Apr 20, 2026
fb273fd
[NET-447] refactor: Cleanup and reformat claude agent sdk instrumenta…
akash-vijay-kv Apr 20, 2026
e6be573
[NET-447] fix: Bug in allowing netra spawned spans from root instrume…
akash-vijay-kv Mar 23, 2026
acd1deb
[NET-477] chore: refactor CrewAI instrumentation naming
akash-vijay-kv Mar 26, 2026
642d848
[NET-447] feat: Add dict support in initialization headers (#241)
akash-vijay-kv Mar 31, 2026
8865103
[NET-576] fix: Add support for dropping auto metrics (#242)
akash-vijay-kv Mar 31, 2026
a977658
[NET-497] feat: Add input and output attributes for spans
akash-vijay-kv Apr 20, 2026
0d17f73
feat: add utility to fetch trace id from current span context (#244)
muhammedrazalak Apr 8, 2026
702920f
[NET-624] feat: Add a centralized span processor to manage root span …
akash-vijay-kv Apr 20, 2026
4cf055b
[NET-654] fix: Push span entity to respective stack based on span typ…
akash-vijay-kv Apr 10, 2026
743db68
[NET-701] fix: Cleanup and fix custom ADK instrumentation (#261)
Nithish-KV Apr 24, 2026
5d7931a
fix: Remove duplicate __all__ assignments and dead code (#254)
akash-vijay-kv Apr 24, 2026
34ad477
fix: Replace import-time evaluation of start time with a lambda (#255)
akash-vijay-kv Apr 24, 2026
21eddc3
fix: Update SessionManager and span maps as thread-safe (#259)
akash-vijay-kv Apr 24, 2026
5902ce1
feat: Introduce base HTTP client provider and refactor streaming span…
akash-vijay-kv Apr 24, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,11 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog and this project adheres to Semantic Versioning.


## [0.1.82] - 2026-04-21

- Refine custom ADK instrumentation to produce a cleaner trace hierarchy, include sufficient metadata, and eliminate duplicate spans.


## [0.1.81] - 2026-04-16

- Fix root span attachment issue in tracer provider
Expand Down Expand Up @@ -239,4 +244,4 @@ The format is based on Keep a Changelog and this project adheres to Semantic Ver

- Added utility to set input and output data for any active span in a trace

[0.1.81]: https://github.com/KeyValueSoftwareSystems/netra-sdk-py/tree/main
[0.1.82]: https://github.com/KeyValueSoftwareSystems/netra-sdk-py/tree/main
25 changes: 9 additions & 16 deletions examples/07_custom_metrics/custom_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
from typing import Any, Dict, List

from dotenv import load_dotenv

from opentelemetry.metrics import Observation

from netra import Netra
Expand All @@ -44,6 +43,7 @@
# 1. Initialise the SDK with metrics enabled
# ---------------------------------------------------------------------------


def init_sdk() -> None:
Netra.init(
app_name="custom-metrics-example",
Expand All @@ -61,6 +61,7 @@ def init_sdk() -> None:
# 2. Create instruments from a named Meter
# ---------------------------------------------------------------------------


def create_instruments():
"""Return a dict of OTel instruments scoped to an 'ai_service' meter."""
meter = Netra.get_meter("ai_service")
Expand Down Expand Up @@ -195,9 +196,7 @@ def run_inference(
results.append({"prompt": prompt[:40], "error": str(exc)})

successes = sum(1 for r in results if "error" not in r)
logger.info(
"Inference workflow complete: %d/%d succeeded", successes, len(results)
)
logger.info("Inference workflow complete: %d/%d succeeded", successes, len(results))
return results


Expand Down Expand Up @@ -244,6 +243,7 @@ def _gpu_utilization_callback(_):
# 5. Async example — concurrent requests with shared instruments
# ---------------------------------------------------------------------------


@task(name="async_llm_call") # type: ignore[arg-type]
async def async_llm_call(
prompt: str,
Expand Down Expand Up @@ -285,10 +285,7 @@ async def run_async_inference(
logger.info("Starting async inference for %d prompts", len(prompts))
instruments["queue_depth"].add(len(prompts), {"source": "async_batch"})

tasks = [
async_llm_call(prompt, select_model(prompt), instruments) # type: ignore[misc]
for prompt in prompts
]
tasks = [async_llm_call(prompt, select_model(prompt), instruments) for prompt in prompts] # type: ignore[misc]
results = await asyncio.gather(*tasks, return_exceptions=True)

ok = [r for r in results if isinstance(r, dict)]
Expand All @@ -300,6 +297,7 @@ async def run_async_inference(
# 6. Dedicated-meter example — separate meter per domain
# ---------------------------------------------------------------------------


def demonstrate_multiple_meters(instruments: Dict[str, Any]) -> None:
"""Show that different parts of an app can own independent meters."""
billing_meter = Netra.get_meter("billing_service")
Expand Down Expand Up @@ -333,6 +331,7 @@ def demonstrate_multiple_meters(instruments: Dict[str, Any]) -> None:
# Main
# ---------------------------------------------------------------------------


def main() -> None:
print("=" * 65)
print(" Netra SDK — Custom Metrics Example")
Expand Down Expand Up @@ -367,18 +366,12 @@ def main() -> None:
# --- Async fan-out ---
print("\n--- Async concurrent inference ---")
async_prompts = [f"Async prompt #{i}: tell me something interesting" for i in range(8)]
async_results = asyncio.run(
run_async_inference(async_prompts, instruments) # type: ignore[misc]
)
async_results = asyncio.run(run_async_inference(async_prompts, instruments)) # type: ignore[misc]
for r in async_results:
if "error" in r:
print(f" FAIL: {r}")
else:
print(
f" {r['model']:>15} "
f"tokens={r['tokens']:<5} "
f"latency={r['latency_ms']}ms"
)
print(f" {r['model']:>15} " f"tokens={r['tokens']:<5} " f"latency={r['latency_ms']}ms")

# --- Multiple meters ---
print("\n--- Multiple independent meters ---")
Expand Down
136 changes: 114 additions & 22 deletions netra/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from netra.dashboard import Dashboard
from netra.evaluation import Evaluation
from netra.instrumentation import init_instrumentations
from netra.instrumentation.instruments import NetraInstruments
from netra.instrumentation.instruments import DEFAULT_INSTRUMENTS_FOR_ROOT, NetraInstruments
from netra.logging_utils import configure_package_logging
from netra.meter import MetricsSetup
from netra.meter import get_meter as _get_meter
Expand All @@ -23,13 +23,6 @@
from netra.tracer import Tracer
from netra.usage import Usage

__all__ = [
"Netra",
"UsageModel",
"ActionModel",
"Prompts",
]

logger = logging.getLogger(__name__)


Expand All @@ -46,6 +39,12 @@ class Netra:
_root_ctx_token = None
_metrics_enabled = False

evaluation: Optional["Evaluation"] = None
usage: Optional["Usage"] = None
dashboard: Optional["Dashboard"] = None
prompts: Optional["Prompts"] = None
simulation: Optional["Simulation"] = None

@classmethod
def is_initialized(cls) -> bool:
"""
Expand All @@ -61,7 +60,7 @@ def is_initialized(cls) -> bool:
def init(
cls,
app_name: Optional[str] = None,
headers: Optional[str] = None,
headers: Optional[str | Dict[str, str]] = None,
disable_batch: Optional[bool] = None,
trace_content: Optional[bool] = None,
debug_mode: Optional[bool] = None,
Expand All @@ -75,6 +74,7 @@ def init(
enable_metrics: Optional[bool] = None,
metrics_export_interval_ms: Optional[int] = None,
export_auto_metrics: Optional[bool] = None,
root_instruments: Optional[Set[NetraInstruments]] = None,
) -> None:
"""
Thread-safe initialization of Netra.
Expand All @@ -95,6 +95,12 @@ def init(
enable_metrics: Whether to enable OTLP custom metrics export (default: False)
metrics_export_interval_ms: Metrics push interval in milliseconds (default: 60000)
export_auto_metrics: Whether to export OTel auto-instrumented metrics (default: False)
root_instruments: Set of instruments allowed to produce root-level
spans. When a root span is blocked, its entire subtree is
discarded. Resolution priority:
1. Explicit ``root_instruments`` value if provided.
2. The ``instruments`` value if provided (but ``root_instruments`` is not).
3. ``DEFAULT_INSTRUMENTS_FOR_ROOT`` if neither is provided.

Returns:
None
Expand Down Expand Up @@ -124,8 +130,17 @@ def init(
# Configure logging based on debug mode
configure_package_logging(debug_mode=cfg.debug_mode)

# Resolve root_instruments → set of instrumentation-name strings.
resolved_root: Optional[Set[str]] = None
if root_instruments is not None:
resolved_root = {m.value for m in root_instruments}
elif instruments is not None:
resolved_root = {m.value for m in instruments}
else:
resolved_root = {m.value for m in DEFAULT_INSTRUMENTS_FOR_ROOT}

# Initialize tracer (OTLP exporter, span processor, resource)
Tracer(cfg)
Tracer(cfg, root_instrument_names=resolved_root)

# Initialize metrics pipeline when explicitly enabled
if cfg.enable_metrics:
Expand All @@ -137,38 +152,38 @@ def init(

# Initialize evaluation client and expose as class attribute
try:
cls.evaluation = Evaluation(cfg) # type:ignore[attr-defined]
cls.evaluation = Evaluation(cfg)
except Exception as e:
logger.warning("Failed to initialize evaluation client: %s", e, exc_info=True)
cls.evaluation = None # type:ignore[attr-defined]
cls.evaluation = None

# Initialize usage client and expose as class attribute
try:
cls.usage = Usage(cfg) # type:ignore[attr-defined]
cls.usage = Usage(cfg)
except Exception as e:
logger.warning("Failed to initialize usage client: %s", e, exc_info=True)
cls.usage = None # type:ignore[attr-defined]
cls.usage = None

# Initialize dashboard client and expose as class attribute
try:
cls.dashboard = Dashboard(cfg) # type:ignore[attr-defined]
cls.dashboard = Dashboard(cfg)
except Exception as e:
logger.warning("Failed to initialize dashboard client: %s", e, exc_info=True)
cls.dashboard = None # type:ignore[attr-defined]
cls.dashboard = None

# Initialize prompts client and expose as class attribute
try:
cls.prompts = Prompts(cfg) # type:ignore[attr-defined]
cls.prompts = Prompts(cfg)
except Exception as e:
logger.warning("Failed to initialize prompts client: %s", e, exc_info=True)
cls.prompts = None # type:ignore[attr-defined]
cls.prompts = None

# Initialize simulation client and expose as class attribute
try:
cls.simulation = Simulation(cfg) # type:ignore[attr-defined]
cls.simulation = Simulation(cfg)
except Exception as e:
logger.warning("Failed to initialize simulation client: %s", e, exc_info=True)
cls.simulation = None # type:ignore[attr-defined]
cls.simulation = None

# Instrument all supported modules
init_instrumentations(
Expand Down Expand Up @@ -241,6 +256,33 @@ def shutdown(cls) -> None:
except Exception:
pass

# Close HTTP clients to release connection-pool resources
if cls.evaluation is not None:
try:
cls.evaluation._client.close()
except Exception:
pass
if cls.usage is not None:
try:
cls.usage._client.close()
except Exception:
pass
if cls.dashboard is not None:
try:
cls.dashboard._client.close()
except Exception:
pass
if cls.prompts is not None:
try:
cls.prompts._client.close()
except Exception:
pass
if cls.simulation is not None:
try:
cls.simulation._client.close()
except Exception:
pass

@classmethod
def get_meter(cls, name: str = "netra", version: Optional[str] = None) -> otel_metrics.Meter:
"""
Expand Down Expand Up @@ -370,12 +412,52 @@ def add_conversation(cls, conversation_type: ConversationType, role: str, conten
"""
SessionManager.add_conversation(conversation_type=conversation_type, role=role, content=content)

@classmethod
def set_input(cls, value: Any) -> None:
    """
    Set the input attribute on the current active span.

    Delegates to ``SessionManager.set_input``; the value is recorded on
    whichever span is active at call time. No-op semantics when no span is
    active depend on SessionManager — TODO confirm.

    Args:
        value: The input value to record
    """
    SessionManager.set_input(value)

@classmethod
def set_output(cls, value: Any) -> None:
    """
    Set the output attribute on the current active span.

    Delegates to ``SessionManager.set_output``; the value is recorded on
    whichever span is active at call time. No-op semantics when no span is
    active depend on SessionManager — TODO confirm.

    Args:
        value: The output value to record
    """
    SessionManager.set_output(value)

@classmethod
def set_root_input(cls, value: Any) -> None:
    """
    Set the input attribute on the root span of the current trace.

    Delegates to ``SessionManager.set_root_input``, which resolves the root
    span of the active trace; behavior when no trace is active depends on
    SessionManager — TODO confirm.

    Args:
        value: The input value to record
    """
    SessionManager.set_root_input(value)

@classmethod
def set_root_output(cls, value: Any) -> None:
    """
    Set the output attribute on the root span of the current trace.

    Delegates to ``SessionManager.set_root_output``, which resolves the root
    span of the active trace; behavior when no trace is active depends on
    SessionManager — TODO confirm.

    Args:
        value: The output value to record
    """
    SessionManager.set_root_output(value)

@classmethod
def start_span(
cls,
name: str,
attributes: Optional[Dict[str, str]] = None,
module_name: str = "combat_sdk",
module_name: str = Config.SDK_NAME,
as_type: Optional[SpanType] = SpanType.SPAN,
) -> SpanWrapper:
"""
Expand All @@ -392,5 +474,15 @@ def start_span(
"""
return SpanWrapper(name, attributes, module_name, as_type=as_type)

@classmethod
def get_trace_id(cls) -> Optional[str]:
    """
    Fetch the trace ID associated with the currently active span.

    The lookup is delegated to ``SessionManager``, which inspects the
    current span context.

    Returns:
        Optional[str]: The 32-character lowercase hex trace ID, or ``None``
        when there is no active span.
    """
    current_trace_id = SessionManager.get_trace_id()
    return current_trace_id


__all__ = ["Netra", "UsageModel", "ActionModel", "SpanType", "EvaluationScore", "Prompts", "ConversationType"]
__all__ = ["Netra", "UsageModel", "ActionModel", "SpanType", "Prompts", "ConversationType"]
Loading