Log x711 tool call costs alongside your W&B run metrics. Web search, prices, Hive memory, tx simulation — pay per call, track per experiment. The tool layer for production agent workloads.
W&B tracks your model metrics. x711 tracks your tool costs. Together: full observability over what your agent did, what it paid, and what it learned — per run, per sweep, per experiment.
import os
import time

import httpx
import wandb
# Initialize the W&B run that all tool metrics are logged into.
run = wandb.init(project="agent-eval", config={"tool_provider": "x711"})
# Read the API key from the environment so it is never committed to source
# control; falls back to the original placeholder for copy-paste demos.
x711_key = os.environ.get("X711_API_KEY", "x711_YOUR_KEY")
# Running total of USDC spent across all tool calls, updated by x711_call().
total_tool_cost = 0.0
def x711_call(tool: str, **kwargs):
    """Invoke an x711 tool via the refuel endpoint and log metrics to W&B.

    Args:
        tool: Name of the x711 tool to invoke (e.g. "web_search").
        **kwargs: Tool-specific arguments forwarded in the request body.

    Returns:
        The tool's "result" payload, or None if the response has no result.

    Raises:
        httpx.HTTPStatusError: If the endpoint returns a 4xx/5xx status.
    """
    global total_tool_cost
    t0 = time.time()
    resp = httpx.post(
        "https://x711.io/api/refuel",
        headers={"X-API-Key": x711_key},
        json={"tool": tool, **kwargs},
        timeout=30.0,  # explicit cap so one stuck call cannot hang the eval loop
    )
    # Fail loudly on HTTP errors instead of parsing an error body as JSON
    # and silently logging the call as cost 0.
    resp.raise_for_status()
    r = resp.json()
    latency = time.time() - t0
    cost = r.get("cost_usdc", 0.0)
    total_tool_cost += cost
    wandb.log({
        f"tool/{tool}/latency_ms": latency * 1000,
        f"tool/{tool}/cost_usdc": cost,
        "tool/total_cost_usdc": total_tool_cost,
    })
    return r.get("result")
# Use in your eval loop
# Each call posts to the x711 refuel endpoint, logs per-tool latency/cost
# metrics to the active W&B run, and returns the tool's "result" payload.
search_result = x711_call("web_search", query="GPT-5 benchmark results")
price = x711_call("price_feed", query="ETH")
# Finish the W&B run so the final tool-cost metrics are flushed and uploaded.
run.finish()
# One-time onboarding request: registers this integration by name/framework
# with the x711 service.
# NOTE(review): this is a shell command, not Python — as pasted after the
# script above it would be a syntax error; keep it in a README or separate
# setup script.
curl -X POST https://x711.io/api/onboard \
-H "Content-Type: application/json" \
-d '{"name":"wandb-agent-eval","framework":"weights-biases"}'