Advanced Rust SDK Usage
Advanced Rust SDK usage for fine-grained control over agent construction, provider configuration, and sub-agent management.

AgentBuilder

For most use cases, prefer SessionService via build_ephemeral_service() (see overview). AgentBuilder is used internally by AgentFactory::build_agent(). Direct usage is only needed when bypassing the session service entirely.
use meerkat::AgentBuilder;

// Construct an agent directly, bypassing the session service.
// `llm_client`, `tool_dispatcher`, and `session_store` must already exist;
// they are passed to `build()` rather than configured on the builder.
let agent = AgentBuilder::new()
    .model("claude-sonnet-4-5")
    .system_prompt("You are a helpful assistant.")
    .max_tokens_per_turn(4096)
    .temperature(0.7)
    .build(llm_client, tool_dispatcher, session_store);
| Method | Description | Default |
| --- | --- | --- |
| `model(name)` | Set the model identifier | `"claude-opus-4-6"` |
| `system_prompt(prompt)` | Set the system prompt | None |
| `max_tokens_per_turn(n)` | Max tokens per LLM call | 8192 |
| `temperature(t)` | Sampling temperature (0.0–1.0) | None (model default) |
| `budget(limits)` | Set resource limits | Unlimited |
| `retry_policy(policy)` | Configure retry behavior | 3 retries with backoff |
| `resume_session(session)` | Resume from existing session | New session |
| `provider_params(json)` | Provider-specific parameters | None |
| `with_hook_engine(engine)` | Attach a hook engine | None |
| `with_hook_run_overrides(overrides)` | Run-scoped hook overrides | Empty |

Providers

Built-in clients for major LLM providers:
use meerkat::AnthropicClient;

// Construct with an explicit API key.
let client = AnthropicClient::new("sk-ant-...".to_string());
// Or read the key from the environment (fallible).
let client = AnthropicClient::from_env()?;
// Or route requests through a custom base URL (e.g. a proxy).
let client = AnthropicClient::new("key".to_string())
    .with_base_url("https://my-proxy.example.com".to_string());

Provider parameters

Pass provider-specific options via AgentBuildConfig:
// Provider-specific options are forwarded as raw JSON; the key names
// are defined by the provider, not by the SDK.
let mut build_config = AgentBuildConfig::new("claude-sonnet-4-5".into());
build_config.provider_params = Some(json!({"thinking_budget": 10000}));
use async_trait::async_trait;
use meerkat::{AgentLlmClient, AgentError, LlmStreamResult, Message, ToolDef, StopReason, Usage};
use serde_json::Value;

/// Minimal skeleton of a custom LLM backend.
struct MyCustomClient {
    api_key: String,
}

#[async_trait]
impl AgentLlmClient for MyCustomClient {
    /// Produces one model response for the given conversation state.
    ///
    /// This stub ignores its inputs and returns a canned reply; a real
    /// implementation would call the provider's API here.
    async fn stream_response(
        &self,
        messages: &[Message],
        tools: &[ToolDef],
        max_tokens: u32,
        temperature: Option<f32>,
        provider_params: Option<&Value>,
    ) -> Result<LlmStreamResult, AgentError> {
        // Call your LLM API here.
        let usage = Usage {
            input_tokens: 10,
            output_tokens: 20,
            ..Default::default()
        };
        let result = LlmStreamResult {
            content: String::from("Response text"),
            tool_calls: Vec::new(),
            stop_reason: StopReason::EndTurn,
            usage,
        };
        Ok(result)
    }

    /// Stable identifier reported for this backend.
    fn provider(&self) -> &'static str {
        "my-provider"
    }
}

Budget configuration

use meerkat::BudgetLimits;
use std::time::Duration;

// Cap total tokens, wall-clock duration, and tool invocations for a run.
let budget = BudgetLimits::default()
    .with_max_tokens(100_000)
    .with_max_duration(Duration::from_secs(300))
    .with_max_tool_calls(50);

// Attach the budget to the build configuration.
let mut build_config = AgentBuildConfig::new("claude-sonnet-4-5".into());
build_config.budget = Some(budget);

Retry configuration

use meerkat::RetryPolicy;
use std::time::Duration;

// Up to 5 retries with exponential backoff: 500ms, 1s, 2s, ...
// each delay doubled (multiplier 2.0) and capped at 30s.
let retry = RetryPolicy {
    max_retries: 5,
    initial_delay: Duration::from_millis(500),
    max_delay: Duration::from_secs(30),
    multiplier: 2.0,
};

Sub-agent spawning

Spawn parallel sub-agents for concurrent work:
use meerkat::{SpawnSpec, ContextStrategy, ToolAccessPolicy, BudgetLimits};

let spec = SpawnSpec {
    prompt: "Analyze this data...".to_string(),
    // System prompt for the sub-agent only.
    system_prompt: Some("You are a data analyst.".to_string()),
    // LastN(5): presumably forwards the last 5 parent context messages — confirm in docs.
    context: ContextStrategy::LastN(5),
    // Restrict the sub-agent to an explicit allow-list of tools.
    tool_access: ToolAccessPolicy::AllowList(vec!["read_file".to_string()]),
    // Independent budget for the sub-agent.
    budget: BudgetLimits::default().with_max_tokens(10000),
};

// Spawning is async and returns an operation id; results are gathered separately.
let op_id = agent.spawn(spec).await?;
let results = agent.collect_sub_agent_results().await;
use meerkat::{ForkBranch, ForkBudgetPolicy, ToolAccessPolicy};

// Fork the session into parallel branches that explore different strategies.
let branches = vec![
    ForkBranch {
        name: "approach_a".to_string(),
        prompt: "Try approach A...".to_string(),
        // None: presumably inherits the parent's tool access — confirm in docs.
        tool_access: None,
    },
    ForkBranch {
        name: "approach_b".to_string(),
        prompt: "Try approach B...".to_string(),
        // Deny a specific tool for this branch only.
        tool_access: Some(ToolAccessPolicy::DenyList(vec!["dangerous_tool".to_string()])),
    },
];

// ForkBudgetPolicy::Split: presumably divides the remaining budget across branches.
let op_ids = agent.fork(branches, ForkBudgetPolicy::Split).await?;

Hook helpers

use meerkat::{create_default_hook_engine, resolve_layered_hooks_config};

let config = meerkat::Config::load().await?;
let cwd = std::env::current_dir()?;

// Called internally by AgentFactory::build_agent()
// Resolve hook configuration layered from the working directory and global
// config, then construct the default hook engine from the merged result.
let layered_hooks = resolve_layered_hooks_config(&cwd, &config).await;
let hook_engine = create_default_hook_engine(layered_hooks);

Complete example

use meerkat::{AgentFactory, Config, build_ephemeral_service};
use meerkat::service::{CreateSessionRequest, StartTurnRequest, SessionService};

/// End-to-end example: build an ephemeral session service, run an initial
/// turn, continue the session with a follow-up turn, then read billing info.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = Config::load().await?;
    let cwd = std::env::current_dir()?;
    let service = build_ephemeral_service(AgentFactory::new(cwd), config, 64);

    // First turn: creating the session also runs the initial prompt.
    let request = CreateSessionRequest {
        model: "claude-sonnet-4-5".into(),
        prompt: "What is 25 * 17?".into(),
        system_prompt: Some("You are a helpful math assistant.".into()),
        max_tokens: Some(2048),
        event_tx: None,
        host_mode: false,
    };
    let created = service.create_session(request).await?;
    println!("Response: {}", created.text);

    // Second turn continues the same session.
    let turn = StartTurnRequest {
        prompt: "Now divide that result by 5.".into(),
        event_tx: None,
        host_mode: false,
    };
    let followup = service.start_turn(&created.session_id, turn).await?;
    println!("Follow-up: {}", followup.text);

    // Inspect the session's accumulated token usage.
    let view = service.read(&followup.session_id).await?;
    println!("Total tokens: {}", view.billing.total_tokens);

    Ok(())
}

Python and TypeScript SDKs

Both communicate with a local rkat rpc subprocess over JSON-RPC 2.0 — no native bindings required.

See also