Agent API Reference

Complete API reference for the Agent class and related components.

Import Statement

from aakit import Agent, AgentConfig, AgentMetrics

Agent Class

The main Agent class for creating AI agents

__init__

Agent( name: str, instruction: str, model: Union[str, List[str]] = "gpt-4", tools: List[Union[Callable, str, Agent]] = None, memory: Union[str, MemoryBackend] = None, reasoning: Union[str, ReasoningPattern] = "simple", config: AgentConfig = None, **kwargs )

Initialize a new Agent instance

Parameters

name (str) — Unique identifier for the agent
instruction (str) — System prompt defining agent behavior
model (Union[str, List[str]]) — LLM model(s) to use
tools (List) — Functions, URLs, or other agents as tools
memory (Union[str, MemoryBackend]) — Memory backend configuration
reasoning (Union[str, ReasoningPattern]) — Reasoning pattern to use
config (AgentConfig) — Additional configuration options

Example

python
agent = Agent(
    name="assistant",
    instruction="You are a helpful AI assistant",
    model="gpt-4",
    tools=[web_search, calculator],
    memory="redis"
)

chat

def chat(message: str, session_id: str = None, **kwargs) -> str

Send a message to the agent and get a response (synchronous)

Parameters

message (str) — The message to send to the agent
session_id (str) — Optional session ID for conversation context
**kwargs (dict) — Additional parameters passed to the model

Example

python
# Synchronous - no async/await needed!
response = agent.chat(
    "What's the weather like?",
    session_id="user_123"
)

achat

async def achat(message: str, session_id: str = None, **kwargs) -> str

Send a message to the agent and get a response (asynchronous)

Parameters

message (str) — The message to send to the agent
session_id (str) — Optional session ID for conversation context
**kwargs (dict) — Additional parameters passed to the model

Example

python
# Asynchronous version
response = await agent.achat(
    "What's the weather like?",
    session_id="user_123"
)

stream_chat

def stream_chat(message: str, session_id: str = None, **kwargs) -> Iterator[str]

Stream responses token by token (synchronous)

Parameters

message (str) — The message to send to the agent
session_id (str) — Optional session ID for conversation context
**kwargs (dict) — Additional parameters passed to the model

Example

python
# Synchronous streaming
for chunk in agent.stream_chat("Tell me a story"):
    print(chunk, end="")

astream_chat

async def astream_chat(message: str, session_id: str = None, **kwargs) -> AsyncIterator[str]

Stream responses token by token (asynchronous)

Parameters

message (str) — The message to send to the agent
session_id (str) — Optional session ID for conversation context
**kwargs (dict) — Additional parameters passed to the model

Example

python
# Asynchronous streaming
async for chunk in agent.astream_chat("Tell me a story"):
    print(chunk, end="")

add_tool

def add_tool(tool: Union[Callable, str, Agent]) -> None

Add a tool to the agent dynamically

Parameters

tool (Union[Callable, str, Agent]) — Function, MCP URL, or another agent

Example

python
def new_tool(query: str) -> str:
    return f"Processed: {query}"

agent.add_tool(new_tool)

serve_mcp

def serve_mcp( port: int = 8080, name: str = None, description: str = None, **kwargs ) -> None

Serve the agent as an MCP server

Parameters

port (int) — Port to serve on (default: 8080)
name (str) — Server name for MCP metadata
description (str) — Server description
**kwargs (dict) — Additional server configuration

Example

python
agent.serve_mcp(
    port=8080,
    name="My Assistant",
    description="AI assistant with web search"
)

get_metrics

def get_metrics() -> AgentMetrics

Get performance metrics for the agent

Example

python
metrics = agent.get_metrics()
print(f"Total requests: {metrics.total_requests}")
print(f"Average latency: {metrics.avg_latency}ms")

AgentConfig Class

Configuration options for agents

__init__

AgentConfig( temperature: float = 0.7, max_tokens: int = None, timeout: int = 30, retry_max: int = 3, retry_delay: float = 1.0, cache_ttl: int = 3600, rate_limit: RateLimiter = None, **kwargs )

Configure agent behavior

Parameters

temperature (float) — Sampling temperature (0–2)
max_tokens (int) — Maximum response length in tokens
timeout (int) — Request timeout in seconds
retry_max (int) — Maximum retry attempts
retry_delay (float) — Initial retry delay in seconds
cache_ttl (int) — Cache time-to-live in seconds
rate_limit (RateLimiter) — Rate limiting configuration

Example

python
config = AgentConfig(
    temperature=0.8,
    max_tokens=2000,
    timeout=60,
    cache_ttl=7200
)

Memory Methods

Methods for managing agent memory

clear_memory

async def clear_memory(session_id: str = None) -> None

Clear conversation memory

Parameters

session_id (str) — Session to clear (None clears all sessions)

Example

python
await agent.clear_memory(session_id="user_123")

get_history

async def get_history(session_id: str) -> List[Message]

Get conversation history

Parameters

session_id (str) — Session ID to retrieve history for

Example

python
history = await agent.get_history("user_123")
for msg in history:
    print(f"{msg.role}: {msg.content}")

add_to_memory

async def add_to_memory( session_id: str, role: str, content: str ) -> None

Add a message to memory manually

Parameters

session_id (str) — Session ID
role (str) — Message role ("user", "assistant", or "system")
content (str) — Message content

Example

python
await agent.add_to_memory(
    "user_123",
    "system",
    "User prefers concise answers"
)

Complete Example

python
from aakit import Agent, AgentConfig, tool
import asyncio

# Define a custom tool
@tool(description="Search for information on the web")
def web_search(query: str) -> str:
    # Implementation here
    return f"Search results for: {query}"

# Configure the agent
config = AgentConfig(
    temperature=0.7,
    max_tokens=2000,
    timeout=30,
    cache_ttl=3600
)

# Create the agent
agent = Agent(
    name="research_assistant",
    instruction="""You are a research assistant that helps users find 
    and analyze information. Be thorough and cite sources.""",
    model=["gpt-4", "claude-3-opus"],  # Fallback chain
    tools=[web_search],
    memory="redis",
    reasoning="chain_of_thought",
    config=config
)

# Use the agent
async def main():
    # NEW: Synchronous API - no async/await needed!
    # Simple chat
    response = agent.chat("What is quantum computing?")
    print(response)
    
    # Streaming response
    print("\nStreaming response:")
    for chunk in agent.stream_chat("Explain the basics of machine learning"):
        print(chunk, end="", flush=True)
    
    # Get metrics
    metrics = agent.get_metrics()
    print(f"\n\nTotal tokens used: {metrics.total_tokens}")
    
    # Async version (when you need it)
    response = await agent.achat("What is AI?")
    async for chunk in agent.astream_chat("Write a poem"):
        print(chunk, end="")

# Run the example
asyncio.run(main())

# Serve as MCP server
if __name__ == "__main__":
    agent.serve_mcp(
        port=8080,
        name="Research Assistant",
        description="AI assistant with web search capabilities"
    )

Type Definitions

Message

class Message:
  role: str # "user", "assistant", "system"
  content: str
  timestamp: datetime
  metadata: dict

AgentMetrics

class AgentMetrics:
  total_requests: int
  total_tokens: int
  avg_latency: float
  error_rate: float
  cache_hit_rate: float

Environment Variables

  • OPENAI_API_KEY - OpenAI API key for GPT models
  • ANTHROPIC_API_KEY - Anthropic API key for Claude models
  • AGENTZ_LOG_LEVEL - Logging level (DEBUG, INFO, WARNING, ERROR)
  • AGENTZ_CACHE_BACKEND - Default cache backend

  Note (review): the `AGENTZ_` prefix does not match the `aakit` package name — confirm whether these variable names are legacy or should read `AAKIT_`.

Next Steps

Continue exploring the API reference for tools and tool creation.

Continue to Tools API →