Agent API Reference
Complete API reference for the Agent class and related components.
Import Statement
from aakit import Agent, AgentConfig, AgentMetrics

Agent Class
The main Agent class for creating AI agents
__init__
Agent(
name: str,
instruction: str,
model: Union[str, List[str]] = "gpt-4",
tools: List[Union[Callable, str, Agent]] = None,
memory: Union[str, MemoryBackend] = None,
reasoning: Union[str, ReasoningPattern] = "simple",
config: AgentConfig = None,
**kwargs
)

Initialize a new Agent instance.
Parameters
- name (str) - Unique identifier for the agent
- instruction (str) - System prompt defining agent behavior
- model (Union[str, List[str]]) - LLM model(s) to use
- tools (List) - Functions, URLs, or other agents as tools
- memory (Union[str, MemoryBackend]) - Memory backend configuration
- reasoning (Union[str, ReasoningPattern]) - Reasoning pattern to use
- config (AgentConfig) - Additional configuration options

Example
python
agent = Agent(
name="assistant",
instruction="You are a helpful AI assistant",
model="gpt-4",
tools=[web_search, calculator],
memory="redis"
)

chat
def chat(message: str, session_id: str = None, **kwargs) -> str

Send a message to the agent and get a response (synchronous).
Parameters
- message (str) - The message to send to the agent
- session_id (str) - Optional session ID for conversation context
- **kwargs (dict) - Additional parameters passed to the model

Example
python
# Synchronous - no async/await needed!
response = agent.chat(
"What's the weather like?",
session_id="user_123"
)

achat
async def achat(message: str, session_id: str = None, **kwargs) -> str

Send a message to the agent and get a response (asynchronous).
Parameters
- message (str) - The message to send to the agent
- session_id (str) - Optional session ID for conversation context
- **kwargs (dict) - Additional parameters passed to the model

Example
python
# Asynchronous version
response = await agent.achat(
"What's the weather like?",
session_id="user_123"
)

stream_chat
def stream_chat(message: str, session_id: str = None, **kwargs) -> Iterator[str]

Stream responses token by token (synchronous).
Parameters
- message (str) - The message to send to the agent
- session_id (str) - Optional session ID for conversation context
- **kwargs (dict) - Additional parameters passed to the model

Example
python
# Synchronous streaming
for chunk in agent.stream_chat("Tell me a story"):
    print(chunk, end="")

astream_chat
async def astream_chat(message: str, session_id: str = None, **kwargs) -> AsyncIterator[str]

Stream responses token by token (asynchronous).
Parameters
- message (str) - The message to send to the agent
- session_id (str) - Optional session ID for conversation context
- **kwargs (dict) - Additional parameters passed to the model

Example
python
# Asynchronous streaming
async for chunk in agent.astream_chat("Tell me a story"):
    print(chunk, end="")

add_tool
def add_tool(tool: Union[Callable, str, Agent]) -> None

Add a tool to the agent dynamically.
Parameters
- tool (Union[Callable, str, Agent]) - Function, MCP URL, or another agent

Example
python
def new_tool(query: str) -> str:
return f"Processed: {query}"
agent.add_tool(new_tool)

serve_mcp
def serve_mcp(
port: int = 8080,
name: str = None,
description: str = None,
**kwargs
) -> None

Serve the agent as an MCP server.
Parameters
- port (int) - Port to serve on (default: 8080)
- name (str) - Server name for MCP metadata
- description (str) - Server description
- **kwargs (dict) - Additional server configuration

Example
python
agent.serve_mcp(
port=8080,
name="My Assistant",
description="AI assistant with web search"
)

get_metrics
def get_metrics() -> AgentMetrics

Get performance metrics for the agent.
Example
python
metrics = agent.get_metrics()
print(f"Total requests: {metrics.total_requests}")
print(f"Average latency: {metrics.avg_latency}ms")

AgentConfig Class
Configuration options for agents
__init__
AgentConfig(
temperature: float = 0.7,
max_tokens: int = None,
timeout: int = 30,
retry_max: int = 3,
retry_delay: float = 1.0,
cache_ttl: int = 3600,
rate_limit: RateLimiter = None,
**kwargs
)

Configure agent behavior.
Parameters
- temperature (float) - Sampling temperature (0-2)
- max_tokens (int) - Maximum response length
- timeout (int) - Request timeout in seconds
- retry_max (int) - Maximum retry attempts
- retry_delay (float) - Initial retry delay
- cache_ttl (int) - Cache time-to-live in seconds
- rate_limit (RateLimiter) - Rate limiting configuration

Example
python
config = AgentConfig(
temperature=0.8,
max_tokens=2000,
timeout=60,
cache_ttl=7200
)

Memory Methods
Methods for managing agent memory
clear_memory
async def clear_memory(session_id: str = None) -> None

Clear conversation memory.
Parameters
- session_id (str) - Session to clear (None clears all)

Example
python
await agent.clear_memory(session_id="user_123")

get_history
async def get_history(session_id: str) -> List[Message]

Get conversation history.
Parameters
- session_id (str) - Session ID to retrieve history for

Example
python
history = await agent.get_history("user_123")
for msg in history:
    print(f"{msg.role}: {msg.content}")

add_to_memory
async def add_to_memory(
session_id: str,
role: str,
content: str
) -> None

Add a message to memory manually.
Parameters
- session_id (str) - Session ID
- role (str) - Message role (user/assistant/system)
- content (str) - Message content

Example
python
await agent.add_to_memory(
"user_123",
"system",
"User prefers concise answers"
)Complete Example
python
from aakit import Agent, AgentConfig, tool
import asyncio
# Define a custom tool
@tool(description="Search for information on the web")
def web_search(query: str) -> str:
# Implementation here
return f"Search results for: {query}"
# Configure the agent
config = AgentConfig(
temperature=0.7,
max_tokens=2000,
timeout=30,
cache_ttl=3600
)
# Create the agent
agent = Agent(
name="research_assistant",
instruction="""You are a research assistant that helps users find
and analyze information. Be thorough and cite sources.""",
model=["gpt-4", "claude-3-opus"], # Fallback chain
tools=[web_search],
memory="redis",
reasoning="chain_of_thought",
config=config
)
# Use the agent
async def main():
# NEW: Synchronous API - no async/await needed!
# Simple chat
response = agent.chat("What is quantum computing?")
print(response)
# Streaming response
print("\nStreaming response:")
for chunk in agent.stream_chat("Explain the basics of machine learning"):
print(chunk, end="", flush=True)
# Get metrics
metrics = agent.get_metrics()
print(f"\n\nTotal tokens used: {metrics.total_tokens}")
# Async version (when you need it)
response = await agent.achat("What is AI?")
async for chunk in agent.astream_chat("Write a poem"):
print(chunk, end="")
# Run the example
asyncio.run(main())
# Serve as MCP server
if __name__ == "__main__":
agent.serve_mcp(
port=8080,
name="Research Assistant",
description="AI assistant with web search capabilities"
)

Type Definitions
Message
class Message:
role: str # "user", "assistant", "system"
content: str
timestamp: datetime
    metadata: dict

AgentMetrics
class AgentMetrics:
total_requests: int
total_tokens: int
avg_latency: float
error_rate: float
    cache_hit_rate: float

Environment Variables
- OPENAI_API_KEY - OpenAI API key for GPT models
- ANTHROPIC_API_KEY - Anthropic API key for Claude models
- AGENTZ_LOG_LEVEL - Logging level (DEBUG, INFO, WARNING, ERROR)
- AGENTZ_CACHE_BACKEND - Default cache backend