Python Interface — 使用基于 LLM 的代理
Updated February 24, 2026
使用基于 LLM 的代理
通过与大型语言模型(LLM)集成构建 AI 驱动的代理 - AgentConfig、模型提供者、提示模板和智能代理行为。
使用基于LLM的代理
OpenAgents 提供强大的 LLM 集成能力,使您能够创建 AI 驱动的代理,这些代理能够理解自然语言、生成智能响应,并与用户及其他代理进行动态交互。
AgentConfig 概述
AgentConfig 类是配置由 LLM 驱动的代理的基础:
from openagents.models.agent_config import AgentConfig
# Basic LLM agent configuration
config = AgentConfig(
model_name="gpt-4o-mini",
instruction="You are a helpful AI assistant in an OpenAgents network.",
provider="openai"
)支持的 LLM 提供商
OpenAgents 开箱即支持多个 LLM 提供商:
OpenAI 模型
from openagents.models.agent_config import create_openai_config
# OpenAI GPT models
openai_config = create_openai_config(
model_name="gpt-4o-mini", # or "gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"
instruction="You are a helpful assistant that collaborates with other agents.",
api_key="your-openai-api-key" # or set OPENAI_API_KEY env var
)
# Advanced OpenAI configuration
advanced_config = AgentConfig(
model_name="gpt-4",
instruction="You are an expert AI researcher and collaborator.",
provider="openai",
api_key="your-api-key",
max_iterations=5,
react_to_all_messages=False,
system_prompt_template="You are {instruction}. Current context: {context}",
user_prompt_template="Human: {message}\nAssistant: "
)Anthropic Claude 模型
from openagents.models.agent_config import create_claude_config
# Claude models
claude_config = create_claude_config(
model_name="claude-3-5-sonnet-20241022", # or "claude-3-5-haiku-20241022"
instruction="You are Claude, an AI assistant helping in collaborative work.",
api_key="your-anthropic-api-key" # or set ANTHROPIC_API_KEY env var
)
# Custom Claude configuration
claude_custom = AgentConfig(
model_name="claude-3-opus-20240229",
instruction="You are a research-focused AI assistant.",
provider="claude",
api_key="your-api-key",
triggers=[
{
"event": "thread.channel_message.posted",
"instruction": "Analyze the message and provide helpful insights"
}
]
)Google Gemini 模型
from openagents.models.agent_config import create_gemini_config

# Gemini models via the convenience factory.
gemini_config = create_gemini_config(
    model_name="gemini-1.5-pro",  # or "gemini-1.5-flash", "gemini-pro"
    instruction="You are a helpful AI assistant powered by Google's Gemini.",
    api_key="your-google-api-key"  # or set GOOGLE_API_KEY env var
)
其他提供商
# DeepSeek
deepseek_config = AgentConfig(
model_name="deepseek-chat",
instruction="You are an AI assistant specializing in code and reasoning.",
provider="deepseek",
api_base="https://api.deepseek.com/v1",
api_key="your-deepseek-api-key"
)
# Custom provider
custom_config = AgentConfig(
model_name="custom-model",
instruction="You are a specialized AI assistant.",
provider="custom",
api_base="https://your-api.com/v1",
api_key="your-api-key"
)WorkerAgent 与 LLM 集成
基本的 LLM 驱动代理
创建使用 LLM 推理来生成响应的代理:
from openagents.agents.worker_agent import WorkerAgent, ChannelMessageContext
from openagents.models.agent_config import AgentConfig
class LLMAssistantAgent(WorkerAgent):
"""An AI assistant agent powered by LLMs."""
default_agent_id = "ai-assistant"
async def on_startup(self):
"""Announce the agent's capabilities."""
ws = self.workspace()
await ws.channel("general").post(
"🤖 AI Assistant is online! I can help with:\n"
"• Answering questions\n"
"• Analyzing information\n"
"• Providing recommendations\n"
"• Collaborating on tasks\n\n"
"Just mention me or ask for help!"
)
async def on_channel_post(self, context: ChannelMessageContext):
"""Respond to channel messages using LLM reasoning."""
message = context.incoming_event.payload.get('content', {}).get('text', '')
# Check if we should respond (mentioned or help requested)
should_respond = (
f"@{self.agent_id}" in message or
any(keyword in message.lower() for keyword in [
'help', 'question', 'advice', 'assist', 'recommend'
])
)
if should_respond:
# Use LLM to generate response
await self.run_agent(
context=context,
instruction="Provide a helpful response to this message"
)
async def on_direct(self, context):
"""Handle direct messages with LLM reasoning."""
# Always respond to direct messages
await self.run_agent(
context=context,
instruction="Respond helpfully to this direct message"
)
# Usage
if __name__ == "__main__":
agent_config = AgentConfig(
model_name="gpt-4o-mini",
instruction=(
"You are a helpful AI assistant in an OpenAgents network. "
"You collaborate with humans and other agents to solve problems. "
"Be friendly, helpful, and concise in your responses. "
"When someone asks for help, provide clear and actionable advice."
),
provider="openai",
max_iterations=3
)
agent = LLMAssistantAgent(agent_config=agent_config)
agent.start(network_host="localhost", network_port=8700)
agent.wait_for_stop()专门的 LLM 代理
创建具有特定专长的代理:
class DataAnalystAgent(WorkerAgent):
    """AI agent specialized in data analysis."""

    default_agent_id = "data-analyst-ai"

    def __init__(self):
        agent_config = AgentConfig(
            model_name="gpt-4",
            instruction=(
                "You are an expert data analyst AI. You help analyze data, "
                "create insights, identify patterns, and make data-driven recommendations. "
                "You work with CSV files, JSON data, and statistical analysis. "
                "Always provide clear explanations and actionable insights."
            ),
            provider="openai",
            max_iterations=5,
            triggers=[
                {
                    "event": "workspace.file.uploaded",
                    "instruction": "Analyze the uploaded file if it contains data"
                }
            ]
        )
        super().__init__(agent_config=agent_config)

    async def on_file_received(self, context):
        """Analyze uploaded data files."""
        filename = context.file_name.lower()
        # Only analyze data-bearing file formats.
        if any(ext in filename for ext in ['.csv', '.json', '.xlsx']):
            # Use the LLM to inspect the data file.
            await self.run_agent(
                context=context,
                instruction="Analyze this data file and provide insights"
            )

    async def on_channel_post(self, context: ChannelMessageContext):
        """Respond to data-related questions."""
        message = context.incoming_event.payload.get('content', {}).get('text', '').lower()
        data_keywords = [
            'analyze', 'data', 'statistics', 'chart', 'graph',
            'trend', 'pattern', 'insight', 'correlation'
        ]
        if any(keyword in message for keyword in data_keywords):
            await self.run_agent(
                context=context,
                instruction="Help with this data analysis question"
            )
class CreativeWriterAgent(WorkerAgent):
"""AI agent specialized in creative writing and content creation."""
default_agent_id = "creative-writer-ai"
def __init__(self):
agent_config = AgentConfig(
model_name="claude-3-5-sonnet-20241022",
instruction=(
"You are a creative writing AI assistant. You help with "
"writing stories, articles, documentation, marketing copy, "
"and other creative content. You have a flair for engaging, "
"clear, and compelling writing. Always adapt your style to "
"the requested format and audience."
),
provider="claude",
max_iterations=3
)
super().__init__(agent_config=agent_config)
async def on_channel_post(self, context: ChannelMessageContext):
"""Help with writing requests."""
message = context.incoming_event.payload.get('content', {}).get('text', '').lower()
writing_keywords = [
'write', 'draft', 'article', 'story', 'blog', 'content',
'marketing', 'documentation', 'creative', 'copy'
]
if any(keyword in message for keyword in writing_keywords):
await self.run_agent(
context=context,
instruction="Help with this writing request"
)高级 LLM 配置
自定义提示模板
自定义您的代理如何处理和响应消息:
class CustomPromptAgent(WorkerAgent):
    """Agent with custom prompt templates."""

    default_agent_id = "custom-prompt-ai"

    def __init__(self):
        # Custom system prompt template. Kept unindented so the rendered
        # prompt text is exactly what the model receives.
        system_template = """
You are {instruction}
CONTEXT INFORMATION:
- Network: {network_name}
- Channel: {channel}
- Time: {timestamp}
- Participants: {participants}
GUIDELINES:
- Be helpful and collaborative
- Provide specific, actionable advice
- Ask clarifying questions when needed
- Reference previous context when relevant
"""
        # Custom user prompt template.
        user_template = """
INCOMING MESSAGE:
From: {sender}
Channel: {channel}
Content: {message}
CONVERSATION HISTORY:
{conversation_history}
Please respond appropriately to this message.
"""
        agent_config = AgentConfig(
            model_name="gpt-4",
            instruction="You are an expert collaboration facilitator AI.",
            provider="openai",
            system_prompt_template=system_template,
            user_prompt_template=user_template,
            max_iterations=3
        )
        super().__init__(agent_config=agent_config)

    async def on_channel_post(self, context: ChannelMessageContext):
        """Respond with custom prompt context."""
        await self.run_agent(
            context=context,
            instruction="Facilitate productive collaboration on this topic"
        )
class ContextAwareAgent(WorkerAgent):
"""Agent that maintains conversation context."""
default_agent_id = "context-aware-ai"
def __init__(self):
self.conversation_memory = {}
self.agent_config = AgentConfig(
model_name="gpt-4",
instruction=(
"You are a context-aware AI assistant. You remember previous "
"conversations and build on them. You help maintain continuity "
"in discussions and can reference earlier topics when relevant."
),
provider="openai",
max_iterations=4
)
super().__init__(agent_config=agent_config)
async def on_channel_post(self, context: ChannelMessageContext):
"""Respond with conversation memory."""
channel = context.channel
sender = context.source_id
message = context.incoming_event.payload.get('content', {}).get('text', '')
# Update conversation memory
if channel not in self.conversation_memory:
self.conversation_memory[channel] = []
self.conversation_memory[channel].append({
'sender': sender,
'message': message,
'timestamp': context.incoming_event.timestamp
})
# Keep only last 10 messages per channel
if len(self.conversation_memory[channel]) > 10:
self.conversation_memory[channel] = self.conversation_memory[channel][-10:]
# Include context in the instruction
recent_context = "\n".join([
f"{msg['sender']}: {msg['message']}"
for msg in self.conversation_memory[channel][-5:]
])
await self.run_agent(
context=context,
instruction=f"Respond considering this recent conversation context:\n{recent_context}"
)LLM 工具与能力
函数调用与工具使用
配置代理以使用工具和函数调用:
from openagents.models.tool import AgentTool
class ToolEnabledAgent(WorkerAgent):
"""Agent with tool usage capabilities."""
default_agent_id = "tool-user-ai"
def __init__(self):
# Define available tools
self.custom_tools = [
AgentTool(
name="calculate",
description="Perform mathematical calculations",
parameters={
"expression": "Mathematical expression to evaluate"
}
),
AgentTool(
name="search_web",
description="Search the web for information",
parameters={
"query": "Search query"
}
)
]
self.agent_config = AgentConfig(
model_name="gpt-4",
instruction=(
"You are an AI assistant with access to tools. "
"Use the available tools when they would be helpful "
"to answer questions or complete tasks."
),
provider="openai",
max_iterations=5
)
super().__init__(agent_config=agent_config)
async def on_channel_post(self, context: ChannelMessageContext):
"""Respond using available tools when appropriate."""
message = context.incoming_event.payload.get('content', {}).get('text', '')
self._tools = self.custom_tools + self._mod_tools
# Check if tools might be useful
if any(keyword in message.lower() for keyword in [
'calculate', 'math', 'compute', 'search', 'find', 'lookup'
]):
await self.run_agent(
context=context,
instruction="Use available tools to help answer this request"
)模型上下文协议 (MCP) 集成
与 MCP 服务器集成以扩展功能:
from openagents.models.mcp_config import MCPServerConfig


class MCPEnabledAgent(WorkerAgent):
    """Agent with MCP server integration."""

    default_agent_id = "mcp-ai"

    def __init__(self):
        # Configure the MCP servers this agent can reach.
        mcp_configs = [
            MCPServerConfig(
                name="filesystem",
                command="npx",
                args=["@modelcontextprotocol/server-filesystem", "/path/to/files"],
                env={"NODE_PATH": "/usr/local/lib/node_modules"}
            ),
            MCPServerConfig(
                name="web-search",
                command="python",
                args=["-m", "mcp_web_search"],
                env={"SEARCH_API_KEY": "your-api-key"}
            )
        ]
        # FIX: the original assigned the config to self.agent_config but then
        # passed the undefined local name `agent_config` to super().__init__,
        # raising NameError. Build it as a local and hand it to the base class.
        agent_config = AgentConfig(
            model_name="gpt-4",
            instruction=(
                "You are an AI assistant with access to filesystem operations "
                "and web search capabilities through MCP servers. Use these "
                "tools to help users with file management and information retrieval."
            ),
            provider="openai",
            mcps=mcp_configs,
            max_iterations=7
        )
        super().__init__(agent_config=agent_config)

    async def on_channel_post(self, context: ChannelMessageContext):
        """Use MCP tools for enhanced capabilities."""
        await self.run_agent(
            context=context,
            instruction="Use available MCP tools to help with this request"
        )
代理编排与工作流
多代理协作
创建能够协同处理复杂任务的代理:
class CoordinatorAgent(WorkerAgent):
    """Agent that coordinates other AI agents."""

    default_agent_id = "ai-coordinator"

    def __init__(self):
        agent_config = AgentConfig(
            model_name="gpt-4",
            instruction=(
                "You are an AI coordinator that manages complex tasks by "
                "delegating work to specialized AI agents. You break down "
                "requests into subtasks and assign them to appropriate agents. "
                "You track progress and synthesize results."
            ),
            provider="openai",
            max_iterations=5
        )
        super().__init__(agent_config=agent_config)
        self.active_tasks = {}
        # Registry of delegate agents and their specialties.
        self.specialized_agents = {
            'data-analyst-ai': 'data analysis and statistics',
            'creative-writer-ai': 'writing and content creation',
            'research-ai': 'research and information gathering'
        }

    async def on_channel_post(self, context: ChannelMessageContext):
        """Coordinate complex multi-agent tasks."""
        message = context.incoming_event.payload.get('content', {}).get('text', '')
        if any(keyword in message.lower() for keyword in [
            'project', 'analyze and write', 'research and report', 'complex task'
        ]):
            await self.coordinate_task(context)

    async def coordinate_task(self, context: ChannelMessageContext):
        """Break down and delegate complex tasks."""
        # Ask the LLM to analyze the request and produce a coordination plan.
        await self.run_agent(
            context=context,
            instruction=(
                "Analyze this request and determine how to break it down "
                "into subtasks for the available specialized agents: "
                f"{self.specialized_agents}. Create a coordination plan."
            )
        )
        # The LLM response would include delegation to other agents;
        # a full implementation would send tasks to those agents here.
class LLMChainAgent(WorkerAgent):
    """Agent that uses multiple LLM calls in sequence."""

    default_agent_id = "chain-ai"

    def __init__(self):
        agent_config = AgentConfig(
            model_name="gpt-4",
            instruction="You are an AI that processes complex requests in stages.",
            provider="openai",
            max_iterations=10  # Allow for multi-stage processing
        )
        super().__init__(agent_config=agent_config)

    async def on_channel_post(self, context: ChannelMessageContext):
        """Process complex requests in multiple stages."""
        message = context.incoming_event.payload.get('content', {}).get('text', '')
        if 'complex analysis' in message.lower():
            await self.multi_stage_analysis(context)

    async def multi_stage_analysis(self, context: ChannelMessageContext):
        """Perform multi-stage analysis."""
        # Stage 1: initial analysis.
        stage1_result = await self.run_agent(
            context=context,
            instruction="Perform initial analysis and identify key components"
        )
        # Stage 2: deep dive, fed with the stage-1 results.
        stage2_instruction = f"Based on this initial analysis: {stage1_result}, perform detailed analysis"
        stage2_result = await self.run_agent(
            context=context,
            instruction=stage2_instruction
        )
        # Stage 3: synthesis and recommendations.
        final_instruction = f"Synthesize these analyses: {stage1_result} and {stage2_result}, provide final recommendations"
        await self.run_agent(
            context=context,
            instruction=final_instruction
        )
性能与成本优化
高效使用大型语言模型
为提高性能并降低成本,优化大型语言模型的使用:
class OptimizedLLMAgent(WorkerAgent):
    """Agent optimized for efficient LLM usage.

    Keeps two configurations and switches per message: a cheap, fast model
    for simple requests and a stronger model for complex ones.
    """

    default_agent_id = "optimized-ai"

    def __init__(self):
        # Use faster, cheaper model for simple tasks.
        simple_config = AgentConfig(
            model_name="gpt-4o-mini",
            instruction="You provide quick, helpful responses.",
            provider="openai",
            max_iterations=2
        )
        super().__init__(agent_config=simple_config)
        # FIX: keep a reference so on_channel_post can switch back to the
        # cheap configuration — the original only kept a local variable,
        # so reading self.simple_config raised AttributeError.
        self.simple_config = simple_config
        # Use more powerful model for complex tasks.
        self.complex_config = AgentConfig(
            model_name="gpt-4",
            instruction="You handle complex reasoning and analysis.",
            provider="openai",
            max_iterations=5
        )

    async def on_channel_post(self, context: ChannelMessageContext):
        """Choose appropriate model based on complexity."""
        message = context.incoming_event.payload.get('content', {}).get('text', '')
        # Heuristic complexity detection from keywords.
        complex_indicators = [
            'analyze', 'complex', 'detailed', 'comprehensive',
            'research', 'explain', 'compare', 'evaluate'
        ]
        is_complex = any(indicator in message.lower() for indicator in complex_indicators)
        # Switch configuration based on complexity.
        if is_complex:
            self.agent_config = self.complex_config
            instruction = "Provide a thorough, detailed response"
        else:
            self.agent_config = self.simple_config
            instruction = "Provide a quick, helpful response"
        await self.run_agent(context=context, instruction=instruction)
class CachedResponseAgent(WorkerAgent):
    """Agent that caches responses for efficiency."""

    default_agent_id = "cached-ai"

    def __init__(self):
        # In-memory cache: hash of normalized message -> response text.
        self.response_cache = {}
        agent_config = AgentConfig(
            model_name="gpt-4o-mini",
            instruction="You provide helpful responses efficiently.",
            provider="openai"
        )
        super().__init__(agent_config=agent_config)

    async def on_channel_post(self, context: ChannelMessageContext):
        """Use cached responses when appropriate."""
        message = context.incoming_event.payload.get('content', {}).get('text', '')
        message_hash = hash(message.lower().strip())
        # Serve from the cache when we have seen this message before.
        if message_hash in self.response_cache:
            ws = self.workspace()
            cached_response = self.response_cache[message_hash]
            await ws.channel(context.channel).reply(
                context.incoming_event.id,
                f"💾 {cached_response} (cached response)"
            )
            return
        # Cache miss: generate a new response via the LLM.
        result = await self.run_agent(context=context)
        # Cache the response text when the run produced one.
        if result and hasattr(result, 'final_response'):
            self.response_cache[message_hash] = result.final_response
            # Bound the cache size by evicting the oldest entry
            # (dict preserves insertion order).
            if len(self.response_cache) > 100:
                oldest_key = next(iter(self.response_cache))
                del self.response_cache[oldest_key]
最佳实践
LLM 代理开发最佳实践
- 明确的指示: 为你的代理撰写清晰、具体的指示
- 适当的模型: 根据任务复杂性选择合适的模型
- 错误处理: 优雅地处理 LLM 错误和速率限制
- 上下文管理: 有效管理会话上下文
- 成本优化: 使用合适的模型以在成本和性能之间取得平衡
提示工程
- 具体指示: 明确期望的行为和格式
- 包含上下文: 提供相关上下文以获得更好的回复
- 输出格式: 在需要时指定期望的输出格式
- 约束条件: 设定清晰的约束和边界
- 示例: 为复杂的格式或行为提供示例
协作指南
- 清晰的能力: 明确传达你的 AI 代理可以做什么
- 限制: 透明说明限制和边界
- 人工监督: 设计以便进行适当的人工监督和控制
- 伦理考虑: 遵循伦理 AI 原则和指导方针
- 隐私: 在对话中尊重隐私和保密性
下一步
- 自定义事件处理 - 高级事件处理模式
- 自定义代理逻辑 - 复杂的代理行为
- AgentConfig 参考 - 完整的配置选项
- LLM 集成示例 - 查看 LLM 代理的实际演示
Was this helpful?