[ PROMPT_NODE_22196 ]
Agents Autogpt 高级用法
[ SKILL_DOCUMENTATION ]
# AutoGPT 高级用法指南
## 自定义 Block 开发
### Block 结构
python
from backend.data.block import Block, BlockSchema, BlockType
from pydantic import BaseModel
class MyBlockInput(BaseModel):
    """Input schema for the block."""

    # The query string the block will process.
    query: str
    # Upper bound on the number of results returned; defaults to 10.
    max_results: int = 10
class MyBlockOutput(BaseModel):
    """Output schema for the block."""

    # The list of result strings produced by the block.
    results: list[str]
    # Number of results (should equal len(results)).
    count: int
class MyCustomBlock(Block):
    """Custom block for a specific piece of functionality."""

    id = "my-custom-block-uuid"
    name = "My Custom Block"
    description = "执行特定功能"
    block_type = BlockType.STANDARD
    input_schema = MyBlockInput
    output_schema = MyBlockOutput

    # NOTE: `execute` is an async generator (it yields output pairs), so the
    # previous `-> dict` annotation was incorrect and has been removed.
    async def execute(self, input_data: MyBlockInput):
        """Execute the block logic.

        Yields:
            ("results", list[str]): the processed results.
            ("count", int): the number of results.
        """
        # Implement your logic here.
        results = await self.process(input_data.query, input_data.max_results)
        yield "results", results
        yield "count", len(results)

    async def process(self, query: str, max_results: int) -> list[str]:
        """Internal processing logic (placeholder implementation)."""
        # Implementation details go here.
        return ["result1", "result2"]
### Block 注册
python
# backend/blocks/__init__.py
from backend.blocks.my_block import MyCustomBlock
# Add the custom block to the block registry.
BLOCKS = [
MyCustomBlock,
# ... other blocks
]
### 包含凭据的 Block
python
from backend.data.block import Block
from backend.integrations.providers import ProviderName
class APIIntegrationBlock(Block):
    """Block that calls an external API using credentials stored in the system."""

    # Providers whose credentials must be configured before this block can run.
    credentials_required = [ProviderName.OPENAI]

    async def execute(self, input_data):
        """Call the OpenAI chat API with the user's prompt and yield the reply."""
        # Fetch the stored credentials from the system.
        credentials = await self.get_credentials(ProviderName.OPENAI)
        # Use the async client: the synchronous `OpenAI` client's `create` call
        # is not awaitable, so awaiting it would fail at runtime.
        # (Import elided in this example: `from openai import AsyncOpenAI`.)
        client = AsyncOpenAI(api_key=credentials.api_key)
        response = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": input_data.prompt}],
        )
        yield "response", response.choices[0].message.content
### 包含成本追踪的 Block
python
from backend.data.block import Block
from backend.data.block_cost_config import BlockCostConfig
class LLMBlock(Block):
    """Block with built-in cost tracking for LLM token usage."""

    # Billing configuration: charge per token at the OpenAI rate.
    cost_config = BlockCostConfig(
        cost_type="token",
        cost_per_unit=0.00002,  # cost per token
        provider="openai",
    )

    async def execute(self, input_data):
        """Run the LLM call and yield its output, reporting token usage."""
        response = await self.call_llm(input_data.prompt)
        # Report token counts so the platform can attribute cost to this run.
        self.report_usage(
            input_tokens=response.usage.prompt_tokens,
            output_tokens=response.usage.completion_tokens,
        )
        yield "output", response.content
## 高级执行模式
### 并行节点执行
python
from backend.executor.manager import ExecutionManager
async def execute_parallel_nodes(graph_exec_id: str, node_ids: list[str]):
"""并行执行多个节点"""