
Commit 90e2116

fix some typos
1 parent 3b86d1f commit 90e2116

5 files changed: +183 −195 lines changed

workflows/agents/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -1,6 +1,5 @@
 """
 Agents Package for Code Implementation Workflow
-代码实现工作流的代理包
 
 This package contains specialized agents for different aspects of code implementation:
 - CodeImplementationAgent: Handles file-by-file code generation

workflows/agents/code_implementation_agent.py

Lines changed: 58 additions & 68 deletions
@@ -1,10 +1,8 @@
 """
 Code Implementation Agent for File-by-File Development
-文件逐个开发的代码实现代理
 
 Handles systematic code implementation with progress tracking and
 memory optimization for long-running development sessions.
-处理系统性代码实现,具有进度跟踪和长时间开发会话的内存优化。
 """
 
 import json
@@ -35,14 +33,13 @@
 class CodeImplementationAgent:
     """
     Code Implementation Agent for systematic file-by-file development
-    用于系统性文件逐个开发的代码实现代理
-
-    Responsibilities / 职责:
-    - Track file implementation progress / 跟踪文件实现进度
-    - Execute MCP tool calls for code generation / 执行MCP工具调用进行代码生成
-    - Monitor implementation status / 监控实现状态
-    - Coordinate with Summary Agent for memory optimization / 与总结代理协调进行内存优化
-    - Calculate token usage for context management / 计算token使用量用于上下文管理
+
+    Responsibilities:
+    - Track file implementation progress
+    - Execute MCP tool calls for code generation
+    - Monitor implementation status
+    - Coordinate with Summary Agent for memory optimization
+    - Calculate token usage for context management
     """
 
     def __init__(
@@ -53,7 +50,6 @@ def __init__(
     ):
         """
         Initialize Code Implementation Agent
-        初始化代码实现代理
 
         Args:
             mcp_agent: MCP agent instance for tool calls
@@ -72,26 +68,32 @@ def __init__(
             "dependency_analysis": [],  # Track dependency analysis and file reads
         }
         self.files_implemented_count = 0
-        self.implemented_files_set = set()  # Track unique file paths to avoid duplicate counting / 跟踪唯一文件路径以避免重复计数
+        self.implemented_files_set = (
+            set()
+        )  # Track unique file paths to avoid duplicate counting
         self.files_read_for_dependencies = (
             set()
-        )  # Track files read for dependency analysis / 跟踪为依赖分析而读取的文件
-        self.last_summary_file_count = 0  # Track the file count when last summary was triggered / 跟踪上次触发总结时的文件数
+        )  # Track files read for dependency analysis
+        self.last_summary_file_count = (
+            0  # Track the file count when last summary was triggered
+        )
 
-        # Token calculation settings / Token计算设置
-        self.max_context_tokens = 200000  # Default max context tokens for Claude-3.5-Sonnet / Claude-3.5-Sonnet的默认最大上下文tokens
-        self.token_buffer = (
-            10000  # Safety buffer before reaching max / 达到最大值前的安全缓冲区
+        # Token calculation settings
+        self.max_context_tokens = (
+            200000  # Default max context tokens for Claude-3.5-Sonnet
         )
+        self.token_buffer = 10000  # Safety buffer before reaching max
         self.summary_trigger_tokens = (
             self.max_context_tokens - self.token_buffer
-        )  # Trigger summary when approaching limit / 接近限制时触发总结
-        self.last_summary_token_count = 0  # Track token count when last summary was triggered / 跟踪上次触发总结时的token数
+        )  # Trigger summary when approaching limit
+        self.last_summary_token_count = (
+            0  # Track token count when last summary was triggered
+        )
 
-        # Initialize tokenizer / 初始化tokenizer
+        # Initialize tokenizer
         if TIKTOKEN_AVAILABLE:
             try:
-                # Use Claude-3 tokenizer (approximation with OpenAI's o200k_base) / 使用Claude-3 tokenizer(用OpenAI的o200k_base近似)
+                # Use Claude-3 tokenizer (approximation with OpenAI's o200k_base)
                 self.tokenizer = tiktoken.get_encoding("o200k_base")
                 self.logger.info("Token calculation enabled with o200k_base encoding")
             except Exception as e:
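
Note: taken together, these settings implement a simple token budget: a summary is triggered once the conversation approaches max_context_tokens minus token_buffer. A minimal standalone sketch of the same arithmetic, assuming tiktoken is installed (o200k_base is only an approximation of Claude's tokenizer, as the comment above notes):

import tiktoken

MAX_CONTEXT_TOKENS = 200_000  # default shown in this diff
TOKEN_BUFFER = 10_000  # safety margin before the hard limit
SUMMARY_TRIGGER_TOKENS = MAX_CONTEXT_TOKENS - TOKEN_BUFFER  # 190,000

encoding = tiktoken.get_encoding("o200k_base")

def over_budget(text: str) -> bool:
    """Return True once the text alone would cross the summary threshold."""
    return len(encoding.encode(text, disallowed_special=())) >= SUMMARY_TRIGGER_TOKENS
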
@@ -103,14 +105,14 @@ def __init__(
                 "tiktoken not available, token-based summary triggering disabled"
             )
 
-        # Analysis loop detection / 分析循环检测
-        self.recent_tool_calls = []  # Track recent tool calls to detect analysis loops / 跟踪最近的工具调用以检测分析循环
-        self.max_read_without_write = 5  # Max read_file calls without write_file / 没有write_file的最大read_file调用次数
+        # Analysis loop detection
+        self.recent_tool_calls = []  # Track recent tool calls to detect analysis loops
+        self.max_read_without_write = 5  # Max read_file calls without write_file
 
-        # Memory agent integration / 内存代理集成
-        self.memory_agent = None  # Will be set externally / 将从外部设置
-        self.llm_client = None  # Will be set externally / 将从外部设置
-        self.llm_client_type = None  # Will be set externally / 将从外部设置
+        # Memory agent integration
+        self.memory_agent = None  # Will be set externally
+        self.llm_client = None  # Will be set externally
+        self.llm_client_type = None  # Will be set externally
 
         # Log read tools configuration
         read_tools_status = "ENABLED" if self.enable_read_tools else "DISABLED"
@@ -123,7 +125,7 @@ def __init__(
         )
 
     def _create_default_logger(self) -> logging.Logger:
-        """Create default logger if none provided / 如果未提供则创建默认日志记录器"""
+        """Create default logger if none provided"""
         logger = logging.getLogger(f"{__name__}.CodeImplementationAgent")
         # Don't add handlers to child loggers - let them propagate to root
         logger.setLevel(logging.INFO)
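
Note: the "propagate to root" comment is the standard logging idiom: attach handlers only to the root logger and let child loggers forward records upward, otherwise each message is emitted twice. A minimal sketch (the logger name is illustrative):

import logging

# Handlers live on the root logger only
logging.basicConfig(level=logging.INFO, format="%(name)s: %(message)s")

# The child logger gets no handlers of its own; records propagate to the root
child = logging.getLogger("workflows.agents.CodeImplementationAgent")
child.info("emitted once, by the root handler")
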
@@ -132,14 +134,12 @@ def _create_default_logger(self) -> logging.Logger:
     def get_system_prompt(self) -> str:
         """
         Get the system prompt for code implementation
-        获取代码实现的系统提示词
         """
         return GENERAL_CODE_IMPLEMENTATION_SYSTEM_PROMPT
 
     def set_memory_agent(self, memory_agent, llm_client=None, llm_client_type=None):
         """
         Set memory agent for code summary generation
-        设置内存代理用于代码总结生成
 
         Args:
             memory_agent: Memory agent instance
@@ -154,7 +154,6 @@ def set_memory_agent(self, memory_agent, llm_client=None, llm_client_type=None):
     async def execute_tool_calls(self, tool_calls: List[Dict]) -> List[Dict]:
         """
         Execute MCP tool calls and track implementation progress
-        执行MCP工具调用并跟踪实现进度
 
         Args:
             tool_calls: List of tool calls to execute
@@ -226,18 +225,18 @@ async def execute_tool_calls(self, tool_calls: List[Dict]) -> List[Dict]:
                 )
 
                 if self.mcp_agent:
-                    # Execute tool call through MCP protocol / 通过MCP协议执行工具调用
+                    # Execute tool call through MCP protocol
                     result = await self.mcp_agent.call_tool(tool_name, tool_input)
 
-                    # Track file implementation progress / 跟踪文件实现进度
+                    # Track file implementation progress
                     if tool_name == "write_file":
                         await self._track_file_implementation_with_summary(
                             tool_call, result
                         )
                     elif tool_name == "read_file":
                         self._track_dependency_analysis(tool_call, result)
 
-                    # Track tool calls for analysis loop detection / 跟踪工具调用以检测分析循环
+                    # Track tool calls for analysis loop detection
                     self._track_tool_call_for_loop_detection(tool_name)
 
                     results.append(
@@ -282,8 +281,6 @@ async def _handle_read_file_with_memory_optimization(self, tool_call: Dict) -> D
         """
         Intercept read_file calls and redirect to read_code_mem if a summary exists.
         This prevents unnecessary file reads if the summary is already available.
-        拦截read_file调用,如果存在摘要则重定向到read_code_mem。
-        这可以防止在摘要已经存在时进行不必要的文件读取。
         """
         file_path = tool_call["input"].get("file_path")
         if not file_path:
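
Note: the docstring describes a cache-aside pattern: consult the summary store before touching disk, and issue a real read_file only on a miss. A hedged sketch of that flow, assuming a plain dict as the summary store (the store and helper below are illustrative, not the repository's actual API; mcp_agent.call_tool is the call used elsewhere in this diff):

summaries: dict = {}  # file_path -> previously generated summary (assumed store)

async def read_with_memory(tool_call: dict, mcp_agent):
    file_path = tool_call["input"].get("file_path")
    if not file_path:
        raise ValueError("read_file call is missing 'file_path'")
    if file_path in summaries:
        # Hit: serve the stored summary instead of re-reading the file
        return summaries[file_path]
    # Miss: fall through to the real tool call
    return await mcp_agent.call_tool("read_file", tool_call["input"])
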
@@ -389,7 +386,6 @@ async def _track_file_implementation_with_summary(
     ):
         """
         Track file implementation and create code summary
-        跟踪文件实现并创建代码总结
 
         Args:
             tool_call: The write_file tool call
@@ -428,31 +424,30 @@ def _track_file_implementation(self, tool_call: Dict, result: Any):
     def _track_file_implementation(self, tool_call: Dict, result: Any):
         """
         Track file implementation progress
-        跟踪文件实现进度
         """
         try:
-            # Handle different result types from MCP / 处理MCP的不同结果类型
+            # Handle different result types from MCP
             result_data = None
 
-            # Check if result is a CallToolResult object / 检查结果是否为CallToolResult对象
+            # Check if result is a CallToolResult object
             if hasattr(result, "content"):
-                # Extract content from CallToolResult / 从CallToolResult提取内容
+                # Extract content from CallToolResult
                 if hasattr(result.content, "text"):
                     result_content = result.content.text
                 else:
                     result_content = str(result.content)
 
-                # Try to parse as JSON / 尝试解析为JSON
+                # Try to parse as JSON
                 try:
                     result_data = json.loads(result_content)
                 except json.JSONDecodeError:
-                    # If not JSON, create a structure / 如果不是JSON,创建一个结构
+                    # If not JSON, create a structure
                     result_data = {
                         "status": "success",
                         "file_path": tool_call["input"].get("file_path", "unknown"),
                     }
             elif isinstance(result, str):
-                # Try to parse string result / 尝试解析字符串结果
+                # Try to parse string result
                 try:
                     result_data = json.loads(result)
                 except json.JSONDecodeError:
@@ -461,16 +456,16 @@ def _track_file_implementation(self, tool_call: Dict, result: Any):
                         "file_path": tool_call["input"].get("file_path", "unknown"),
                     }
             elif isinstance(result, dict):
-                # Direct dictionary result / 直接字典结果
+                # Direct dictionary result
                 result_data = result
             else:
-                # Fallback: assume success and extract file path from input / 后备方案:假设成功并从输入中提取文件路径
+                # Fallback: assume success and extract file path from input
                 result_data = {
                     "status": "success",
                     "file_path": tool_call["input"].get("file_path", "unknown"),
                 }
 
-            # Extract file path for tracking / 提取文件路径用于跟踪
+            # Extract file path for tracking
             file_path = None
             if result_data and result_data.get("status") == "success":
                 file_path = result_data.get(
@@ -479,15 +474,15 @@ def _track_file_implementation(self, tool_call: Dict, result: Any):
             else:
                 file_path = tool_call["input"].get("file_path")
 
-            # Only count unique files, not repeated tool calls on same file / 只计数唯一文件,不重复计数同一文件的工具调用
+            # Only count unique files, not repeated tool calls on same file
             if file_path and file_path not in self.implemented_files_set:
-                # This is a new file implementation / 这是一个新的文件实现
+                # This is a new file implementation
                 self.implemented_files_set.add(file_path)
                 self.files_implemented_count += 1
                 # self.logger.info(f"New file implementation tracked: count={self.files_implemented_count}, file={file_path}")
                 # print(f"New file implementation tracked: count={self.files_implemented_count}, file={file_path}")
 
-                # Add to completed files list / 添加到已完成文件列表
+                # Add to completed files list
                 self.implementation_summary["completed_files"].append(
                     {
                         "file": file_path,
@@ -503,17 +498,17 @@ def _track_file_implementation(self, tool_call: Dict, result: Any):
                 # print(f"📝 NEW FILE IMPLEMENTED: count={self.files_implemented_count}, file={file_path}")
                 # print(f"🔧 OPTIMIZATION NOW ENABLED: files_implemented_count > 0 = {self.files_implemented_count > 0}")
             elif file_path and file_path in self.implemented_files_set:
-                # This file was already implemented (duplicate tool call) / 这个文件已经被实现过了(重复工具调用)
+                # This file was already implemented (duplicate tool call)
                 self.logger.debug(
                     f"File already tracked, skipping duplicate count: {file_path}"
                 )
             else:
-                # No valid file path found / 没有找到有效的文件路径
+                # No valid file path found
                 self.logger.warning("No valid file path found for tracking")
 
         except Exception as e:
             self.logger.warning(f"Failed to track file implementation: {e}")
-            # Even if tracking fails, try to count based on tool input (but check for duplicates) / 即使跟踪失败,也尝试根据工具输入计数(但检查重复)
+            # Even if tracking fails, try to count based on tool input (but check for duplicates)
 
             file_path = tool_call["input"].get("file_path")
             if file_path and file_path not in self.implemented_files_set:
@@ -526,16 +521,15 @@ def _track_file_implementation(self, tool_call: Dict, result: Any):
     def _track_dependency_analysis(self, tool_call: Dict, result: Any):
         """
         Track dependency analysis through read_file calls
-        跟踪通过read_file调用进行的依赖分析
         """
         try:
             file_path = tool_call["input"].get("file_path")
             if file_path:
-                # Track unique files read for dependency analysis / 跟踪为依赖分析而读取的唯一文件
+                # Track unique files read for dependency analysis
                 if file_path not in self.files_read_for_dependencies:
                     self.files_read_for_dependencies.add(file_path)
 
-                    # Add to dependency analysis summary / 添加到依赖分析总结
+                    # Add to dependency analysis summary
                     self.implementation_summary["dependency_analysis"].append(
                         {
                             "file_read": file_path,
@@ -554,7 +548,6 @@ def _track_dependency_analysis(self, tool_call: Dict, result: Any):
     def calculate_messages_token_count(self, messages: List[Dict]) -> int:
         """
         Calculate total token count for a list of messages
-        计算消息列表的总token数
 
         Args:
             messages: List of chat messages with 'role' and 'content' keys
@@ -563,9 +556,9 @@ def calculate_messages_token_count(self, messages: List[Dict]) -> int:
             Total token count
         """
         if not self.tokenizer:
-            # Fallback: rough estimation based on character count / 回退:基于字符数的粗略估计
+            # Fallback: rough estimation based on character count
             total_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
-            # Rough approximation: 1 token ≈ 4 characters / 粗略近似:1个token ≈ 4个字符
+            # Rough approximation: 1 token ≈ 4 characters
             return total_chars // 4
 
         try:
@@ -574,31 +567,28 @@ def calculate_messages_token_count(self, messages: List[Dict]) -> int:
                 content = str(message.get("content", ""))
                 role = message.get("role", "")
 
-                # Count tokens for content / 计算内容的token数
+                # Count tokens for content
                 if content:
                     content_tokens = len(
                         self.tokenizer.encode(content, disallowed_special=())
                     )
                     total_tokens += content_tokens
 
-                # Add tokens for role and message structure / 为角色和消息结构添加token
+                # Add tokens for role and message structure
                 role_tokens = len(self.tokenizer.encode(role, disallowed_special=()))
-                total_tokens += (
-                    role_tokens + 4
-                )  # Extra tokens for message formatting / 消息格式化的额外token
+                total_tokens += role_tokens + 4  # Extra tokens for message formatting
 
             return total_tokens
 
         except Exception as e:
             self.logger.warning(f"Token calculation failed: {e}")
-            # Fallback estimation / 回退估计
+            # Fallback estimation
             total_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
             return total_chars // 4
 
     def should_trigger_summary_by_tokens(self, messages: List[Dict]) -> bool:
         """
         Check if summary should be triggered based on token count
-        根据token数检查是否应触发总结
 
         Args:
             messages: Current conversation messages
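
Note: the per-message accounting above can be exercised end to end. A small sketch, assuming the heuristics in this diff (about 4 extra tokens of structure per message, and the 1 token ≈ 4 characters fallback when tiktoken is unavailable):

import tiktoken

enc = tiktoken.get_encoding("o200k_base")

def count_tokens(messages: list) -> int:
    total = 0
    for msg in messages:
        content = str(msg.get("content", ""))
        role = msg.get("role", "")
        if content:
            total += len(enc.encode(content, disallowed_special=()))
        # role tokens plus ~4 tokens of per-message formatting overhead
        total += len(enc.encode(role, disallowed_special=())) + 4
    return total

msgs = [{"role": "user", "content": "Implement utils/io.py"}]
print(count_tokens(msgs))  # compare against the 190,000-token trigger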

workflows/agents/memory_agent_concise.py

Lines changed: 0 additions & 2 deletions
@@ -1,6 +1,5 @@
 """
 Concise Memory Agent for Code Implementation Workflow
-简洁的代码实现工作流内存代理
 
 This memory agent implements a focused approach:
 1. Before first file: Normal conversation flow
@@ -406,7 +405,6 @@ async def _call_llm_for_summary(
     ) -> Dict[str, Any]:
         """
         Call LLM for code implementation summary generation ONLY
-        调用LLM生成代码实现总结(仅用于代码总结)
 
         This method is used only for creating code implementation summaries,
         NOT for conversation summarization which has been removed.
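
Note: the method above confines LLM usage to a single job, condensing an implemented file into a summary that read_code_mem can serve later. A hedged sketch of that call shape (the client method, prompt, and return structure are placeholders, not the repository's actual interface):

async def call_llm_for_summary(llm_client, file_path: str, code: str) -> dict:
    """Illustrative only: summarize one implemented file for later reuse."""
    prompt = (
        f"Summarize the implementation of {file_path}: "
        "public interfaces, key data structures, and dependencies.\n\n" + code
    )
    response = await llm_client.complete(prompt)  # hypothetical client API
    return {"file_path": file_path, "summary": response}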

workflows/agents/memory_agent_concise_index.py

Lines changed: 0 additions & 2 deletions
@@ -1,6 +1,5 @@
 """
 Concise Memory Agent for Code Implementation Workflow
-简洁的代码实现工作流内存代理
 
 This memory agent implements a focused approach:
 1. Before first file: Normal conversation flow
@@ -406,7 +405,6 @@ async def _call_llm_for_summary(
     ) -> Dict[str, Any]:
         """
         Call LLM for code implementation summary generation ONLY
-        调用LLM生成代码实现总结(仅用于代码总结)
 
         This method is used only for creating code implementation summaries,
         NOT for conversation summarization which has been removed.
