Merge pull request #775 from tnfssc/fix/llm-repeats-forever

Sharath 2025-06-19 23:36:18 +05:30 committed by GitHub
commit 1c9573dc31
1 changed file with 12 additions and 1 deletion


@@ -250,7 +250,7 @@ class ThreadManager:
             logger.warning(f"Further token compression is needed: {compressed_token_count} > {max_tokens}")
             result = self._compress_messages(messages, llm_model, max_tokens, int(token_threshold / 2), max_iterations - 1)
 
-        return result
+        return self._middle_out_messages(result)
 
     def _compress_messages_by_omitting_messages(
         self,
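Why the one-line change above matters (an illustrative sketch, not code from this PR): the compression pass bounds tokens, but a conversation made of thousands of tiny messages can sit under the token budget while the turn count keeps growing, which is the repeat-forever failure this PR targets. The 4-characters-per-token estimate below is a hypothetical stand-in for the real token counter.

from typing import Any, Dict, List

# 10,000 near-empty assistant turns: cheap in tokens, pathological in count.
messages: List[Dict[str, Any]] = [
    {"role": "assistant", "content": "ok"} for _ in range(10_000)
]

# Crude token estimate (hypothetical; the real code uses a model-aware counter).
approx_tokens = sum(len(m["content"]) // 4 + 1 for m in messages)

print(approx_tokens)   # ~10,000 tokens: under a typical max_tokens budget
print(len(messages))   # 10,000 messages: unbounded before this change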
@@ -322,6 +322,17 @@ class ThreadManager:
         logger.info(f"_compress_messages_by_omitting_messages: {initial_token_count} -> {final_token_count} tokens ({len(messages)} -> {len(final_messages)} messages)")
         return final_messages
 
+    def _middle_out_messages(self, messages: List[Dict[str, Any]], max_messages: int = 320) -> List[Dict[str, Any]]:
+        """Remove messages from the middle of the list, keeping max_messages total."""
+        if len(messages) <= max_messages:
+            return messages
+
+        # Keep half from the beginning and half from the end
+        keep_start = max_messages // 2
+        keep_end = max_messages - keep_start
+
+        return messages[:keep_start] + messages[-keep_end:]
+
     def add_tool(self, tool_class: Type[Tool], function_names: Optional[List[str]] = None, **kwargs):
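For experimentation outside the class, the new helper can be lifted verbatim into a standalone function; the slicing below is identical to the method added above. The example values are ours, not from the PR.

from typing import Any, Dict, List

def middle_out_messages(messages: List[Dict[str, Any]], max_messages: int = 320) -> List[Dict[str, Any]]:
    """Remove messages from the middle of the list, keeping max_messages total."""
    if len(messages) <= max_messages:
        return messages
    # Keep half from the beginning and half from the end (the end gets the odd one).
    keep_start = max_messages // 2
    keep_end = max_messages - keep_start
    return messages[:keep_start] + messages[-keep_end:]

msgs = [{"role": "user", "content": str(i)} for i in range(10)]
print([m["content"] for m in middle_out_messages(msgs, max_messages=5)])
# ['0', '1', '7', '8', '9'] -- keep_start=2 from the front, keep_end=3 from the back

Trimming from the middle keeps the earliest context (typically the system prompt and task setup) at the front and the most recent turns at the back, at the cost of dropping whatever happened in between.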