mirror of https://github.com/kortix-ai/suna.git
Merge branch 'mcp-5a' into mcp-5a-agentbuilder
commit 3c82e2152f
@@ -18,6 +18,7 @@ from agent.tools.sb_shell_tool import SandboxShellTool
 from agent.tools.sb_files_tool import SandboxFilesTool
 from agent.tools.sb_browser_tool import SandboxBrowserTool
 from agent.tools.data_providers_tool import DataProvidersTool
+from agent.tools.expand_msg_tool import ExpandMessageTool
 from agent.prompt import get_system_prompt
 from utils.logger import logger
 from utils.auth_utils import get_account_id_from_thread
@@ -101,14 +102,15 @@ async def run_agent(
         thread_manager.add_tool(SandboxBrowserTool, project_id=project_id, thread_id=thread_id, thread_manager=thread_manager)
         thread_manager.add_tool(SandboxDeployTool, project_id=project_id, thread_manager=thread_manager)
         thread_manager.add_tool(SandboxExposeTool, project_id=project_id, thread_manager=thread_manager)
+        thread_manager.add_tool(ExpandMessageTool, thread_id=thread_id, thread_manager=thread_manager)
         thread_manager.add_tool(MessageTool)
         thread_manager.add_tool(SandboxWebSearchTool, project_id=project_id, thread_manager=thread_manager)
         thread_manager.add_tool(SandboxVisionTool, project_id=project_id, thread_id=thread_id, thread_manager=thread_manager)
         if config.RAPID_API_KEY:
             thread_manager.add_tool(DataProvidersTool)
     else:
         # Agent specified - only register explicitly enabled tools
         logger.info("Custom agent specified - registering only enabled tools")
+        thread_manager.add_tool(ExpandMessageTool, thread_id=thread_id, thread_manager=thread_manager)
         if enabled_tools.get('sb_shell_tool', {}).get('enabled', False):
             thread_manager.add_tool(SandboxShellTool, project_id=project_id, thread_manager=thread_manager)
         if enabled_tools.get('sb_files_tool', {}).get('enabled', False):
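The enabled_tools lookups in this hunk imply a per-tool config dict keyed by tool name, each entry carrying an enabled flag. A minimal sketch of that shape; only the two key names and the .get('enabled', False) access pattern are confirmed by the diff, the surrounding structure is an assumption:

# Hypothetical agent config as consumed by the registration code above;
# only the per-tool 'enabled' flag is confirmed by the diff.
enabled_tools = {
    "sb_shell_tool": {"enabled": True},
    "sb_files_tool": {"enabled": False},
}

for tool_name in ("sb_shell_tool", "sb_files_tool"):
    if enabled_tools.get(tool_name, {}).get("enabled", False):
        print(f"would register {tool_name}")  # stand-in for thread_manager.add_tool(...)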
@@ -0,0 +1,103 @@
+from agentpress.tool import Tool, ToolResult, openapi_schema, xml_schema
+from agentpress.thread_manager import ThreadManager
+import json
+
+class ExpandMessageTool(Tool):
+    """Tool for expanding a previous message to the user."""
+
+    def __init__(self, thread_id: str, thread_manager: ThreadManager):
+        super().__init__()
+        self.thread_manager = thread_manager
+        self.thread_id = thread_id
+
+    @openapi_schema({
+        "type": "function",
+        "function": {
+            "name": "expand_message",
+            "description": "Expand a message from the previous conversation with the user. Use this tool to expand a message that was truncated in the earlier conversation.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "message_id": {
+                        "type": "string",
+                        "description": "The ID of the message to expand. Must be a UUID."
+                    }
+                },
+                "required": ["message_id"]
+            }
+        }
+    })
+    @xml_schema(
+        tag_name="expand-message",
+        mappings=[
+            {"param_name": "message_id", "node_type": "attribute", "path": "."}
+        ],
+        example='''
+        <!-- Example 1: Expand a message that was truncated in the previous conversation -->
+        <function_calls>
+        <invoke name="expand_message">
+        <parameter name="message_id">ecde3a4c-c7dc-4776-ae5c-8209517c5576</parameter>
+        </invoke>
+        </function_calls>
+
+        <!-- Example 2: Expand a message to create reports or analyze truncated data -->
+        <function_calls>
+        <invoke name="expand_message">
+        <parameter name="message_id">f47ac10b-58cc-4372-a567-0e02b2c3d479</parameter>
+        </invoke>
+        </function_calls>
+
+        <!-- Example 3: Expand a message when you need the full content for analysis -->
+        <function_calls>
+        <invoke name="expand_message">
+        <parameter name="message_id">550e8400-e29b-41d4-a716-446655440000</parameter>
+        </invoke>
+        </function_calls>
+        '''
+    )
+    async def expand_message(self, message_id: str) -> ToolResult:
+        """Expand a message from the previous conversation with the user.
+
+        Args:
+            message_id: The ID of the message to expand
+
+        Returns:
+            ToolResult indicating the message was successfully expanded
+        """
+        try:
+            client = await self.thread_manager.db.client
+            message = await client.table('messages').select('*').eq('message_id', message_id).eq('thread_id', self.thread_id).execute()
+
+            if not message.data or len(message.data) == 0:
+                return self.fail_response(f"Message with ID {message_id} not found in thread {self.thread_id}")
+
+            message_data = message.data[0]
+            message_content = message_data['content']
+            final_content = message_content
+            if isinstance(message_content, dict) and 'content' in message_content:
+                final_content = message_content['content']
+            elif isinstance(message_content, str):
+                try:
+                    parsed_content = json.loads(message_content)
+                    if isinstance(parsed_content, dict) and 'content' in parsed_content:
+                        final_content = parsed_content['content']
+                except json.JSONDecodeError:
+                    pass
+
+            return self.success_response({"status": "Message expanded successfully.", "message": final_content})
+        except Exception as e:
+            return self.fail_response(f"Error expanding message: {str(e)}")
+
+if __name__ == "__main__":
+    import asyncio
+
+    async def test_expand_message_tool():
+        expand_message_tool = ExpandMessageTool()
+
+        # Test expand message
+        expand_message_result = await expand_message_tool.expand_message(
+            message_id="004ab969-ef9a-4656-8aba-e392345227cd"
+        )
+        print("Expand message result:", expand_message_result)
+
+    asyncio.run(test_expand_message_tool())
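Note that the __main__ smoke test in this new file constructs ExpandMessageTool() without the required thread_id and thread_manager arguments, so it would raise a TypeError if run as committed. A minimal sketch of a working invocation, assuming ThreadManager() can be constructed standalone with its database client wired up; the UUIDs are placeholders:

import asyncio

from agentpress.thread_manager import ThreadManager
from agent.tools.expand_msg_tool import ExpandMessageTool

async def main():
    # Assumption: ThreadManager() wires up self.db on its own.
    thread_manager = ThreadManager()
    tool = ExpandMessageTool(
        thread_id="ecde3a4c-c7dc-4776-ae5c-8209517c5576",   # placeholder thread UUID
        thread_manager=thread_manager,
    )
    result = await tool.expand_message(
        message_id="f47ac10b-58cc-4372-a567-0e02b2c3d479"   # placeholder message UUID
    )
    print(result)

asyncio.run(main())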
@@ -119,7 +119,8 @@ class ThreadManager:
         client = await self.db.client

         try:
-            result = await client.rpc('get_llm_formatted_messages', {'p_thread_id': thread_id}).execute()
+            # result = await client.rpc('get_llm_formatted_messages', {'p_thread_id': thread_id}).execute()
+            result = await client.table('messages').select('message_id, content').eq('thread_id', thread_id).eq('is_llm_message', True).order('created_at').execute()

             # Parse the returned data which might be stringified JSON
             if not result.data:
@@ -128,23 +129,17 @@ class ThreadManager:
             # Return properly parsed JSON objects
             messages = []
             for item in result.data:
-                if isinstance(item, str):
+                if isinstance(item['content'], str):
                     try:
-                        parsed_item = json.loads(item)
+                        parsed_item = json.loads(item['content'])
+                        parsed_item['message_id'] = item['message_id']
                         messages.append(parsed_item)
                     except json.JSONDecodeError:
-                        logger.error(f"Failed to parse message: {item}")
+                        logger.error(f"Failed to parse message: {item['content']}")
                 else:
-                    messages.append(item)
-
-            # Ensure tool_calls have properly formatted function arguments
-            for message in messages:
-                if message.get('tool_calls'):
-                    for tool_call in message['tool_calls']:
-                        if isinstance(tool_call, dict) and 'function' in tool_call:
-                            # Ensure function.arguments is a string
-                            if 'arguments' in tool_call['function'] and not isinstance(tool_call['function']['arguments'], str):
-                                tool_call['function']['arguments'] = json.dumps(tool_call['function']['arguments'])
+                    content = item['content']
+                    content['message_id'] = item['message_id']
+                    messages.append(content)

             return messages
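The branch above normalizes rows whose content column arrives either as stringified JSON or as an already-decoded dict, attaching the row's message_id in both cases. A self-contained illustration of the two paths; the row values are made up:

import json

# Hypothetical rows as returned from the 'messages' table (see the hunk above).
rows = [
    {"message_id": "a1", "content": '{"role": "user", "content": "hi"}'},        # stringified JSON
    {"message_id": "a2", "content": {"role": "assistant", "content": "hello"}},  # already a dict
]

messages = []
for item in rows:
    if isinstance(item['content'], str):
        parsed = json.loads(item['content'])
        parsed['message_id'] = item['message_id']
        messages.append(parsed)
    else:
        content = item['content']
        content['message_id'] = item['message_id']
        messages.append(content)

print(messages)  # both rows normalized to dicts carrying their message_id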
@@ -327,6 +322,26 @@ Here are the XML tools available with examples:
         openapi_tool_schemas = self.tool_registry.get_openapi_schemas()
         logger.debug(f"Retrieved {len(openapi_tool_schemas) if openapi_tool_schemas else 0} OpenAPI tool schemas")

+
+        uncompressed_total_token_count = token_counter(model=llm_model, messages=prepared_messages)
+
+        if uncompressed_total_token_count > (llm_max_tokens or (100 * 1000)):
+            _i = 0  # Count the number of ToolResult messages
+            for msg in reversed(prepared_messages):  # Start from the end and work backwards
+                if "content" in msg and msg['content'] and "ToolResult" in msg['content']:  # Only compress ToolResult messages
+                    _i += 1  # Count the number of ToolResult messages
+                    msg_token_count = token_counter(messages=[msg])  # Count the number of tokens in the message
+                    if msg_token_count > 5000:  # If the message is too long
+                        if _i > 1:  # If this is not the most recent ToolResult message
+                            message_id = msg.get('message_id')  # Get the message_id
+                            if message_id:
+                                msg["content"] = msg["content"][:10000] + "... (truncated)" + f"\n\nThis message is too long, use the expand-message tool with message_id \"{message_id}\" to see the full message"  # Truncate the message
+                        else:
+                            msg["content"] = msg["content"][:200000] + f"\n\nThis message is too long, repeat relevant information in your response to remember it"  # Truncate to 200k characters to avoid overloading the context at once, but don't truncate otherwise
+
+        compressed_total_token_count = token_counter(model=llm_model, messages=prepared_messages)
+        logger.info(f"token_compression: {uncompressed_total_token_count} -> {compressed_total_token_count}")  # Log the token compression for debugging later
+
         # 5. Make LLM API call
         logger.debug("Making LLM API call")
         try:
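A standalone restatement of the compression pass above: it walks ToolResult messages from newest to oldest, hard-truncates any older oversized result and points the model at the expand-message tool, while the newest one keeps up to 200k characters. token_counter is replaced here with a rough character-based stand-in; the thresholds and truncation strings mirror the diff.

def compress_tool_results(prepared_messages, max_tokens=100 * 1000):
    # Sketch of the ToolResult compression pass above; token_counter is
    # approximated by len(content) // 4 here, the real code uses litellm's.
    def token_count(text):
        return len(text) // 4

    total = sum(token_count(str(m.get("content", ""))) for m in prepared_messages)
    if total <= max_tokens:
        return

    seen = 0  # number of ToolResult messages encountered, newest first
    for msg in reversed(prepared_messages):
        content = msg.get("content")
        if not (isinstance(content, str) and "ToolResult" in content):
            continue
        seen += 1
        if token_count(content) <= 5000:
            continue
        if seen == 1:
            # Most recent ToolResult: keep up to 200k characters.
            msg["content"] = content[:200000] + "\n\nThis message is too long, repeat relevant information in your response to remember it"
        elif msg.get("message_id"):
            # Older ToolResults: truncate hard and point at the expand-message tool.
            msg["content"] = content[:10000] + "... (truncated)" + f"\n\nThis message is too long, use the expand-message tool with message_id \"{msg['message_id']}\" to see the full message"

msgs = [
    {"message_id": "m1", "content": "ToolResult: " + "x" * 60000},
    {"message_id": "m2", "content": "ToolResult: " + "y" * 30000},
]
compress_tool_results(msgs, max_tokens=1000)
print(len(msgs[0]["content"]), len(msgs[1]["content"]))  # older result cut to ~10k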
File diff suppressed because one or more lines are too long
@@ -944,6 +944,18 @@ http2 = ["h2 (>=3,<5)"]
 socks = ["socksio (==1.*)"]
 zstd = ["zstandard (>=0.18.0)"]

+[[package]]
+name = "httpx-sse"
+version = "0.4.0"
+description = "Consume Server-Sent Event (SSE) messages with HTTPX."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
+    {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
+]
+
 [[package]]
 name = "huggingface-hub"
 version = "0.30.2"
@@ -1376,6 +1388,34 @@ dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
 docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"]
 tests = ["pytest", "simplejson"]

+[[package]]
+name = "mcp"
+version = "1.9.2"
+description = "Model Context Protocol SDK"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+    {file = "mcp-1.9.2-py3-none-any.whl", hash = "sha256:bc29f7fd67d157fef378f89a4210384f5fecf1168d0feb12d22929818723f978"},
+    {file = "mcp-1.9.2.tar.gz", hash = "sha256:3c7651c053d635fd235990a12e84509fe32780cd359a5bbef352e20d4d963c05"},
+]
+
+[package.dependencies]
+anyio = ">=4.5"
+httpx = ">=0.27"
+httpx-sse = ">=0.4"
+pydantic = ">=2.7.2,<3.0.0"
+pydantic-settings = ">=2.5.2"
+python-multipart = ">=0.0.9"
+sse-starlette = ">=1.6.1"
+starlette = ">=0.27"
+uvicorn = {version = ">=0.23.1", markers = "sys_platform != \"emscripten\""}
+
+[package.extras]
+cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"]
+rich = ["rich (>=13.9.4)"]
+ws = ["websockets (>=15.0.1)"]
+
 [[package]]
 name = "multidict"
 version = "6.4.3"
@@ -2210,6 +2250,30 @@ files = [
 [package.dependencies]
 typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

+[[package]]
+name = "pydantic-settings"
+version = "2.9.1"
+description = "Settings management using Pydantic"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"},
+    {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"},
+]
+
+[package.dependencies]
+pydantic = ">=2.7.0"
+python-dotenv = ">=0.21.0"
+typing-inspection = ">=0.4.0"
+
+[package.extras]
+aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"]
+azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"]
+gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"]
+toml = ["tomli (>=2.0.1)"]
+yaml = ["pyyaml (>=6.0.1)"]
+
 [[package]]
 name = "pyjwt"
 version = "2.10.1"
@@ -2884,6 +2948,27 @@ files = [
     {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
 ]

+[[package]]
+name = "sse-starlette"
+version = "2.3.6"
+description = "SSE plugin for Starlette"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "sse_starlette-2.3.6-py3-none-any.whl", hash = "sha256:d49a8285b182f6e2228e2609c350398b2ca2c36216c2675d875f81e93548f760"},
+    {file = "sse_starlette-2.3.6.tar.gz", hash = "sha256:0382336f7d4ec30160cf9ca0518962905e1b69b72d6c1c995131e0a703b436e3"},
+]
+
+[package.dependencies]
+anyio = ">=4.7.0"
+
+[package.extras]
+daphne = ["daphne (>=4.2.0)"]
+examples = ["aiosqlite (>=0.21.0)", "fastapi (>=0.115.12)", "sqlalchemy[asyncio,examples] (>=2.0.41)", "starlette (>=0.41.3)", "uvicorn (>=0.34.0)"]
+granian = ["granian (>=2.3.1)"]
+uvicorn = ["uvicorn (>=0.34.0)"]
+
 [[package]]
 name = "starlette"
 version = "0.36.3"
@@ -3634,4 +3719,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.11"
-content-hash = "0d5f69b6f57f669bd9eb878001924caeaf438951941bfbc9cf1d75d2eea14ef6"
+content-hash = "ed0ccb92ccc81ecff536968c883a2ea96d2ee8a6c06a18d0023b0cd4185f8a28"
@@ -54,6 +54,7 @@ pika = "^1.3.2"
 prometheus-client = "^0.21.1"
 langfuse = "^2.60.5"
 Pillow = "^10.0.0"
+mcp = "^1.0.0"
 sentry-sdk = {extras = ["fastapi"], version = "^2.29.1"}

 [tool.poetry.scripts]
@@ -36,3 +36,4 @@ langfuse>=2.60.5
 httpx>=0.24.0
 Pillow>=10.0.0
 sentry-sdk[fastapi]>=2.29.1
+mcp>=1.0.0
File diff suppressed because it is too large
@@ -417,7 +417,8 @@
   margin-top: 0.5em;
   margin-bottom: 0.5em;
   padding: 0.75em 1em;
-  background-color: theme('colors.slate.100');
+  /* background 95 */
+  background-color: theme('colors.background/95');
   border-radius: 0.375rem;
   overflow-x: auto;
   font-family: var(--font-mono);
@@ -439,7 +440,7 @@
   padding: 0.2em 0.4em;
   font-size: 0.85em;
   font-family: var(--font-mono);
-  background-color: theme('colors.slate.100');
+  background-color: theme('colors.background/95');
   border-radius: 3px;
   word-break: break-word;
 }
@@ -478,14 +479,14 @@
   .dark & {
     /* Code blocks in dark mode */
     & pre {
-      background-color: theme('colors.zinc.800');
-      border: 1px solid theme('colors.zinc.700');
+      background-color: theme('colors.background/95');
+      /* border: 1px solid theme('colors.zinc.700'); */
     }

     & code:not([class*='language-']) {
-      background-color: theme('colors.zinc.800');
+      background-color: theme('colors.background/95');
       color: theme('colors.zinc.200');
-      border: 1px solid theme('colors.zinc.700');
+      /* border: 1px solid theme('colors.zinc.700'); */
     }

     /* Tables in dark mode */
@@ -348,7 +348,7 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({
       className={containerClassName}
       onScroll={handleScroll}
     >
-      <div className="mx-auto max-w-3xl">
+      <div className="mx-auto max-w-3xl min-w-0">
         {displayMessages.length === 0 && !streamingTextContent && !streamingToolCall &&
           !streamingText && !currentToolCall && agentStatus === 'idle' ? (
           <div className="flex h-full items-center justify-center">
@@ -357,7 +357,7 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({
             </div>
           </div>
         ) : (
-          <div className="space-y-8">
+          <div className="space-y-8 min-w-0">
            {(() => {

              type MessageGroup = {
@@ -465,8 +465,8 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({
                   if (debugMode) {
                     return (
                       <div key={group.key} className="flex justify-end">
-                        <div className="inline-flex max-w-[85%] rounded-xl bg-primary/10 px-4 py-3">
-                          <pre className="text-xs font-mono whitespace-pre-wrap overflow-x-auto">
+                        <div className="flex max-w-[85%] rounded-xl bg-primary/10 px-4 py-3 break-words overflow-hidden">
+                          <pre className="text-xs font-mono whitespace-pre-wrap overflow-x-auto min-w-0 flex-1">
                             {message.content}
                           </pre>
                         </div>
@@ -488,10 +488,10 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({

                   return (
                     <div key={group.key} className="flex justify-end">
-                      <div className="inline-flex max-w-[85%] rounded-xl bg-primary/10 px-4 py-3">
-                        <div className="space-y-3">
+                      <div className="flex max-w-[85%] rounded-xl bg-primary/10 px-4 py-3 break-words overflow-hidden">
+                        <div className="space-y-3 min-w-0 flex-1">
                           {cleanContent && (
-                            <Markdown className="text-sm prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3">{cleanContent}</Markdown>
+                            <Markdown className="text-sm prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3 break-words overflow-wrap-anywhere">{cleanContent}</Markdown>
                           )}

                           {/* Use the helper function to render user attachments */}
@@ -508,8 +508,8 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({
                           <KortixLogo />
                         </div>
                         <div className="flex-1">
-                          <div className="inline-flex max-w-[90%] rounded-lg px-4 text-sm">
-                            <div className="space-y-2">
+                          <div className="flex max-w-[90%] rounded-lg px-4 text-sm break-words overflow-hidden">
+                            <div className="space-y-2 min-w-0 flex-1">
                               {(() => {
                                 // In debug mode, just show raw messages content
                                 if (debugMode) {
@@ -573,7 +573,7 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({

                                 elements.push(
                                   <div key={msgKey} className={assistantMessageCount > 0 ? "mt-2" : ""}>
-                                    <div className="prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3">
+                                    <div className="prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3 break-words overflow-hidden">
                                       {renderedContent}
                                     </div>
                                   </div>
@@ -628,7 +628,7 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({
                               return (
                                 <>
                                   {textBeforeTag && (
-                                    <Markdown className="text-sm prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3">{textBeforeTag}</Markdown>
+                                    <Markdown className="text-sm prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3 break-words overflow-wrap-anywhere">{textBeforeTag}</Markdown>
                                   )}
                                   {showCursor && (
                                     <span className="inline-block h-4 w-0.5 bg-primary ml-0.5 -mb-1 animate-pulse" />
@@ -728,7 +728,7 @@ export const ThreadContent: React.FC<ThreadContentProps> = ({
                             ) : (
                               <>
                                 {textBeforeTag && (
-                                  <Markdown className="text-sm prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3">{textBeforeTag}</Markdown>
+                                  <Markdown className="text-sm prose prose-sm dark:prose-invert chat-markdown max-w-none [&>:first-child]:mt-0 prose-headings:mt-3 break-words overflow-wrap-anywhere">{textBeforeTag}</Markdown>
                                 )}
                                 {showCursor && (
                                   <span className="inline-block h-4 w-0.5 bg-primary ml-0.5 -mb-1 animate-pulse" />
@@ -12,7 +12,7 @@ export type CodeBlockProps = {

 function CodeBlock({ children, className, ...props }: CodeBlockProps) {
   return (
-    <div className={cn(className)} {...props}>
+    <div className={cn('w-px flex-grow min-w-0 overflow-hidden flex', className)} {...props}>
       {children}
     </div>
   );
@@ -41,6 +41,10 @@ function CodeBlockCode({

   useEffect(() => {
     async function highlight() {
+      if (!code || typeof code !== 'string') {
+        setHighlightedHtml(null);
+        return;
+      }
       const html = await codeToHtml(code, {
         lang: language,
         theme,
@@ -60,7 +64,7 @@ function CodeBlockCode({
     highlight();
   }, [code, language, theme]);

-  const classNames = cn('[&_pre]:!bg-background/95 [&_pre]:rounded-lg [&_pre]:p-4', className);
+  const classNames = cn('[&_pre]:!bg-background/95 [&_pre]:rounded-lg [&_pre]:p-4 [&_pre]:!overflow-x-auto [&_pre]:!w-px [&_pre]:!flex-grow [&_pre]:!min-w-0 [&_pre]:!box-border [&_.shiki]:!overflow-x-auto [&_.shiki]:!w-px [&_.shiki]:!flex-grow [&_.shiki]:!min-w-0 [&_code]:!min-w-0 [&_code]:!whitespace-pre', 'w-px flex-grow min-w-0 overflow-hidden flex w-full', className);

   // SSR fallback: render plain code if not hydrated yet
   return highlightedHtml ? (
@@ -71,7 +75,7 @@ function CodeBlockCode({
     />
   ) : (
     <div className={classNames} {...props}>
-      <pre>
+      <pre className="!overflow-x-auto !w-px !flex-grow !min-w-0 !box-border">
         <code>{code}</code>
       </pre>
     </div>
@@ -47,11 +47,11 @@ const INITIAL_COMPONENTS: Partial<Components> = {
       const language = extractLanguage(className);

       return (
-        <CodeBlock className="rounded-md overflow-hidden my-4 border border-zinc-200 dark:border-zinc-800">
+        <CodeBlock className="rounded-md overflow-hidden my-4 border border-zinc-200 dark:border-zinc-800 max-w-full min-w-0 w-full">
           <CodeBlockCode
             code={children as string}
             language={language}
-            className="text-sm overflow-x-auto"
+            className="text-sm"
           />
         </CodeBlock>
       );
@@ -128,15 +128,33 @@ export function useCachedFile<T = string>(
     url.searchParams.append('path', normalizedPath);

     // Fetch with authentication
-    const response = await fetch(url.toString(), {
-      headers: {
-        'Authorization': `Bearer ${session?.access_token}`,
-      },
-    });
+    const attemptFetch = async (isRetry: boolean = false): Promise<Response> => {
+      const response = await fetch(url.toString(), {
+        headers: {
+          'Authorization': `Bearer ${session?.access_token}`
+        }
+      });
+
+      if (!response.ok) {
+        const responseText = await response.text();
+        const errorMessage = `Failed to load file: ${response.status} ${response.statusText}`;
+
+        // Check if this is a workspace initialization error and we haven't retried yet
+        const isWorkspaceNotRunning = responseText.includes('Workspace is not running');
+        if (isWorkspaceNotRunning && !isRetry) {
+          console.log(`[FILE CACHE] Workspace not ready, retrying in 2s for ${normalizedPath}`);
+          await new Promise(resolve => setTimeout(resolve, 2000));
+          return attemptFetch(true);
+        }
+
+        console.error(`[FILE CACHE] Failed response for ${normalizedPath}: Status ${response.status}`);
+        throw new Error(errorMessage);
+      }
+
+      return response;
+    };

-    if (!response.ok) {
-      throw new Error(`Failed to load file: ${response.status} ${response.statusText}`);
-    }
+    const response = await attemptFetch();

     // Process content based on contentType
     let content;
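This hunk and the two below give each file-cache call site the same retry-once shape: attempt the request, and if the failure body says the workspace sandbox has not started yet, wait two seconds and retry a single time before giving up. The same pattern, sketched in Python for brevity; the function name and URL handling are illustrative, not part of the codebase:

import time
import urllib.request
import urllib.error

def fetch_with_one_retry(url: str, token: str, is_retry: bool = False) -> bytes:
    # Illustrative Python analogue of attemptFetch above; the real code is TypeScript.
    request = urllib.request.Request(url, headers={"Authorization": f"Bearer {token}"})
    try:
        with urllib.request.urlopen(request) as response:
            return response.read()
    except urllib.error.HTTPError as error:
        body = error.read().decode("utf-8", errors="replace")
        if "Workspace is not running" in body and not is_retry:
            time.sleep(2)  # give the sandbox time to come up, then retry once
            return fetch_with_one_retry(url, token, is_retry=True)
        raise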
@@ -502,13 +520,32 @@ export const FileCache = {
       // Properly encode the path parameter for UTF-8 support
       url.searchParams.append('path', normalizedPath);

-      const response = await fetch(url.toString(), {
-        headers: {
-          'Authorization': `Bearer ${token}`
-        },
-      });
+      const attemptFetch = async (isRetry: boolean = false): Promise<Response> => {
+        const response = await fetch(url.toString(), {
+          headers: {
+            'Authorization': `Bearer ${token}`
+          },
+        });
+
+        if (!response.ok) {
+          const responseText = await response.text();
+          const errorMessage = `Failed to preload file: ${response.status}`;
+
+          // Check if this is a workspace initialization error and we haven't retried yet
+          const isWorkspaceNotRunning = responseText.includes('Workspace is not running');
+          if (isWorkspaceNotRunning && !isRetry) {
+            console.log(`[FILE CACHE] Workspace not ready during preload, retrying in 2s for ${normalizedPath}`);
+            await new Promise(resolve => setTimeout(resolve, 2000));
+            return attemptFetch(true);
+          }
+
+          throw new Error(errorMessage);
+        }
+
+        return response;
+      };

-      if (!response.ok) throw new Error(`Failed to preload file: ${response.status}`);
+      const response = await attemptFetch();

       // Determine how to process the content based on file type
       const extension = path.split('.').pop()?.toLowerCase();
@@ -623,16 +660,33 @@ export async function getCachedFile(

   console.log(`[FILE CACHE] Fetching file: ${url.toString()}`);

-  const response = await fetch(url.toString(), {
-    headers: {
-      'Authorization': `Bearer ${options.token}`
-    }
-  });
+  const attemptFetch = async (isRetry: boolean = false): Promise<Response> => {
+    const response = await fetch(url.toString(), {
+      headers: {
+        'Authorization': `Bearer ${options.token}`
+      }
+    });
+
+    if (!response.ok) {
+      const responseText = await response.text();
+      const errorMessage = `Failed to load file: ${response.status} ${response.statusText}`;
+
+      // Check if this is a workspace initialization error and we haven't retried yet
+      const isWorkspaceNotRunning = responseText.includes('Workspace is not running');
+      if (isWorkspaceNotRunning && !isRetry) {
+        console.log(`[FILE CACHE] Workspace not ready, retrying in 2s for ${normalizedPath}`);
+        await new Promise(resolve => setTimeout(resolve, 2000));
+        return attemptFetch(true);
+      }
+
+      console.error(`[FILE CACHE] Failed response for ${normalizedPath}: Status ${response.status}`);
+      throw new Error(errorMessage);
+    }
+
+    return response;
+  };

-  if (!response.ok) {
-    console.error(`[FILE CACHE] Failed response for ${normalizedPath}: Status ${response.status}`);
-    throw new Error(`Failed to load file: ${response.status} ${response.statusText}`);
-  }
+  const response = await attemptFetch();

   // Process content based on type
   let content;
@@ -688,51 +742,66 @@ export async function fetchFileContent(
   const requestId = Math.random().toString(36).substring(2, 9);
   console.log(`[FILE CACHE] Fetching fresh content for ${sandboxId}:${filePath}`);

-  try {
-    // Prepare the API URL
-    const apiUrl = `${process.env.NEXT_PUBLIC_BACKEND_URL}/sandboxes/${sandboxId}/files/content`;
-    const url = new URL(apiUrl);
-    url.searchParams.append('path', filePath);
-
-    // Set up fetch options
-    const fetchOptions: RequestInit = {
-      method: 'GET',
-      headers: {
-        Authorization: `Bearer ${token}`,
-      },
-    };
-
-    // Execute fetch
-    const response = await fetch(url.toString(), fetchOptions);
-    if (!response.ok) {
-      const errorText = await response.text();
-      throw new Error(`Failed to fetch file content: ${response.status} ${errorText}`);
-    }
-
-    // CRITICAL: Detect correct response handling based on file type
-    // Excel files, PDFs and other binary documents should be handled as blobs
-    const extension = filePath.split('.').pop()?.toLowerCase();
-    const isBinaryFile = ['xlsx', 'xls', 'docx', 'doc', 'pptx', 'ppt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'zip'].includes(extension || '');
-
-    // Handle response based on content type
-    if (contentType === 'blob' || isBinaryFile) {
-      const blob = await response.blob();
-
-      // Set correct MIME type for known file types
-      if (extension) {
-        const mimeType = FileCache.getMimeType(filePath);
-        if (mimeType && mimeType !== blob.type) {
-          // Create a new blob with correct type
-          return new Blob([blob], { type: mimeType });
-        }
-      }
-
-      return blob;
-    } else if (contentType === 'json') {
-      return await response.json();
-    } else {
-      return await response.text();
-    }
+  const attemptFetch = async (isRetry: boolean = false): Promise<string | Blob | any> => {
+    try {
+      // Prepare the API URL
+      const apiUrl = `${process.env.NEXT_PUBLIC_BACKEND_URL}/sandboxes/${sandboxId}/files/content`;
+      const url = new URL(apiUrl);
+      url.searchParams.append('path', filePath);
+
+      // Set up fetch options
+      const fetchOptions: RequestInit = {
+        method: 'GET',
+        headers: {
+          Authorization: `Bearer ${token}`,
+        },
+      };
+
+      // Execute fetch
+      const response = await fetch(url.toString(), fetchOptions);
+      if (!response.ok) {
+        const errorText = await response.text();
+        throw new Error(`Failed to fetch file content: ${response.status} ${errorText}`);
+      }
+
+      // CRITICAL: Detect correct response handling based on file type
+      // Excel files, PDFs and other binary documents should be handled as blobs
+      const extension = filePath.split('.').pop()?.toLowerCase();
+      const isBinaryFile = ['xlsx', 'xls', 'docx', 'doc', 'pptx', 'ppt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'zip'].includes(extension || '');
+
+      // Handle response based on content type
+      if (contentType === 'blob' || isBinaryFile) {
+        const blob = await response.blob();
+
+        // Set correct MIME type for known file types
+        if (extension) {
+          const mimeType = FileCache.getMimeType(filePath);
+          if (mimeType && mimeType !== blob.type) {
+            // Create a new blob with correct type
+            return new Blob([blob], { type: mimeType });
+          }
+        }
+
+        return blob;
+      } else if (contentType === 'json') {
+        return await response.json();
+      } else {
+        return await response.text();
+      }
+    } catch (error: any) {
+      // Check if this is a workspace initialization error and we haven't retried yet
+      const isWorkspaceNotRunning = error.message?.includes('Workspace is not running');
+      if (isWorkspaceNotRunning && !isRetry) {
+        console.log(`[FILE CACHE] Workspace not ready, retrying in 2s for ${filePath}`);
+        await new Promise(resolve => setTimeout(resolve, 2000));
+        return attemptFetch(true);
+      }
+      throw error;
+    }
+  };
+
+  try {
+    return await attemptFetch();
   } catch (error) {
     console.error(`[FILE CACHE] Error fetching file content:`, error);
     throw error;