Merge pull request #781 from buster-so/hot-fix-chats-stalling

hotfix: tool sorting
Authored by dal on 2025-09-02 16:10:13 -06:00; committed by GitHub
commit 50dc34bf5f
GPG Key ID: B5690EEEBB952194
3 changed files with 123 additions and 50 deletions

View File

@@ -1,15 +1,13 @@
 import { updateMessage, updateMessageEntries } from '@buster/database';
 import type { ToolCallOptions } from 'ai';
 import type { UpdateMessageEntriesParams } from '../../../../../database/src/queries/messages/update-message-entries';
-import { createRawToolResultEntry } from '../../shared/create-raw-llm-tool-result-entry';
 import {
   createMessageUserClarifyingQuestionRawLlmMessageEntry,
   createMessageUserClarifyingQuestionResponseMessage,
 } from './helpers/message-user-clarifying-question-transform-helper';
-import {
-  MESSAGE_USER_CLARIFYING_QUESTION_TOOL_NAME,
-  type MessageUserClarifyingQuestionContext,
-  type MessageUserClarifyingQuestionState,
+import type {
+  MessageUserClarifyingQuestionContext,
+  MessageUserClarifyingQuestionState,
 } from './message-user-clarifying-question';
 // Factory function that creates a type-safe callback for the specific agent context
@@ -34,16 +32,6 @@ export function createMessageUserClarifyingQuestionStart(
       options.toolCallId
     );
-    // Create the tool result immediately with success: true
-    // This ensures it's always present even if the stream terminates early
-    const rawToolResultEntry = createRawToolResultEntry(
-      options.toolCallId,
-      MESSAGE_USER_CLARIFYING_QUESTION_TOOL_NAME,
-      {
-        success: true,
-      }
-    );
     const entries: UpdateMessageEntriesParams = {
       messageId: context.messageId,
     };
@@ -52,16 +40,10 @@ export function createMessageUserClarifyingQuestionStart(
       entries.responseMessages = [responseEntry];
     }
-    // Include both the tool call and tool result in raw LLM messages
-    // Since it's an upsert, sending both together ensures completeness
-    const rawLlmMessages = [];
+    // Only include the tool call message, not the result
+    // The result will be added in the execute function
     if (rawLlmMessage) {
-      rawLlmMessages.push(rawLlmMessage);
-    }
-    rawLlmMessages.push(rawToolResultEntry);
-    if (rawLlmMessages.length > 0) {
-      entries.rawLlmMessages = rawLlmMessages;
+      entries.rawLlmMessages = [rawLlmMessage];
     }
     try {

View File

@@ -1,15 +1,13 @@
 import { updateMessage, updateMessageEntries } from '@buster/database';
 import type { ToolCallOptions } from 'ai';
 import type { UpdateMessageEntriesParams } from '../../../../../database/src/queries/messages/update-message-entries';
-import { createRawToolResultEntry } from '../../shared/create-raw-llm-tool-result-entry';
 import {
   createRespondWithoutAssetCreationRawLlmMessageEntry,
   createRespondWithoutAssetCreationResponseMessage,
 } from './helpers/respond-without-asset-creation-transform-helper';
-import {
-  RESPOND_WITHOUT_ASSET_CREATION_TOOL_NAME,
-  type RespondWithoutAssetCreationContext,
-  type RespondWithoutAssetCreationState,
+import type {
+  RespondWithoutAssetCreationContext,
+  RespondWithoutAssetCreationState,
 } from './respond-without-asset-creation-tool';
 // Factory function that creates a type-safe callback for the specific agent context
@@ -32,16 +30,6 @@ export function createRespondWithoutAssetCreationStart(
       options.toolCallId
     );
-    // Create the tool result immediately with success: true
-    // This ensures it's always present even if the stream terminates early
-    const rawToolResultEntry = createRawToolResultEntry(
-      options.toolCallId,
-      RESPOND_WITHOUT_ASSET_CREATION_TOOL_NAME,
-      {
-        success: true,
-      }
-    );
     const entries: UpdateMessageEntriesParams = {
       messageId: context.messageId,
     };
@@ -50,16 +38,10 @@ export function createRespondWithoutAssetCreationStart(
       entries.responseMessages = [responseEntry];
     }
-    // Include both the tool call and tool result in raw LLM messages
-    // Since it's an upsert, sending both together ensures completeness
-    const rawLlmMessages = [];
+    // Only include the tool call message, not the result
+    // The result will be added in the execute function
     if (rawLlmMessage) {
-      rawLlmMessages.push(rawLlmMessage);
-    }
-    rawLlmMessages.push(rawToolResultEntry);
-    if (rawLlmMessages.length > 0) {
-      entries.rawLlmMessages = rawLlmMessages;
+      entries.rawLlmMessages = [rawLlmMessage];
     }
     try {
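Both start callbacks above now write only the tool-call entry when the tool starts; the matching tool result is deferred to the tool's execute function, so a result can no longer be upserted ahead of (or without) the call it belongs to. A minimal sketch of the execute-side counterpart this implies is below. The function name persistToolResult is hypothetical; createRawToolResultEntry, updateMessageEntries, and the { success: true } payload come from the code removed above, and the repository's real execute callbacks may differ.

import { updateMessageEntries } from '@buster/database';
import { createRawToolResultEntry } from '../../shared/create-raw-llm-tool-result-entry';
import { RESPOND_WITHOUT_ASSET_CREATION_TOOL_NAME } from './respond-without-asset-creation-tool';

// Hypothetical execute-side upsert: the raw tool result entry is created only
// after the tool has finished, so it always lands after the tool call that the
// start callback already persisted.
async function persistToolResult(messageId: string, toolCallId: string): Promise<void> {
  const rawToolResultEntry = createRawToolResultEntry(
    toolCallId,
    RESPOND_WITHOUT_ASSET_CREATION_TOOL_NAME,
    { success: true }
  );
  await updateMessageEntries({
    messageId,
    rawLlmMessages: [rawToolResultEntry],
  });
}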

View File

@@ -129,17 +129,125 @@ function getRawLlmMessageKey(message: ModelMessage): string {
   return `${role}:${toolCallIds}`;
 }
+/**
+ * Ensures tool calls always precede their corresponding tool results
+ * This fixes any ordering issues that may occur during concurrent updates
+ */
+function sortToolCallsBeforeResults(messages: ModelMessage[]): ModelMessage[] {
+  // Map to store tool call/result pairs
+  const toolPairs = new Map<
+    string,
+    { call?: ModelMessage; result?: ModelMessage; callIndex?: number; resultIndex?: number }
+  >();
+  const standaloneMessages: { message: ModelMessage; index: number }[] = [];
+  // First pass: identify tool calls and results
+  messages.forEach((msg, index) => {
+    const toolCallIds = getToolCallIds(msg.content);
+    if (toolCallIds) {
+      // This message has tool call IDs
+      const toolCallIdList = toolCallIds.split(',');
+      for (const toolCallId of toolCallIdList) {
+        if (!toolCallId) continue;
+        const pair = toolPairs.get(toolCallId) || {};
+        if (msg.role === 'assistant') {
+          // This is a tool call
+          pair.call = msg;
+          pair.callIndex = index;
+        } else if (msg.role === 'tool') {
+          // This is a tool result
+          pair.result = msg;
+          pair.resultIndex = index;
+        }
+        toolPairs.set(toolCallId, pair);
+      }
+    } else {
+      // Standalone message without tool call IDs
+      standaloneMessages.push({ message: msg, index });
+    }
+  });
+  // Build the sorted array
+  const sorted: ModelMessage[] = [];
+  const processedIndices = new Set<number>();
+  // Process messages in original order, but ensure tool pairs are correctly ordered
+  messages.forEach((msg, index) => {
+    if (processedIndices.has(index)) {
+      return; // Already processed as part of a tool pair
+    }
+    const toolCallIds = getToolCallIds(msg.content);
+    if (toolCallIds) {
+      const toolCallIdList = toolCallIds.split(',');
+      for (const toolCallId of toolCallIdList) {
+        if (!toolCallId) continue;
+        const pair = toolPairs.get(toolCallId);
+        if (!pair) continue;
+        // If this is a tool result that appears before its call, skip it for now
+        if (
+          msg.role === 'tool' &&
+          pair.call &&
+          pair.callIndex !== undefined &&
+          pair.callIndex > index &&
+          !processedIndices.has(pair.callIndex)
+        ) {
+          continue; // Will be added when we process the call
+        }
+        // If this is a tool call, add both call and result in correct order
+        if (msg.role === 'assistant' && pair.call && !processedIndices.has(index)) {
+          sorted.push(pair.call);
+          processedIndices.add(index);
+          // Add the corresponding result immediately after
+          if (
+            pair.result &&
+            pair.resultIndex !== undefined &&
+            !processedIndices.has(pair.resultIndex)
+          ) {
+            sorted.push(pair.result);
+            processedIndices.add(pair.resultIndex);
+          }
+        }
+        // If this is an orphaned tool result (no corresponding call), add it
+        if (msg.role === 'tool' && !pair.call && !processedIndices.has(index)) {
+          sorted.push(msg);
+          processedIndices.add(index);
+        }
+      }
+    } else {
+      // Standalone message
+      sorted.push(msg);
+      processedIndices.add(index);
+    }
+  });
+  return sorted;
+}
 /**
  * Merges raw LLM messages by combination of 'role' and 'toolCallId', preserving order
  * Messages with the same role and tool call IDs replace existing ones at their original position
  * New messages are appended
+ * Tool calls are guaranteed to precede their corresponding tool results
  */
 export function mergeRawLlmMessages(
   existing: ModelMessage[],
   updates: ModelMessage[]
 ): ModelMessage[] {
   if (!existing || existing.length === 0) {
-    return updates;
+    return sortToolCallsBeforeResults(updates);
   }
   // Create a map of new messages by their unique key
@@ -175,5 +283,6 @@ export function mergeRawLlmMessages(
     }
   }
-  return merged;
+  // Ensure tool calls always precede their results
+  return sortToolCallsBeforeResults(merged);
 }
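To see the new guarantee in action, here is a hedged usage sketch of mergeRawLlmMessages for an update batch in which the tool result was flushed before its tool call, the kind of interleaving that concurrent start/execute updates could produce. The import path, tool name, and content-part shapes below are assumptions (they follow the AI SDK's tool-call / tool-result message parts), not code taken from this repository.

import type { ModelMessage } from 'ai';
// Hypothetical module path for the merge helpers shown above.
import { mergeRawLlmMessages } from './merge-raw-llm-messages';

// An out-of-order update batch: the tool result arrives before its tool call.
// The tool name and payloads are illustrative only.
const updates: ModelMessage[] = [
  {
    role: 'tool',
    content: [
      {
        type: 'tool-result',
        toolCallId: 'call_1',
        toolName: 'messageUserClarifyingQuestion',
        output: { type: 'json', value: { success: true } },
      },
    ],
  },
  {
    role: 'assistant',
    content: [
      {
        type: 'tool-call',
        toolCallId: 'call_1',
        toolName: 'messageUserClarifyingQuestion',
        input: { question: 'Which dashboard did you mean?' },
      },
    ],
  },
];

// With no existing messages, mergeRawLlmMessages now routes the batch through
// sortToolCallsBeforeResults, so the assistant tool call comes out first.
const merged = mergeRawLlmMessages([], updates);
console.log(merged.map((m) => m.role)); // ['assistant', 'tool']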