Refactor updateMessageEntries for cache-first streaming approach

- Updated logic to prioritize cache as the source of truth during streaming, with asynchronous database updates for persistence.
- Improved error handling for background database updates, ensuring cache validity even if DB update fails.
- Cleaned up mergeResponseMessages and mergeReasoningMessages functions by removing redundant code.
This commit is contained in:
dal 2025-08-22 20:43:44 -06:00
parent 0d44a230d7
commit ce8d9a3064
No known key found for this signature in database
GPG Key ID: 16F4B0E1E9F61122
2 changed files with 14 additions and 18 deletions

View File

@@ -19,11 +19,9 @@ export function mergeResponseMessages(
// Create a map of new messages by ID // Create a map of new messages by ID
const updateMap = new Map<string, ChatMessageResponseMessage>(); const updateMap = new Map<string, ChatMessageResponseMessage>();
const updateMap = new Map<string, ChatMessageResponseMessage>();
for (const msg of updates) { for (const msg of updates) {
updateMap.set(msg.id, msg); updateMap.set(msg.id, msg);
updateIds.add(msg.id);
} }
// Keep track of which IDs we've already processed // Keep track of which IDs we've already processed
@@ -67,11 +65,9 @@ export function mergeReasoningMessages(
// Create a map of new messages by ID // Create a map of new messages by ID
const updateMap = new Map<string, ChatMessageReasoningMessage>(); const updateMap = new Map<string, ChatMessageReasoningMessage>();
const updateMap = new Map<string, ChatMessageReasoningMessage>();
for (const msg of updates) { for (const msg of updates) {
updateMap.set(msg.id, msg); updateMap.set(msg.id, msg);
updateIds.add(msg.id);
} }
// Keep track of which IDs we've already processed // Keep track of which IDs we've already processed

View File

@@ -22,8 +22,8 @@ const UpdateMessageEntriesSchema = z.object({
export type UpdateMessageEntriesParams = z.infer<typeof UpdateMessageEntriesSchema>; export type UpdateMessageEntriesParams = z.infer<typeof UpdateMessageEntriesSchema>;
/** /**
* Updates message entries using TypeScript-based merge logic with write-through caching. * Updates message entries with cache-first approach for streaming.
* Fetches existing entries from cache/database, merges with updates, and saves back. * Cache is the source of truth during streaming, DB is updated for persistence.
* *
* Merge logic: * Merge logic:
* - responseMessages: upsert by 'id' field, maintaining order * - responseMessages: upsert by 'id' field, maintaining order
@@ -57,7 +57,11 @@ export async function updateMessageEntries({
: existingEntries.rawLlmMessages, : existingEntries.rawLlmMessages,
}; };
// Update database with merged entries // Update cache immediately (cache is source of truth during streaming)
messageEntriesCache.set(messageId, mergedEntries);
// Update database asynchronously for persistence (fire-and-forget)
// If this fails, cache still has the latest state for next update
const updateData: Record<string, unknown> = { const updateData: Record<string, unknown> = {
updatedAt: new Date().toISOString(), updatedAt: new Date().toISOString(),
}; };
@@ -74,18 +78,14 @@ export async function updateMessageEntries({
updateData.rawLlmMessages = mergedEntries.rawLlmMessages; updateData.rawLlmMessages = mergedEntries.rawLlmMessages;
} }
await db // Non-blocking DB update - don't await
.update(messages) db.update(messages)
.set(updateData) .set(updateData)
.where(and(eq(messages.id, messageId), isNull(messages.deletedAt))); .where(and(eq(messages.id, messageId), isNull(messages.deletedAt)))
.catch((error) => {
await db // Log but don't fail - cache has the truth
.update(messages) console.error('Background DB update failed (cache still valid):', error);
.set(updateData) });
.where(and(eq(messages.id, messageId), isNull(messages.deletedAt)));
// Write-through: update cache with merged entries only after successful DB update
messageEntriesCache.set(messageId, mergedEntries);
return { success: true }; return { success: true };
} catch (error) { } catch (error) {