A React Native app for the ultimate thinking partner.

feat(ui): add live status indicators and improve streaming UX

- Add live status indicator that shows current action (e.g., "co is searching the web")
- Status appears immediately with a fade-in animation when a message is sent
- Update status dynamically based on tool calls being executed
- Add parentheses to all status indicators for consistency
- Show "(co is thinking)", "(co is searching the web)", etc.
- Tool call labels now use present tense while running and past tense once complete
- Remove duplicate streamingStep display
- Fix viewport jumping when expanding/collapsing blocks with maintainVisibleContentPosition
- Auto-scroll to bottom on initial message load without animation
- Unify spacing between streaming and historical message display

+382 -400
+340 -358
App.tsx
··· 104 104 const streamCompleteRef = useRef(false); 105 105 const rainbowAnimValue = useRef(new Animated.Value(0)).current; 106 106 const [isInputFocused, setIsInputFocused] = useState(false); 107 + const [currentStreamingStatus, setCurrentStreamingStatus] = useState<string>('thinking'); 108 + const statusFadeAnim = useRef(new Animated.Value(0)).current; 107 109 108 - // Token buffering for smooth streaming 109 - const tokenBufferRef = useRef<string>(''); 110 - const reasoningBufferRef = useRef<string>(''); 110 + // Streaming state 111 111 const streamingReasoningRef = useRef<string>(''); 112 - const bufferIntervalRef = useRef<NodeJS.Timeout | null>(null); 113 112 const scrollIntervalRef = useRef<NodeJS.Timeout | null>(null); 114 - 115 - // Character reveal configuration 116 - const CHAR_REVEAL_INTERVAL_MS = 15; // How often to reveal characters (ms) 117 - const CHARS_PER_REVEAL = 2; // How many characters to reveal each interval 118 113 119 114 // HITL approval state 120 115 const [approvalVisible, setApprovalVisible] = useState(false); ··· 129 124 130 125 const toolCallMsgIdsRef = useRef<Map<string, string>>(new Map()); 131 126 const toolReturnMsgIdsRef = useRef<Map<string, string>>(new Map()); 132 - const pendingReasoningRef = useRef<string>(''); // Store reasoning for next tool call 127 + const currentReasoningIdRef = useRef<string | null>(null); 133 128 134 129 // Layout state for responsive design 135 130 const [screenData, setScreenData] = useState(Dimensions.get('window')); ··· 205 200 // Cleanup intervals on unmount 206 201 useEffect(() => { 207 202 return () => { 208 - if (bufferIntervalRef.current) { 209 - clearInterval(bufferIntervalRef.current); 210 - } 211 203 if (scrollIntervalRef.current) { 212 204 clearInterval(scrollIntervalRef.current); 213 205 } ··· 351 343 setEarliestCursor(loadedMessages[0].id); 352 344 pendingJumpToBottomRef.current = true; 353 345 pendingJumpRetriesRef.current = 3; 346 + // Immediately scroll to bottom without animation on 
initial load 347 + setTimeout(() => { 348 + scrollViewRef.current?.scrollToEnd({ animated: false }); 349 + }, 100); 354 350 } 355 351 } 356 352 setHasMoreBefore(loadedMessages.length === (before ? PAGE_SIZE : INITIAL_LOAD_LIMIT)); ··· 456 452 457 453 // Helper to flush accumulated streaming content into a message 458 454 const flushStreamingContent = useCallback(() => { 459 - const accumulatedMessage = streamingMessage + tokenBufferRef.current; 460 - const accumulatedReasoning = streamingReasoningRef.current + reasoningBufferRef.current; 455 + const accumulatedMessage = streamingMessage; 456 + const accumulatedReasoning = streamingReasoningRef.current; 461 457 462 458 if (accumulatedMessage || accumulatedReasoning) { 463 459 const newMessage: LettaMessage = { 464 460 id: streamingMessageId || `msg_${Date.now()}`, 465 461 role: 'assistant', 462 + message_type: 'assistant_message', 466 463 content: accumulatedMessage, 467 464 created_at: new Date().toISOString(), 468 465 reasoning: accumulatedReasoning || undefined, ··· 473 470 // Clear streaming state 474 471 setStreamingMessage(''); 475 472 setStreamingReasoning(''); 476 - tokenBufferRef.current = ''; 477 - reasoningBufferRef.current = ''; 478 473 streamingReasoningRef.current = ''; 479 474 setStreamingMessageId(''); 480 475 } ··· 527 522 } 528 523 529 524 if (contentText) { 530 - // Add to buffer instead of directly to state for smooth streaming 531 - tokenBufferRef.current += contentText; 525 + // Display content immediately for maximum speed 526 + setStreamingMessage(prev => prev + contentText); 532 527 setStreamingStep(''); 533 528 } 534 529 } else if (chunk.message_type === 'reasoning_message' && chunk.reasoning) { 535 - // Add to buffer for smooth character-by-character reveal 536 - reasoningBufferRef.current += chunk.reasoning; 537 - } else if ((chunk.message_type === 'tool_call_message' || chunk.message_type === 'tool_call') && chunk.tool_call) { 538 - // Tool call: save accumulated reasoning for attachment 
to tool call, don't flush content yet 539 - const accumulatedReasoning = streamingReasoningRef.current + reasoningBufferRef.current; 530 + // Accumulate reasoning content 531 + streamingReasoningRef.current += chunk.reasoning; 532 + setStreamingReasoning(prev => prev + chunk.reasoning); 540 533 541 - // Save reasoning for the first tool call 542 - if (accumulatedReasoning && !pendingReasoningRef.current) { 543 - pendingReasoningRef.current = accumulatedReasoning; 534 + // Create or update reasoning message 535 + if (!currentReasoningIdRef.current) { 536 + const reasoningId = `reasoning-${Date.now()}`; 537 + currentReasoningIdRef.current = reasoningId; 538 + setMessages(prev => [...prev, { 539 + id: reasoningId, 540 + role: 'assistant', 541 + message_type: 'reasoning_message', 542 + content: '', 543 + reasoning: streamingReasoningRef.current, 544 + created_at: new Date().toISOString(), 545 + }]); 546 + } else { 547 + // Update existing reasoning message 548 + setMessages(prev => prev.map(m => 549 + m.id === currentReasoningIdRef.current 550 + ? { ...m, reasoning: streamingReasoningRef.current } 551 + : m 552 + )); 544 553 } 554 + } else if ((chunk.message_type === 'tool_call_message' || chunk.message_type === 'tool_call') && chunk.tool_call) { 555 + // Clear reasoning tracking when we hit a tool call 556 + currentReasoningIdRef.current = null; 545 557 546 558 const callObj = chunk.tool_call.function || chunk.tool_call; 547 559 const toolName = callObj?.name || callObj?.tool_name || 'tool'; ··· 558 570 const toolLine = `${toolName}(${formatArgsPython(args)})`; 559 571 const stepId = (chunk as any).step ? 
String((chunk as any).step) : 'no-step'; 560 572 573 + // Update streaming status based on tool name 574 + const statusMap: Record<string, string> = { 575 + 'web_search': 'searching the web', 576 + 'conversation_search': 'searching conversation history', 577 + 'fetch_webpage': 'fetching webpage', 578 + 'memory_insert': 'inserting into memory', 579 + 'memory_replace': 'updating memory', 580 + 'send_message': 'sending message', 581 + }; 582 + setCurrentStreamingStatus(statusMap[toolName] || `calling ${toolName}`); 583 + 561 584 setMessages(prev => { 562 585 const existingId = toolCallMsgIdsRef.current.get(stepId); 563 586 if (existingId) { ··· 565 588 return prev.map(m => m.id === existingId ? { ...m, content: toolLine } : m); 566 589 } 567 590 568 - // Create new tool call message with reasoning attached to the first tool call 591 + // Create new tool call message 569 592 const newId = `toolcall-${stepId}-${Date.now()}`; 570 - const isFirstToolCall = toolCallMsgIdsRef.current.size === 0; 571 593 toolCallMsgIdsRef.current.set(stepId, newId); 572 594 573 595 return [...prev, { ··· 576 598 content: toolLine, 577 599 created_at: new Date().toISOString(), 578 600 message_type: chunk.message_type, 579 - reasoning: isFirstToolCall ? 
pendingReasoningRef.current : undefined, 580 601 step_id: stepId, 581 602 }]; 582 603 }); 583 - 584 - // Clear reasoning and streaming state after attaching to first tool call 585 - if (toolCallMsgIdsRef.current.size === 1 && pendingReasoningRef.current) { 586 - setStreamingReasoning(''); 587 - reasoningBufferRef.current = ''; 588 - streamingReasoningRef.current = ''; 589 - } 590 - 591 - setStreamingStep(`Calling ${toolName}...`); 592 604 } else if (chunk.message_type === 'tool_return_message' || chunk.message_type === 'tool_response') { 593 605 // Tool return: add as standalone message 594 606 const result = (chunk as any).tool_response || (chunk as any).toolReturn || (chunk as any).result; ··· 617 629 message_type: chunk.message_type, 618 630 }]; 619 631 }); 620 - 621 - setStreamingStep('Processing result...'); 622 632 } else if (chunk.message_type === 'approval_request_message') { 623 633 // Handle approval request - flush content first 624 634 flushStreamingContent(); ··· 677 687 const tempUserMessage: LettaMessage = { 678 688 id: `temp-${Date.now()}`, 679 689 role: 'user', 690 + message_type: 'user_message', 680 691 content: tempMessageContent, 681 692 created_at: new Date().toISOString(), 682 693 } as LettaMessage; ··· 702 713 setStreamingReasoning(''); 703 714 setIsReasoningStreaming(true); 704 715 setExpandedReasoning(prev => new Set(prev).add('streaming')); 705 - tokenBufferRef.current = ''; 706 - reasoningBufferRef.current = ''; 707 716 streamingReasoningRef.current = ''; 708 717 streamCompleteRef.current = false; 718 + setCurrentStreamingStatus('thinking'); 719 + 720 + // Fade in the status indicator 721 + statusFadeAnim.setValue(0); 722 + Animated.timing(statusFadeAnim, { 723 + toValue: 1, 724 + duration: 300, 725 + useNativeDriver: true, 726 + }).start(); 709 727 710 728 // Animate spacer growing to push user message up (push previous content out of view) 711 729 const targetHeight = Math.max(containerHeight * 0.9, 450); ··· 732 750 } 733 751 }, 400); 
734 752 735 - // Start smooth token release interval 736 - if (bufferIntervalRef.current) { 737 - clearInterval(bufferIntervalRef.current); 738 - } 739 - bufferIntervalRef.current = setInterval(() => { 740 - // Reveal reasoning characters with adaptive speed based on buffer size 741 - if (reasoningBufferRef.current.length > 0) { 742 - // Speed up if buffer is large: 2 chars normally, up to 10 chars if buffer > 500 743 - const speedMultiplier = reasoningBufferRef.current.length > 500 ? 5 : 744 - reasoningBufferRef.current.length > 200 ? 3 : 1; 745 - const reasoningChunkSize = Math.min(CHARS_PER_REVEAL * speedMultiplier, reasoningBufferRef.current.length); 746 - const reasoningChunk = reasoningBufferRef.current.slice(0, reasoningChunkSize); 747 - reasoningBufferRef.current = reasoningBufferRef.current.slice(reasoningChunkSize); 748 - streamingReasoningRef.current += reasoningChunk; 749 - setStreamingReasoning(prev => prev + reasoningChunk); 750 - } 751 - 752 - // Reveal message characters with adaptive speed based on buffer size 753 - if (tokenBufferRef.current.length > 0) { 754 - // Speed up if buffer is large: 2 chars normally, up to 10 chars if buffer > 500 755 - const speedMultiplier = tokenBufferRef.current.length > 500 ? 5 : 756 - tokenBufferRef.current.length > 200 ? 
3 : 1; 757 - const chunkSize = Math.min(CHARS_PER_REVEAL * speedMultiplier, tokenBufferRef.current.length); 758 - const chunk = tokenBufferRef.current.slice(0, chunkSize); 759 - tokenBufferRef.current = tokenBufferRef.current.slice(chunkSize); 760 - setStreamingMessage(prev => prev + chunk); 761 - } else if (streamCompleteRef.current && reasoningBufferRef.current.length === 0) { 762 - // Buffer is empty and streaming is done - finalize 763 - if (bufferIntervalRef.current) { 764 - clearInterval(bufferIntervalRef.current); 765 - bufferIntervalRef.current = null; 766 - } 767 - 768 - // Get the final content and add it to messages 769 - setStreamingMessage(currentContent => { 770 - const finalReasoning = streamingReasoningRef.current || ''; 771 - console.log('Finalizing message with reasoning:', finalReasoning); 772 - 773 - const finalMessage: LettaMessage = { 774 - id: streamingMessageId || `msg_${Date.now()}`, 775 - role: 'assistant', 776 - content: currentContent, 777 - created_at: new Date().toISOString(), 778 - reasoning: finalReasoning || undefined, 779 - }; 780 - 781 - console.log('Final message object:', finalMessage); 782 - 783 - // Add final message to messages array first 784 - setMessages(prev => [...prev, finalMessage]); 785 - 786 - // Transfer reasoning expansion state from 'streaming' to the actual message ID 787 - setExpandedReasoning(prev => { 788 - if (prev.has('streaming')) { 789 - const next = new Set(prev); 790 - next.delete('streaming'); 791 - next.add(finalMessage.id); 792 - return next; 793 - } 794 - return prev; 795 - }); 796 - 797 - // Don't clear yet - keep content visible 798 - return currentContent; 799 - }); 800 - 801 - // Use requestAnimationFrame to ensure final message is rendered before clearing 802 - requestAnimationFrame(() => { 803 - setIsStreaming(false); 804 - setIsReasoningStreaming(false); 805 - setStreamingStep(''); 806 - setStreamingMessage(''); 807 - setStreamingMessageId(''); 808 - setStreamingReasoning(''); 809 - 
reasoningBufferRef.current = ''; 810 - streamingReasoningRef.current = ''; 811 - }); 812 - } 813 - }, CHAR_REVEAL_INTERVAL_MS); 814 753 815 754 toolCallMsgIdsRef.current.clear(); 816 755 toolReturnMsgIdsRef.current.clear(); 817 - pendingReasoningRef.current = ''; 756 + currentReasoningIdRef.current = null; 818 757 819 758 // Build message content based on whether we have images 820 759 let messageContent: any; ··· 878 817 }, 879 818 async (response) => { 880 819 console.log('Stream complete'); 881 - // Signal that streaming is done - buffer interval will finalize when empty 882 820 streamCompleteRef.current = true; 821 + 822 + // Finalize messages immediately 823 + const finalReasoning = streamingReasoningRef.current || ''; 824 + console.log('Finalizing with reasoning:', finalReasoning); 825 + 826 + // Only create reasoning message if we haven't already created one during streaming 827 + if (finalReasoning && !currentReasoningIdRef.current) { 828 + const reasoningMessage: LettaMessage = { 829 + id: `reasoning-final-${Date.now()}`, 830 + role: 'assistant', 831 + message_type: 'reasoning_message', 832 + content: '', 833 + reasoning: finalReasoning, 834 + created_at: new Date().toISOString(), 835 + }; 836 + setMessages(prev => [...prev, reasoningMessage]); 837 + 838 + // Transfer reasoning expansion state 839 + setExpandedReasoning(prev => { 840 + if (prev.has('streaming')) { 841 + const next = new Set(prev); 842 + next.delete('streaming'); 843 + next.add(reasoningMessage.id); 844 + return next; 845 + } 846 + return prev; 847 + }); 848 + } 849 + 850 + // Create assistant message if there's actual content 851 + setStreamingMessage(currentContent => { 852 + if (currentContent && currentContent.trim()) { 853 + const finalMessage: LettaMessage = { 854 + id: streamingMessageId || `msg_${Date.now()}`, 855 + role: 'assistant', 856 + message_type: 'assistant_message', 857 + content: currentContent, 858 + created_at: new Date().toISOString(), 859 + }; 860 + 861 + 
console.log('Final assistant message object:', finalMessage); 862 + setMessages(prev => [...prev, finalMessage]); 863 + } else { 864 + console.log('Skipping empty assistant message'); 865 + } 866 + 867 + return currentContent; 868 + }); 869 + 870 + // Clear streaming state 871 + requestAnimationFrame(() => { 872 + setIsStreaming(false); 873 + setIsReasoningStreaming(false); 874 + setStreamingStep(''); 875 + setStreamingMessage(''); 876 + setStreamingMessageId(''); 877 + setStreamingReasoning(''); 878 + streamingReasoningRef.current = ''; 879 + currentReasoningIdRef.current = null; // Clear so reasoning message becomes visible 880 + }); 883 881 }, 884 882 (error) => { 885 883 console.error('=== APP STREAMING ERROR CALLBACK ==='); ··· 901 899 console.error('Could not stringify error:', e); 902 900 } 903 901 904 - // Clear intervals on error 905 - if (bufferIntervalRef.current) { 906 - clearInterval(bufferIntervalRef.current); 907 - bufferIntervalRef.current = null; 908 - } 902 + // Clear scroll interval on error 909 903 if (scrollIntervalRef.current) { 910 904 clearInterval(scrollIntervalRef.current); 911 905 scrollIntervalRef.current = null; ··· 921 915 setStreamingStep(''); 922 916 setStreamingMessageId(''); 923 917 setStreamingReasoning(''); 924 - tokenBufferRef.current = ''; 925 - reasoningBufferRef.current = ''; 926 918 streamingReasoningRef.current = ''; 927 919 928 920 // Create detailed error message ··· 1497 1489 // State for tracking expanded reasoning 1498 1490 const [expandedReasoning, setExpandedReasoning] = useState<Set<string>>(new Set()); 1499 1491 const [expandedCompaction, setExpandedCompaction] = useState<Set<string>>(new Set()); 1492 + const [expandedToolReturns, setExpandedToolReturns] = useState<Set<string>>(new Set()); 1500 1493 1501 1494 // Animate sidebar 1502 1495 useEffect(() => { ··· 1531 1524 }); 1532 1525 }, []); 1533 1526 1527 + const toggleToolReturn = useCallback((messageId: string) => { 1528 + setExpandedToolReturns(prev => { 1529 + 
const next = new Set(prev); 1530 + if (next.has(messageId)) { 1531 + next.delete(messageId); 1532 + } else { 1533 + next.add(messageId); 1534 + } 1535 + return next; 1536 + }); 1537 + }, []); 1538 + 1534 1539 // Group messages for efficient FlatList rendering 1535 1540 type MessageGroup = 1536 1541 | { key: string; type: 'toolPair'; call: LettaMessage; ret?: LettaMessage; reasoning?: string } 1537 1542 | { key: string; type: 'message'; message: LettaMessage; reasoning?: string }; 1538 1543 1539 - const groupedMessages = useMemo(() => { 1540 - const groups: MessageGroup[] = []; 1541 - const toolCallsMap = new Map<string, LettaMessage>(); 1542 - const processedIds = new Set<string>(); 1543 - 1544 - // Log raw messages 1545 - console.log('=== MESSAGE GROUPING DEBUG ==='); 1546 - console.log('Raw messages count:', messages.length); 1547 - messages.forEach((msg, idx) => { 1548 - console.log(`[${idx}] ${msg.created_at} | ${msg.role} | ${msg.message_type || 'no-type'} | id:${msg.id.substring(0, 8)} | step:${msg.step_id || 'none'}`); 1549 - }); 1544 + // Helper to check if a tool call has a result 1545 + const toolCallHasResult = useMemo(() => { 1546 + const hasResultMap = new Map<string, boolean>(); 1547 + for (let i = 0; i < messages.length; i++) { 1548 + const msg = messages[i]; 1549 + if (msg.message_type === 'tool_call_message') { 1550 + // Check if the next message is a tool_return 1551 + const nextMsg = messages[i + 1]; 1552 + hasResultMap.set(msg.id, nextMsg?.message_type === 'tool_return_message'); 1553 + } 1554 + } 1555 + return hasResultMap; 1556 + }, [messages]); 1550 1557 1558 + const displayMessages = useMemo(() => { 1551 1559 // Sort messages by created_at timestamp to ensure correct chronological order 1552 1560 const sortedMessages = [...messages].sort((a, b) => { 1553 1561 const timeA = new Date(a.created_at || 0).getTime(); ··· 1555 1563 return timeA - timeB; 1556 1564 }); 1557 1565 1558 - console.log('\nSorted messages:'); 1559 - 
sortedMessages.forEach((msg, idx) => { 1560 - console.log(`[${idx}] ${msg.created_at} | ${msg.role} | ${msg.message_type || 'no-type'} | id:${msg.id.substring(0, 8)} | step:${msg.step_id || 'none'}`); 1561 - }); 1562 - 1563 - sortedMessages.forEach(msg => { 1564 - if (msg.message_type?.includes('tool_call') && msg.step_id) { 1565 - console.log(`Adding to toolCallsMap: step_id=${msg.step_id}, msg_id=${msg.id.substring(0, 8)}`); 1566 - toolCallsMap.set(msg.step_id, msg); 1567 - } 1568 - }); 1569 - 1570 - console.log(`\nToolCallsMap size: ${toolCallsMap.size}`); 1571 - 1572 - // Build a map to find reasoning messages that precede tool calls 1573 - const reasoningBeforeToolCall = new Map<string, string>(); 1574 - const reasoningMessagesToSkip = new Set<string>(); 1575 - 1576 - console.log('\nChecking for reasoning messages that precede tool calls:'); 1577 - for (let i = 0; i < sortedMessages.length - 1; i++) { 1578 - const current = sortedMessages[i]; 1579 - const next = sortedMessages[i + 1]; 1580 - 1581 - // If current message has reasoning and next is a tool_call, associate them 1582 - // BUT only skip the current message if it's NOT a tool_call or tool_return itself 1583 - if (current.reasoning && next.message_type?.includes('tool_call') && next.id) { 1584 - const isToolMessage = current.message_type?.includes('tool_call') || current.message_type?.includes('tool_return'); 1585 - if (!isToolMessage) { 1586 - console.log(`Marking to skip: [${i}] ${current.message_type} ${current.id.substring(0, 8)} (has reasoning, precedes tool_call at [${i+1}])`); 1587 - reasoningBeforeToolCall.set(next.id, current.reasoning); 1588 - reasoningMessagesToSkip.add(current.id); // Mark this message to skip 1589 - } else { 1590 - console.log(`NOT skipping: [${i}] ${current.message_type} ${current.id.substring(0, 8)} (is a tool message, even though it has reasoning)`); 1591 - } 1592 - } 1593 - } 1594 - console.log(`Total messages to skip: ${reasoningMessagesToSkip.size}`); 1595 - 1596 - 
sortedMessages.forEach(msg => { 1597 - if (processedIds.has(msg.id)) { 1598 - console.log(`Skipping already processed: ${msg.id.substring(0, 8)} ${msg.message_type} step:${msg.step_id || 'none'}`); 1599 - return; 1600 - } 1601 - 1602 - // Skip reasoning messages that precede tool calls 1603 - if (reasoningMessagesToSkip.has(msg.id)) { 1604 - console.log(`Skipping reasoning message: ${msg.id.substring(0, 8)} ${msg.message_type}`); 1605 - processedIds.add(msg.id); 1606 - return; 1607 - } 1608 - 1609 - // Filter out system messages 1610 - if (msg.role === 'system') { 1611 - processedIds.add(msg.id); 1612 - return; 1613 - } 1566 + // Filter out system messages and login/heartbeat messages 1567 + const filtered = sortedMessages.filter(msg => { 1568 + if (msg.message_type === 'system_message') return false; 1614 1569 1615 - // Filter out login/heartbeat messages 1616 - if (msg.role === 'user' && msg.content) { 1570 + if (msg.message_type === 'user_message' && msg.content) { 1617 1571 try { 1618 1572 const contentStr = typeof msg.content === 'string' ? 
msg.content : JSON.stringify(msg.content); 1619 1573 const parsed = JSON.parse(contentStr); 1620 1574 if (parsed?.type === 'login' || parsed?.type === 'heartbeat') { 1621 - console.log(`Filtering out ${parsed.type} message:`, msg.id.substring(0, 8)); 1622 - processedIds.add(msg.id); 1623 - return; 1575 + return false; 1624 1576 } 1625 1577 } catch { 1626 - // Not JSON or array content, keep the message 1627 - } 1628 - } 1629 - 1630 - if (msg.message_type?.includes('tool_return') && msg.step_id) { 1631 - console.log(`Processing tool_return: step_id=${msg.step_id}, has toolCall in map:`, toolCallsMap.has(msg.step_id)); 1632 - const toolCall = toolCallsMap.get(msg.step_id); 1633 - if (toolCall) { 1634 - // Get reasoning from the message that preceded the tool call, or from the tool call itself 1635 - const reasoning = reasoningBeforeToolCall.get(toolCall.id) || toolCall.reasoning; 1636 - 1637 - console.log(`Adding toolPair group: ${toolCall.id.substring(0, 8)} step:${msg.step_id} with reasoning:`, !!reasoning); 1638 - groups.push({ 1639 - key: toolCall.id, 1640 - type: 'toolPair', 1641 - call: toolCall, 1642 - ret: msg, 1643 - reasoning: reasoning, 1644 - }); 1645 - processedIds.add(toolCall.id); 1646 - processedIds.add(msg.id); 1647 - return; 1648 - } else { 1649 - console.log(`WARN: tool_return with step_id ${msg.step_id} but no matching tool_call in map!`); 1578 + // Not JSON, keep the message 1650 1579 } 1651 1580 } 1652 1581 1653 - if (!msg.message_type?.includes('tool_call') && !msg.message_type?.includes('tool_return')) { 1654 - console.log(`Adding message group: ${msg.id.substring(0, 8)} role:${msg.role} reasoning:${!!msg.reasoning}`); 1655 - groups.push({ 1656 - key: msg.id, 1657 - type: 'message', 1658 - message: msg, 1659 - reasoning: msg.reasoning, 1660 - }); 1661 - processedIds.add(msg.id); 1662 - } 1663 - }); 1664 - 1665 - // Add any unpaired tool calls (tool calls without a matching return yet) 1666 - toolCallsMap.forEach((toolCall) => { 1667 - if 
(!processedIds.has(toolCall.id)) { 1668 - const reasoning = reasoningBeforeToolCall.get(toolCall.id) || toolCall.reasoning; 1669 - console.log(`Adding unpaired toolPair: ${toolCall.id.substring(0, 8)} with reasoning:`, !!reasoning); 1670 - groups.push({ 1671 - key: toolCall.id, 1672 - type: 'toolPair', 1673 - call: toolCall, 1674 - ret: undefined, 1675 - reasoning: reasoning, 1676 - }); 1677 - processedIds.add(toolCall.id); 1678 - } 1582 + return true; 1679 1583 }); 1680 1584 1681 - console.log('\nFinal groups order:'); 1682 - groups.forEach((g, idx) => { 1683 - if (g.type === 'toolPair') { 1684 - console.log(`[${idx}] toolPair - ${g.key.substring(0, 8)} reasoning:${!!g.reasoning}`); 1685 - } else { 1686 - console.log(`[${idx}] message - ${g.message.role} ${g.key.substring(0, 8)} reasoning:${!!g.reasoning}`); 1687 - } 1585 + console.log('[DISPLAY] Total messages:', filtered.length); 1586 + filtered.forEach((msg, idx) => { 1587 + console.log(`[DISPLAY ${idx}] ${msg.message_type} - ${msg.id.substring(0, 8)} - reasoning: ${!!msg.reasoning}`); 1688 1588 }); 1689 - console.log('=== END DEBUG ===\n'); 1690 1589 1691 - return groups; 1590 + return filtered; 1692 1591 }, [messages]); 1693 1592 1694 1593 // Animate rainbow gradient for "co is thinking", input box, reasoning sections, and empty state 1695 1594 useEffect(() => { 1696 - if (isReasoningStreaming || isInputFocused || expandedReasoning.size > 0 || groupedMessages.length === 0) { 1595 + if (isReasoningStreaming || isInputFocused || expandedReasoning.size > 0 || displayMessages.length === 0) { 1697 1596 rainbowAnimValue.setValue(0); 1698 1597 Animated.loop( 1699 1598 Animated.timing(rainbowAnimValue, { ··· 1705 1604 } else { 1706 1605 rainbowAnimValue.stopAnimation(); 1707 1606 } 1708 - }, [isReasoningStreaming, isInputFocused, expandedReasoning, groupedMessages.length]); 1607 + }, [isReasoningStreaming, isInputFocused, expandedReasoning, displayMessages.length]); 1709 1608 1710 - const renderMessageGroup = 
useCallback(({ item }: { item: MessageGroup }) => { 1711 - if (item.type === 'toolPair') { 1712 - // Extract tool call information from the message 1713 - const toolCall = item.call.tool_call || item.call.tool_calls?.[0]; 1714 - let callText = item.call.content || 'Tool call'; 1609 + const renderMessage = useCallback(({ item }: { item: LettaMessage }) => { 1610 + const msg = item; 1611 + const isUser = msg.message_type === 'user_message'; 1612 + const isSystem = msg.message_type === 'system_message'; 1613 + const isToolCall = msg.message_type === 'tool_call_message'; 1614 + const isToolReturn = msg.message_type === 'tool_return_message'; 1615 + const isAssistant = msg.message_type === 'assistant_message'; 1616 + const isReasoning = msg.message_type === 'reasoning_message'; 1617 + 1618 + if (isSystem) return null; 1715 1619 1716 - if (toolCall) { 1717 - // Handle both formats: with and without .function wrapper 1718 - const callObj = toolCall.function || toolCall; 1719 - const name = callObj.name || callObj.tool_name || 'tool'; 1720 - const argsRaw = callObj.arguments ?? callObj.args ?? '{}'; 1721 - let args = ''; 1722 - try { 1723 - args = typeof argsRaw === 'string' ? 
argsRaw : JSON.stringify(argsRaw); 1724 - } catch { 1725 - args = String(argsRaw); 1620 + // Handle reasoning messages - don't show if it's the currently streaming one 1621 + if (isReasoning) { 1622 + // Hide the reasoning message if it's currently being streamed 1623 + if (msg.id === currentReasoningIdRef.current) { 1624 + return null; 1726 1625 } 1727 - callText = `${name}(${args})`; 1626 + 1627 + console.log('[RENDER] Rendering reasoning message:', msg.id, 'reasoning:', msg.reasoning?.substring(0, 50)); 1628 + const isReasoningExpanded = expandedReasoning.has(msg.id); 1629 + 1630 + return ( 1631 + <View style={styles.messageContainer}> 1632 + <ReasoningToggle 1633 + reasoning={msg.reasoning || ''} 1634 + messageId={msg.id} 1635 + isExpanded={isReasoningExpanded} 1636 + onToggle={() => toggleReasoning(msg.id)} 1637 + /> 1638 + </View> 1639 + ); 1728 1640 } 1729 1641 1730 - const resultText = item.ret?.content || undefined; 1731 - const reasoning = item.reasoning || undefined; 1642 + // Handle tool calls 1643 + if (isToolCall) { 1644 + return ( 1645 + <View style={styles.messageContainer}> 1646 + <ToolCallItem 1647 + callText={msg.content} 1648 + resultText={undefined} 1649 + reasoning={undefined} 1650 + hasResult={toolCallHasResult.get(msg.id) || false} 1651 + /> 1652 + </View> 1653 + ); 1654 + } 1732 1655 1733 - return ( 1734 - <View key={item.key} style={styles.messageContainer}> 1735 - <ToolCallItem 1736 - callText={callText} 1737 - resultText={resultText} 1738 - reasoning={reasoning} 1739 - /> 1740 - </View> 1741 - ); 1742 - } else { 1743 - const msg = item.message; 1744 - const isUser = msg.role === 'user'; 1745 - const isSystem = msg.role === 'system'; 1656 + // Handle tool returns - just show the result text 1657 + if (isToolReturn) { 1658 + const isExpanded = expandedToolReturns.has(msg.id); 1746 1659 1747 - if (isSystem) return null; 1660 + return ( 1661 + <View style={styles.messageContainer}> 1662 + <View style={styles.toolReturnContainer}> 1663 + 
<TouchableOpacity 1664 + style={styles.toolReturnHeader} 1665 + onPress={() => toggleToolReturn(msg.id)} 1666 + activeOpacity={0.7} 1667 + > 1668 + <Ionicons 1669 + name={isExpanded ? 'chevron-down' : 'chevron-forward'} 1670 + size={12} 1671 + color={darkTheme.colors.text.tertiary} 1672 + /> 1673 + <Text style={styles.toolReturnLabel}>Result</Text> 1674 + </TouchableOpacity> 1675 + {isExpanded && ( 1676 + <View style={styles.toolReturnContent}> 1677 + <MessageContent content={msg.content} isUser={false} isDark={colorScheme === 'dark'} /> 1678 + </View> 1679 + )} 1680 + </View> 1681 + </View> 1682 + ); 1683 + } 1748 1684 1749 1685 if (isUser) { 1750 1686 // Check if this is a system_alert compaction message ··· 1879 1815 ); 1880 1816 } else { 1881 1817 const isReasoningExpanded = expandedReasoning.has(msg.id); 1882 - const isLastMessage = groupedMessages[groupedMessages.length - 1]?.key === item.key; 1818 + const isLastMessage = displayMessages[displayMessages.length - 1]?.id === msg.id; 1883 1819 const shouldHaveMinHeight = isLastMessage && lastMessageNeedsSpace; 1884 1820 1885 1821 return ( 1886 - <View key={item.key} style={[ 1822 + <View style={[ 1887 1823 styles.assistantFullWidthContainer, 1888 1824 shouldHaveMinHeight && { minHeight: Math.max(containerHeight * 0.9, 450) } 1889 1825 ]}> 1890 - {item.reasoning && ( 1826 + {msg.reasoning && ( 1891 1827 <ReasoningToggle 1892 - reasoning={item.reasoning} 1828 + reasoning={msg.reasoning} 1893 1829 messageId={msg.id} 1894 1830 isExpanded={isReasoningExpanded} 1895 1831 onToggle={() => toggleReasoning(msg.id)} ··· 1919 1855 </View> 1920 1856 ); 1921 1857 } 1922 - } 1923 - }, [expandedCompaction, expandedReasoning, groupedMessages, lastMessageNeedsSpace, containerHeight, colorScheme, copiedMessageId, toggleCompaction, toggleReasoning, copyToClipboard]); 1858 + 1859 + return null; 1860 + }, [expandedCompaction, expandedReasoning, expandedToolReturns, displayMessages, lastMessageNeedsSpace, containerHeight, colorScheme, 
copiedMessageId, toggleCompaction, toggleReasoning, toggleToolReturn, copyToClipboard, toolCallHasResult]); 1924 1861 1925 - const keyExtractor = useCallback((item: MessageGroup) => item.key, []); 1862 + const keyExtractor = useCallback((item: LettaMessage) => `${item.id}-${item.message_type}`, []); 1926 1863 1927 1864 const handleScroll = useCallback((e: any) => { 1928 1865 const y = e.nativeEvent.contentOffset.y; ··· 2200 2137 <View style={[ 2201 2138 styles.header, 2202 2139 { paddingTop: insets.top, backgroundColor: theme.colors.background.secondary, borderBottomColor: theme.colors.border.primary }, 2203 - groupedMessages.length === 0 && { backgroundColor: 'transparent', borderBottomWidth: 0 } 2140 + displayMessages.length === 0 && { backgroundColor: 'transparent', borderBottomWidth: 0 } 2204 2141 ]}> 2205 2142 <TouchableOpacity onPress={() => setSidebarVisible(!sidebarVisible)} style={styles.menuButton}> 2206 - <Ionicons name="menu" size={24} color={theme.colors.text.primary} /> 2143 + <Ionicons name="menu" size={24} color={colorScheme === 'dark' ? '#FFFFFF' : theme.colors.text.primary} /> 2207 2144 </TouchableOpacity> 2208 2145 2209 - {groupedMessages.length > 0 && ( 2146 + {displayMessages.length > 0 && ( 2210 2147 <> 2211 2148 <View style={styles.headerCenter}> 2212 2149 <TouchableOpacity ··· 2240 2177 </View> 2241 2178 2242 2179 {/* View Switcher - hidden when chat is empty */} 2243 - {groupedMessages.length > 0 && ( 2180 + {displayMessages.length > 0 && ( 2244 2181 <View style={[styles.viewSwitcher, { backgroundColor: theme.colors.background.secondary }]}> 2245 2182 <TouchableOpacity 2246 2183 style={[ ··· 2293 2230 style={styles.chatRow} 2294 2231 keyboardVerticalOffset={Platform.OS === 'ios' ? insets.top + 60 : 0} 2295 2232 > 2296 - {currentView === 'you' ? ( 2297 - /* You View */ 2298 - <View style={styles.memoryViewContainer}> 2233 + {/* You View */} 2234 + <View style={[styles.memoryViewContainer, { display: currentView === 'you' ? 
'flex' : 'none' }]}> 2299 2235 {!hasCheckedYouBlock ? ( 2300 2236 /* Loading state - checking for You block */ 2301 2237 <View style={{ flex: 1, justifyContent: 'center', alignItems: 'center' }}> ··· 2351 2287 </ScrollView> 2352 2288 )} 2353 2289 </View> 2354 - ) : currentView === 'chat' ? ( 2355 - <> 2290 + 2291 + {/* Chat View */} 2292 + <View style={{ display: currentView === 'chat' ? 'flex' : 'none', flex: 1 }}> 2356 2293 {/* Messages */} 2357 2294 <View style={styles.messagesContainer} onLayout={handleMessagesLayout}> 2358 2295 <FlatList 2359 2296 ref={scrollViewRef} 2360 - data={groupedMessages} 2361 - renderItem={renderMessageGroup} 2297 + data={displayMessages} 2298 + renderItem={renderMessage} 2362 2299 keyExtractor={keyExtractor} 2363 2300 extraData={{ showCompaction, expandedReasoning, expandedCompaction, copiedMessageId }} 2364 2301 onScroll={handleScroll} 2365 2302 onContentSizeChange={handleContentSizeChange} 2303 + maintainVisibleContentPosition={{ 2304 + minIndexForVisible: 0, 2305 + autoscrollToTopThreshold: 10, 2306 + }} 2366 2307 windowSize={10} 2367 2308 removeClippedSubviews={true} 2368 2309 maxToRenderPerBatch={5} 2369 2310 updateCellsBatchingPeriod={50} 2370 2311 contentContainerStyle={[ 2371 2312 styles.messagesList, 2372 - groupedMessages.length === 0 && { flexGrow: 1 } 2313 + displayMessages.length === 0 && { flexGrow: 1 } 2373 2314 ]} 2374 2315 ListHeaderComponent={ 2375 2316 hasMoreBefore ? 
( ··· 2386 2327 <> 2387 2328 {isStreaming && ( 2388 2329 <Animated.View style={[styles.assistantFullWidthContainer, { minHeight: spacerHeightAnim }]}> 2389 - {/* Always show reasoning section when streaming */} 2390 - {streamingReasoning && ( 2330 + {/* Show current status when streaming */} 2331 + {!streamingMessage && ( 2332 + <Animated.View style={{ flexDirection: 'row', alignItems: 'baseline', marginBottom: 12, opacity: statusFadeAnim }}> 2333 + <Text style={{ fontSize: 24, fontFamily: 'Lexend_400Regular', color: darkTheme.colors.text.primary }}>(</Text> 2334 + <Animated.Text 2335 + style={{ 2336 + fontSize: 24, 2337 + fontFamily: 'Lexend_700Bold', 2338 + color: rainbowAnimValue.interpolate({ 2339 + inputRange: [0, 0.2, 0.4, 0.6, 0.8, 1], 2340 + outputRange: ['#FF6B6B', '#FFD93D', '#6BCF7F', '#4D96FF', '#9D4EDD', '#FF6B6B'] 2341 + }) 2342 + }} 2343 + > 2344 + co 2345 + </Animated.Text> 2346 + <Text style={{ fontSize: 24, fontFamily: 'Lexend_400Regular', color: darkTheme.colors.text.primary }}> is {currentStreamingStatus})</Text> 2347 + </Animated.View> 2348 + )} 2349 + 2350 + {/* Show completed reasoning toggle */} 2351 + {streamingReasoning && streamingMessage && ( 2391 2352 <ReasoningToggle 2392 2353 reasoning={streamingReasoning} 2393 2354 messageId="streaming" 2394 2355 isExpanded={expandedReasoning.has('streaming')} 2395 2356 onToggle={() => toggleReasoning('streaming')} 2396 2357 customToggleContent={ 2397 - isReasoningStreaming ? 
( 2398 - <View style={{ flexDirection: 'row', alignItems: 'baseline' }}> 2399 - <Animated.Text 2400 - style={{ 2401 - fontSize: 24, 2402 - fontFamily: 'Lexend_700Bold', 2403 - color: rainbowAnimValue.interpolate({ 2404 - inputRange: [0, 0.2, 0.4, 0.6, 0.8, 1], 2405 - outputRange: ['#FF6B6B', '#FFD93D', '#6BCF7F', '#4D96FF', '#9D4EDD', '#FF6B6B'] 2406 - }) 2407 - }} 2408 - > 2409 - co 2410 - </Animated.Text> 2411 - <Text style={{ fontSize: 24, fontFamily: 'Lexend_400Regular', color: darkTheme.colors.text.tertiary }}> is thinking</Text> 2412 - </View> 2413 - ) : ( 2414 - <> 2415 - <Text style={{ fontSize: 14, fontFamily: 'Lexend_500Medium', color: darkTheme.colors.text.secondary }}>Reasoning</Text> 2416 - <Ionicons 2417 - name={expandedReasoning.has('streaming') ? "chevron-up" : "chevron-down"} 2418 - size={16} 2419 - style={{ marginLeft: 4 }} 2420 - color={darkTheme.colors.text.tertiary} 2421 - /> 2422 - </> 2423 - ) 2358 + <> 2359 + <Text style={{ fontSize: 14, fontFamily: 'Lexend_500Medium', color: darkTheme.colors.text.secondary }}>(co thought)</Text> 2360 + <Ionicons 2361 + name={expandedReasoning.has('streaming') ? 
"chevron-up" : "chevron-down"} 2362 + size={16} 2363 + style={{ marginLeft: 4 }} 2364 + color={darkTheme.colors.text.tertiary} 2365 + /> 2366 + </> 2424 2367 } 2425 2368 /> 2426 2369 )} 2427 - {streamingStep && ( 2428 - <Text style={styles.streamingStep}>{streamingStep}</Text> 2429 - )} 2430 2370 {streamingMessage && ( 2431 2371 <> 2372 + <View style={{ flexDirection: 'row', alignItems: 'baseline', paddingVertical: 4, marginBottom: 8 }}> 2373 + <Text style={{ fontSize: 14, fontFamily: 'Lexend_500Medium', color: darkTheme.colors.text.secondary }}>(</Text> 2374 + <Animated.Text 2375 + style={{ 2376 + fontSize: 14, 2377 + fontFamily: 'Lexend_600SemiBold', 2378 + color: rainbowAnimValue.interpolate({ 2379 + inputRange: [0, 0.2, 0.4, 0.6, 0.8, 1], 2380 + outputRange: ['#FF6B6B', '#FFD93D', '#6BCF7F', '#4D96FF', '#9D4EDD', '#FF6B6B'] 2381 + }) 2382 + }} 2383 + > 2384 + co 2385 + </Animated.Text> 2386 + <Text style={{ fontSize: 14, fontFamily: 'Lexend_500Medium', color: darkTheme.colors.text.secondary }}> is saying)</Text> 2387 + </View> 2432 2388 <View style={{ flex: 1 }}> 2433 2389 <MessageContent 2434 2390 content={streamingMessage} 2435 2391 isUser={false} 2436 2392 isDark={colorScheme === 'dark'} 2437 2393 /> 2438 - <Text style={{ color: theme.colors.text.tertiary, marginTop: 4 }}>○</Text> 2439 2394 </View> 2440 2395 <View style={styles.copyButtonContainer}> 2441 2396 <TouchableOpacity ··· 2484 2439 style={[ 2485 2440 styles.inputContainer, 2486 2441 { paddingBottom: Math.max(insets.bottom, 16) }, 2487 - groupedMessages.length === 0 && styles.inputContainerCentered 2442 + displayMessages.length === 0 && styles.inputContainerCentered 2488 2443 ]} 2489 2444 onLayout={handleInputLayout} 2490 2445 > 2491 2446 <View style={styles.inputCentered}> 2492 2447 {/* Empty state intro - shown above input when chat is empty */} 2493 - {groupedMessages.length === 0 && ( 2448 + {displayMessages.length === 0 && ( 2494 2449 <View style={styles.emptyStateIntro}> 2495 2450 
<Animated.Text 2496 2451 style={{ ··· 2506 2461 > 2507 2462 co 2508 2463 </Animated.Text> 2509 - <Text style={[styles.emptyText, { fontSize: 18, lineHeight: 28, marginBottom: 32 }]}> 2464 + <Text style={[styles.emptyText, { fontSize: 18, lineHeight: 28, marginBottom: 32, color: theme.colors.text.primary }]}> 2510 2465 I'm co, your thinking partner. 2511 2466 </Text> 2512 2467 </View> ··· 2584 2539 </Animated.View> 2585 2540 </View> 2586 2541 </View> 2587 - </> 2588 - ) : currentView === 'knowledge' ? ( 2589 - /* Knowledge View */ 2590 - <View style={styles.memoryViewContainer}> 2542 + </View> 2543 + 2544 + {/* Knowledge View */} 2545 + <View style={[styles.memoryViewContainer, { display: currentView === 'knowledge' ? 'flex' : 'none' }]}> 2591 2546 {/* Knowledge Tabs */} 2592 2547 <View style={[styles.knowledgeTabs, { backgroundColor: theme.colors.background.secondary, borderBottomColor: theme.colors.border.primary }]}> 2593 2548 <TouchableOpacity ··· 2886 2841 )} 2887 2842 </View> 2888 2843 </View> 2889 - ) : currentView === 'settings' ? ( 2890 - /* Settings View */ 2891 - <View style={styles.memoryViewContainer}> 2844 + 2845 + {/* Settings View */} 2846 + <View style={[styles.memoryViewContainer, { display: currentView === 'settings' ? 
'flex' : 'none' }]}> 2892 2847 <View style={[styles.settingsHeader, { backgroundColor: theme.colors.background.secondary, borderBottomColor: theme.colors.border.primary }]}> 2893 2848 <Text style={[styles.settingsTitle, { color: theme.colors.text.primary }]}>Settings</Text> 2894 2849 </View> ··· 2911 2866 </View> 2912 2867 </View> 2913 2868 </View> 2914 - ) : null} 2915 2869 2916 2870 {/* Knowledge block viewer - right pane on desktop */} 2917 2871 {isDesktop && selectedBlock && ( ··· 3871 3825 }, 3872 3826 toggleThumbActive: { 3873 3827 alignSelf: 'flex-end', 3828 + }, 3829 + toolReturnContainer: { 3830 + width: '100%', 3831 + marginTop: -8, 3832 + marginBottom: 4, 3833 + }, 3834 + toolReturnHeader: { 3835 + flexDirection: 'row', 3836 + alignItems: 'center', 3837 + gap: 3, 3838 + paddingVertical: 4, 3839 + paddingHorizontal: 8, 3840 + backgroundColor: 'transparent', 3841 + borderRadius: 4, 3842 + }, 3843 + toolReturnLabel: { 3844 + fontSize: 10, 3845 + fontFamily: 'Lexend_400Regular', 3846 + color: darkTheme.colors.text.tertiary, 3847 + opacity: 0.5, 3848 + }, 3849 + toolReturnContent: { 3850 + backgroundColor: 'rgba(30, 30, 30, 0.3)', 3851 + borderRadius: 4, 3852 + padding: 8, 3853 + borderWidth: 1, 3854 + borderColor: 'rgba(255, 255, 255, 0.03)', 3855 + marginTop: 0, 3874 3856 }, 3875 3857 });
+1 -1
src/components/ReasoningToggle.tsx
··· 53 53 customToggleContent 54 54 ) : ( 55 55 <> 56 - <Text style={styles.reasoningToggleText}>Reasoning</Text> 56 + <Text style={styles.reasoningToggleText}>(co thought)</Text> 57 57 {!hideChevron && ( 58 58 <Ionicons 59 59 name={isExpanded ? "chevron-up" : "chevron-down"}
+41 -41
src/components/ToolCallItem.tsx
··· 8 8 callText: string; 9 9 resultText?: string; 10 10 reasoning?: string; 11 + hasResult?: boolean; 11 12 } 12 13 13 14 // Extract parameters for contextual display ··· 23 24 }; 24 25 25 26 // Map tool names to friendly display messages 26 - const getToolDisplayName = (toolName: string, callText: string): string => { 27 + const getToolDisplayName = (toolName: string, callText: string, hasResult: boolean): { present: string; past: string } => { 27 28 const params = extractParams(callText); 28 29 29 30 if (toolName === 'web_search' && params.query) { 30 - return `Searching the web for "${params.query}"`; 31 + const query = params.query.length > 50 ? params.query.substring(0, 50) + '...' : params.query; 32 + return { 33 + present: `co is searching the web for "${query}"`, 34 + past: `co searched for "${query}"` 35 + }; 31 36 } 32 37 33 38 if (toolName === 'conversation_search' && params.query) { 34 - return `Searching conversation history for "${params.query}"`; 39 + const query = params.query.length > 50 ? params.query.substring(0, 50) + '...' : params.query; 40 + return { 41 + present: `co is searching conversation history for "${query}"`, 42 + past: `co searched conversation history for "${query}"` 43 + }; 35 44 } 36 45 37 46 if (toolName === 'fetch_webpage' && params.url) { 38 47 const url = params.url.length > 50 ? params.url.substring(0, 50) + '...' 
: params.url; 39 - return `Fetching ${url}`; 48 + return { 49 + present: `co is fetching ${url}`, 50 + past: `co fetched ${url}` 51 + }; 40 52 } 41 53 42 - const displayNames: Record<string, string> = { 43 - web_search: 'Searching the web', 44 - fetch_webpage: 'Fetching webpage', 45 - memory_insert: 'Inserting into memory', 46 - memory_replace: 'Updating memory', 47 - conversation_search: 'Searching conversation history', 48 - send_message: 'Sending message', 54 + const displayNames: Record<string, { present: string; past: string }> = { 55 + web_search: { present: 'co is searching the web', past: 'co searched the web' }, 56 + fetch_webpage: { present: 'co is fetching webpage', past: 'co fetched webpage' }, 57 + memory_insert: { present: 'co is inserting into memory', past: 'co inserted into memory' }, 58 + memory_replace: { present: 'co is updating memory', past: 'co updated memory' }, 59 + conversation_search: { present: 'co is searching conversation history', past: 'co searched conversation history' }, 60 + send_message: { present: 'co is sending message', past: 'co sent message' }, 49 61 }; 50 - return displayNames[toolName] || toolName; 62 + return displayNames[toolName] || { present: toolName, past: toolName }; 51 63 }; 52 64 53 - const ToolCallItem: React.FC<ToolCallItemProps> = ({ callText, resultText, reasoning }) => { 65 + const ToolCallItem: React.FC<ToolCallItemProps> = ({ callText, resultText, reasoning, hasResult = false }) => { 54 66 const [expanded, setExpanded] = useState(false); 55 67 const [resultExpanded, setResultExpanded] = useState(false); 56 68 ··· 61 73 return m ? m[1] : ''; 62 74 }, [callText]); 63 75 64 - // Get friendly display name 65 - const displayName = useMemo(() => getToolDisplayName(toolName, callText), [toolName, callText]); 76 + // Get friendly display names 77 + const displayNames = useMemo(() => getToolDisplayName(toolName, callText, hasResult), [toolName, callText, hasResult]); 78 + const displayText = hasResult ? 
displayNames.past : displayNames.present; 66 79 67 80 // Try to parse a "name({json})" or "name(k=v, ...)" shape into 68 81 // a nicer multiline representation for readability. ··· 123 136 <View style={styles.container}> 124 137 {reasoning && <ReasoningToggle reasoning={reasoning} />} 125 138 <TouchableOpacity 126 - style={[styles.header, expanded && !resultText && styles.headerExpanded, expanded && resultText && styles.headerExpandedWithResult]} 139 + style={styles.header} 127 140 onPress={() => setExpanded((e) => !e)} 128 141 activeOpacity={0.7} 129 142 > 143 + <Text style={expanded ? styles.callText : styles.displayName} numberOfLines={expanded ? 0 : 1}> 144 + {expanded ? prettyCallText : `(${displayText})`} 145 + </Text> 130 146 <Ionicons 131 - name={expanded ? 'chevron-down' : 'chevron-forward'} 132 - size={14} 133 - color={darkTheme.colors.text.secondary} 147 + name={expanded ? 'chevron-up' : 'chevron-down'} 148 + size={16} 149 + color={darkTheme.colors.text.tertiary} 134 150 style={styles.chevron} 135 151 /> 136 - <Text style={expanded ? styles.callText : styles.displayName} numberOfLines={expanded ? 0 : 1}> 137 - {expanded ? 
prettyCallText : displayName} 138 - </Text> 139 152 </TouchableOpacity> 140 153 {expanded && !!resultText && ( 141 154 <TouchableOpacity ··· 167 180 }, 168 181 header: { 169 182 flexDirection: 'row', 170 - alignItems: 'flex-start', 171 - gap: 8, 172 - backgroundColor: '#242424', 173 - borderRadius: 10, 174 - borderWidth: 1, 175 - borderColor: 'rgba(255, 255, 255, 0.1)', 176 - paddingVertical: 8, 177 - paddingHorizontal: 10, 183 + alignItems: 'center', 184 + paddingVertical: 4, 185 + marginBottom: 8, 178 186 }, 179 187 chevron: { 180 - marginTop: 2, 181 - }, 182 - headerExpanded: { 183 - borderBottomLeftRadius: 0, 184 - borderBottomRightRadius: 0, 185 - }, 186 - headerExpandedWithResult: { 187 - borderBottomLeftRadius: 0, 188 - borderBottomRightRadius: 0, 188 + marginLeft: 4, 189 189 }, 190 190 displayName: { 191 - color: darkTheme.colors.text.primary, 192 191 fontSize: 14, 193 - lineHeight: 20, 192 + fontFamily: 'Lexend_500Medium', 193 + color: darkTheme.colors.text.secondary, 194 194 flexShrink: 1, 195 195 }, 196 196 callText: {