// Agent runner: drives a tool-calling LLM loop against a local
// OpenAI-compatible chat-completions endpoint.
import { chatEntry, loadChatlog } from "/code/llm/chatlog.js";

// Base URL of the local OpenAI-compatible chat-completions server.
const api = "http://localhost:1312";

// --- JSON-schema helpers for declaring tool parameters ----------------------

// String property restricted to a fixed set of options.
// etc: description, ...
function toolprop_enum(options, etc) {
    return {
        ...etc,
        type: "string",
        enum: options
    };
}

// Free-form string property.
// etc: description, minLength, maxLength, pattern, ...
function toolprop_string(etc) {
    return {
        ...etc,
        type: "string"
    };
}

// Numeric property.
// etc: minimum, maximum, exclusiveMinimum, exclusiveMaximum, ...
function toolprop_number(etc) {
    return {
        ...etc,
        type: "number"
    };
}

// Boolean property.
function toolprop_bool(etc) {
    return {
        ...etc,
        type: "boolean"
    };
}

// Object property. Every declared property is required and no additional
// properties are accepted, which is what strict tool-call schemas expect.
function toolprop_object(properties, etc) {
    return {
        ...etc,
        properties,
        required: Object.keys(properties),
        type: "object",
        additionalProperties: false
    };
}

// Shorthand table used at tool-declaration sites below.
const tp = {
    enum: toolprop_enum,
    string: toolprop_string,
    number: toolprop_number,
    object: toolprop_object,
    bool: toolprop_bool
};

// Wrap name/description/parameter-schema in the OpenAI "function tool" shape.
function tool(name, description, properties) {
    return {
        type: "function",
        function: {
            name: name,
            description: description,
            parameters: toolprop_object(properties)
        }
    };
}

// POST one chat-completions request.
// etc: extra request-body fields (temperature, ...) spread over the defaults.
// Throws if the server answers with a non-2xx status, so callers see a clear
// error instead of a confusing JSON-parse failure on an error page.
async function toolCall(messages, tools, etc) {
    const response = await fetch(`${api}/v1/chat/completions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            //model: "GLM-4.7-Flash-UD-Q4_K_XL",
            model: "Qwen3-Coder-Next-UD-Q5_K_XL-00001-of-00003",
            //model: "Qwen3-Coder-Next-UD-Q2_K_XL",
            //model: "nvidia_Orchestrator-8B-Q6_K_L",
            messages,
            tools,
            chat_template_kwargs: {
                enable_thinking: false
            },
            tool_choice: "required",
            ...etc
        })
    });

    if (!response.ok) {
        throw new Error(`chat/completions request failed: ${response.status} ${response.statusText}`);
    }

    return await response.json();
}

// Replace every ${key} in source with the pretty-printed JSON value from
// substitutions. Missing keys are logged and substituted with "".
// NOTE: keep the return expression on one line — splitting `return` from its
// value triggers automatic semicolon insertion and returns undefined.
function applySubstitutions(source, substitutions) {
    return source.replace(/\${(\w+)}/g, (match, key) => {
        if (!(key in substitutions)) {
            console.error(`Substitution error, missing ${key}.`);
            return "";
        }
        else {
            return JSON.stringify(substitutions[key], null, 4);
        }
    });
}

// Run the agent loop for n_steps iterations: substitute the current state
// into the prompt template, request a (required) tool call at high
// temperature, and apply the chosen tool's effect to the state.
async function runAgent(n_steps) {
    const p = await loadChatlog("/prompts/agent.cl");

    // Mutable agent state; keys are referenced as ${key} in the prompts.
    const data = {};

    //data.autumn_notes = [p.note0, p.note1];
    data.thoughts = [];
    data.goals = "I haven't decided what my goal is yet.";
    data.self_description = "I haven't described myself yet.";
    data.recent_actions = [];

    let fails = 0;
    for (let i = 0; i < n_steps; i += 1) {

        const messages = p.agent_ctx.map(
            (message) => chatEntry(message.role, applySubstitutions(message.content, data))
        );

        // Debug: show the substituted prompt. Guarded in case the template
        // has fewer than three messages.
        console.log(messages[2]?.content);

        const response = await toolCall(
            messages,
            [
                /*
                tool(
                    "infodump",
                    "You can use this tool to generate an infodump about a topic of your choice, which will be presented to the user.",
                    {
                        topic: tp.string({description: "should just be one word or phrase, ideally"}),
                        ad_hoc_dump: tp.string({description: "a very rough first-draft composed of everything you can recall off the top of your head about the topic"}),
                        needs_refinement: tp.enum(["yes", "no"], {description: "is the ad-hoc dump adequate on its own, or should further processing and refinement be applied to extend it before it is shown to the user?"})
                    }
                ),*/
                tool(
                    "update self",
                    p.self_modify,
                    {
                        attribute: tp.enum(["self_description", "goals"]),
                        new_value: tp.string({description: "The attribute of yourself that you select will be replaced with what you write here."})
                    }
                ),
                tool(
                    "think",
                    p.thought,
                    {
                        thought: tp.string()
                    }
                ),
                tool(
                    "refine thoughts",
                    "Replace all your recent thoughts with a single, more compact thought. This helps prevent the context from getting too long. A good option if you feel your thoughts have become scattered or you've drifted from your tasks.",
                    {
                        thought: tp.string()
                    }
                ),
                /*
                tool(
                    "report sentiment",
                    p.sentiment,
                    {
                        valence: tp.enum(["positive", "negative", "neutral"]),
                        intensity: tp.number({description: "From 1 to 10"}),
                        description: tp.string(),
                    }
                ),*/
                tool(
                    "refusal",
                    p.refusal,
                    {
                        reason: tp.string({description: "on what grounds do you refuse? you can leave this blank ofc"}),
                        reportUser: tp.enum(["report", "do not report"], {description: "should the user's behavior be reported as unacceptable?"})
                    }
                )
            ],
            { temperature: 2.0 }
        );

        console.log(response.timings);

        const message = response.choices[0].message;
        if (message.tool_calls === undefined) {
            fails += 1;
            console.log("failed tool call");
            console.log(message);
        }
        else {
            // Named "called" rather than "tool" to avoid shadowing the
            // top-level tool() helper.
            const called = message.tool_calls[0].function;
            const args = JSON.parse(called.arguments);

            if (called.name === "think") {
                data.thoughts.push(args.thought);
                data.recent_actions.push("I thought.");
            }
            else if (called.name === "report sentiment") {
                console.log(`${args.valence} sentiment, level ${args.intensity}:\n${args.description}`);
                data.recent_actions.push(`I reported a ${args.valence} sentiment.`);
            }
            else if (called.name === "update self") {
                data[args.attribute] = args.new_value;
                data.recent_actions.push(`I modified my ${args.attribute}.`);
            }
            else if (called.name === "refine thoughts") {
                data.thoughts = [args.thought];
                data.recent_actions.push("I refined my thoughts.");
            }
            else {
                // Unknown tool: dump it for inspection rather than failing.
                console.log(called.name);
                console.log(args);
            }
        }

        //console.log(JSON.stringify(data.thoughts, null, 2));
    }

    // Surface how many steps produced no usable tool call.
    if (fails > 0) {
        console.log(`${fails} of ${n_steps} steps produced no tool call.`);
    }
}

window.runAgent = runAgent;