// nixos configs
{
  "$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",

  // See https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/agent-model-matching.md
  // Work machines use the Arm OpenAI proxy only.
  // Cost is not a factor, but speed still matters for utility paths.
  "agents": {
    // Sisyphus is Claude-first in upstream docs; use strongest OpenAI general model.
    "sisyphus": {
      "model": "arm/gpt-5.2-pro",
      "ultrawork": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" }
    },

    // Utility/search agents: use faster model for snappier turnaround.
    "librarian": { "model": "arm/gpt-5-mini" },
    "explore": { "model": "arm/gpt-5-mini" },

    // Deep/specialist agents.
    "hephaestus": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" },
    "oracle": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "momus": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "metis": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "atlas": { "model": "arm/gpt-5.2" },

    // Keep the same orchestration bias tweak.
    "prometheus": {
      "prompt_append": "Leverage deep & quick agents heavily, always in parallel."
    }
  },

  "categories": {
    "quick": { "model": "arm/gpt-5-mini" },
    "unspecified-low": { "model": "arm/gpt-5-mini" },
    "unspecified-high": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "writing": { "model": "arm/gpt-5.2-pro" },
    "visual-engineering": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },

    // Additional upstream-style categories mapped to OpenAI-only models.
    "deep": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" },
    "ultrabrain": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" },
    "artistry": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" }
  }
}