{
  "$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
  // See https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/agent-model-matching.md
  // Work machines use the Arm OpenAI proxy only.
  // Cost is not a factor, but speed still matters for utility paths.
  "agents": {
    // Sisyphus is Claude-first in upstream docs; use strongest OpenAI general model.
    "sisyphus": {
      "model": "arm/gpt-5.2-pro",
      "ultrawork": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" }
    },
    // Utility/search agents: use faster model for snappier turnaround.
    "librarian": { "model": "arm/gpt-5-mini" },
    "explore": { "model": "arm/gpt-5-mini" },
    // Deep/specialist agents.
    "hephaestus": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" },
    "oracle": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "momus": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "metis": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "atlas": { "model": "arm/gpt-5.2" },
    // Keep the same orchestration bias tweak.
    "prometheus": {
      "prompt_append": "Leverage deep & quick agents heavily, always in parallel."
    }
  },
  "categories": {
    "quick": { "model": "arm/gpt-5-mini" },
    "unspecified-low": { "model": "arm/gpt-5-mini" },
    "unspecified-high": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    "writing": { "model": "arm/gpt-5.2-pro" },
    "visual-engineering": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" },
    // Additional upstream-style categories mapped to OpenAI-only models.
    "deep": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" },
    "ultrabrain": { "model": "arm/gpt-5.3-codex", "variant": "xhigh" },
    "artistry": { "model": "arm/gpt-5.2-pro", "variant": "xhigh" }
  }
}