Compare commits

..

1 Commits

Author SHA1 Message Date
YeonGyu-Kim
829c58ccb0 refactor(aliases): migrate to pattern-based model alias resolution
Move from hardcoded exact aliases to pattern-based canonicalization:

- Populate PATTERN_ALIAS_RULES with regex patterns for:
  - Claude thinking variants (claude-opus-4-6-thinking → claude-opus-4-6)
  - Gemini tier suffixes (gemini-3.1-pro-{high,low} → gemini-3.1-pro)
- Add stripProviderPrefixForAliasLookup() for provider-prefixed models
  (anthropic/claude-sonnet-4-6 → claude-sonnet-4-6 for capability lookup)
- Preserve requestedModelID (with prefix) for API transport
- Reduce EXACT_ALIAS_RULES to exceptional cases only
  (gemini-3-pro-{high,low} → gemini-3-pro-preview)
- Comprehensive test coverage for patterns, prefix stripping, negatives

Addresses Discussion #2835 (pattern matching architecture)
Related to PR #2834 (alias guardrails)

41 targeted tests pass, 4467 full suite tests pass, tsc clean.
2026-03-26 12:04:50 +09:00
225 changed files with 1552 additions and 8565 deletions

View File

@@ -60,33 +60,16 @@ jobs:
bun test src/features/opencode-skill-loader/loader.test.ts
bun test src/hooks/anthropic-context-window-limit-recovery/recovery-hook.test.ts
bun test src/hooks/anthropic-context-window-limit-recovery/executor.test.ts
# src/shared mock-heavy files (mock.module pollutes connected-providers-cache and legacy-plugin-warning)
bun test src/shared/model-capabilities.test.ts
bun test src/shared/log-legacy-plugin-startup-warning.test.ts
bun test src/shared/model-error-classifier.test.ts
bun test src/shared/opencode-message-dir.test.ts
# session-recovery mock isolation (recover-tool-result-missing mocks ./storage)
bun test src/hooks/session-recovery/recover-tool-result-missing.test.ts
# legacy-plugin-toast mock isolation (hook.test.ts mocks ./auto-migrate)
bun test src/hooks/legacy-plugin-toast/hook.test.ts
- name: Run remaining tests
run: |
# Enumerate subdirectories/files explicitly to EXCLUDE mock-heavy files
# that were already run in isolation above.
# Excluded from src/shared: model-capabilities, log-legacy-plugin-startup-warning, model-error-classifier, opencode-message-dir
# Excluded from src/cli: doctor/formatter.test.ts, doctor/format-default.test.ts
# Excluded from src/tools: call-omo-agent/sync-executor.test.ts, call-omo-agent/session-creator.test.ts, session-manager (all)
# Excluded from src/hooks/anthropic-context-window-limit-recovery: recovery-hook.test.ts, executor.test.ts
# Build src/shared file list excluding mock-heavy files already run in isolation
SHARED_FILES=$(find src/shared -name '*.test.ts' \
! -name 'model-capabilities.test.ts' \
! -name 'log-legacy-plugin-startup-warning.test.ts' \
! -name 'model-error-classifier.test.ts' \
! -name 'opencode-message-dir.test.ts' \
| sort | tr '\n' ' ')
bun test bin script src/config src/mcp src/index.test.ts \
src/agents $SHARED_FILES \
src/agents src/shared \
src/cli/run src/cli/config-manager src/cli/mcp-oauth \
src/cli/index.test.ts src/cli/install.test.ts src/cli/model-fallback.test.ts \
src/cli/config-manager.test.ts \
@@ -99,8 +82,6 @@ jobs:
src/tools/call-omo-agent/background-executor.test.ts \
src/tools/call-omo-agent/subagent-session-creator.test.ts \
src/hooks/anthropic-context-window-limit-recovery/empty-content-recovery-sdk.test.ts src/hooks/anthropic-context-window-limit-recovery/parser.test.ts src/hooks/anthropic-context-window-limit-recovery/pruning-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/recovery-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/storage.test.ts \
src/hooks/session-recovery/detect-error-type.test.ts src/hooks/session-recovery/index.test.ts src/hooks/session-recovery/recover-empty-content-message-sdk.test.ts src/hooks/session-recovery/resume.test.ts src/hooks/session-recovery/storage \
src/hooks/legacy-plugin-toast/auto-migrate.test.ts \
src/hooks/claude-code-compatibility \
src/hooks/context-injection \
src/hooks/provider-toast \

View File

@@ -56,33 +56,10 @@ jobs:
env:
BUN_INSTALL_ALLOW_SCRIPTS: "@ast-grep/napi"
- name: Validate release inputs
id: validate
env:
INPUT_VERSION: ${{ inputs.version }}
INPUT_DIST_TAG: ${{ inputs.dist_tag }}
run: |
VERSION="$INPUT_VERSION"
DIST_TAG="$INPUT_DIST_TAG"
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+(\.[0-9A-Za-z]+)*)?$ ]]; then
echo "::error::Invalid version: $VERSION"
exit 1
fi
if [ -n "$DIST_TAG" ] && ! [[ "$DIST_TAG" =~ ^[a-z][a-z0-9-]*$ ]]; then
echo "::error::Invalid dist_tag: $DIST_TAG"
exit 1
fi
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "dist_tag=$DIST_TAG" >> $GITHUB_OUTPUT
- name: Check if already published
id: check
env:
VERSION: ${{ steps.validate.outputs.version }}
run: |
VERSION="${{ inputs.version }}"
PLATFORM_KEY="${{ matrix.platform }}"
PLATFORM_KEY="${PLATFORM_KEY//-/_}"
@@ -119,18 +96,15 @@ jobs:
- name: Update version in package.json
if: steps.check.outputs.skip != 'true'
env:
VERSION: ${{ steps.validate.outputs.version }}
run: |
VERSION="${{ inputs.version }}"
cd packages/${{ matrix.platform }}
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
- name: Set root package version
if: steps.check.outputs.skip != 'true'
env:
VERSION: ${{ steps.validate.outputs.version }}
run: |
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
jq --arg v "${{ inputs.version }}" '.version = $v' package.json > tmp.json && mv tmp.json package.json
- name: Pre-download baseline compile target
if: steps.check.outputs.skip != 'true' && endsWith(matrix.platform, '-baseline')
@@ -252,33 +226,11 @@ jobs:
matrix:
platform: [darwin-arm64, darwin-x64, darwin-x64-baseline, linux-x64, linux-x64-baseline, linux-arm64, linux-x64-musl, linux-x64-musl-baseline, linux-arm64-musl, windows-x64, windows-x64-baseline]
steps:
- name: Validate release inputs
id: validate
env:
INPUT_VERSION: ${{ inputs.version }}
INPUT_DIST_TAG: ${{ inputs.dist_tag }}
run: |
VERSION="$INPUT_VERSION"
DIST_TAG="$INPUT_DIST_TAG"
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+(\.[0-9A-Za-z]+)*)?$ ]]; then
echo "::error::Invalid version: $VERSION"
exit 1
fi
if [ -n "$DIST_TAG" ] && ! [[ "$DIST_TAG" =~ ^[a-z][a-z0-9-]*$ ]]; then
echo "::error::Invalid dist_tag: $DIST_TAG"
exit 1
fi
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "dist_tag=$DIST_TAG" >> $GITHUB_OUTPUT
- name: Check if already published
id: check
env:
VERSION: ${{ steps.validate.outputs.version }}
run: |
VERSION="${{ inputs.version }}"
OC_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-opencode-${{ matrix.platform }}/${VERSION}")
OA_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent-${{ matrix.platform }}/${VERSION}")
@@ -336,38 +288,38 @@ jobs:
- name: Publish oh-my-opencode-${{ matrix.platform }}
if: steps.check.outputs.skip_opencode != 'true' && steps.download.outcome == 'success'
env:
DIST_TAG: ${{ steps.validate.outputs.dist_tag }}
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
run: |
cd packages/${{ matrix.platform }}
if [ -n "$DIST_TAG" ]; then
npm publish --access public --provenance --tag "$DIST_TAG"
else
npm publish --access public --provenance
TAG_ARG=""
if [ -n "${{ inputs.dist_tag }}" ]; then
TAG_ARG="--tag ${{ inputs.dist_tag }}"
fi
npm publish --access public --provenance $TAG_ARG
env:
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
timeout-minutes: 15
- name: Publish oh-my-openagent-${{ matrix.platform }}
if: steps.check.outputs.skip_openagent != 'true' && steps.download.outcome == 'success'
env:
DIST_TAG: ${{ steps.validate.outputs.dist_tag }}
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
run: |
cd packages/${{ matrix.platform }}
# Rename package for oh-my-openagent
jq --arg name "oh-my-openagent-${{ matrix.platform }}" \
--arg desc "Platform-specific binary for oh-my-openagent (${{ matrix.platform }})" \
'.name = $name | .description = $desc | .bin = {"oh-my-openagent": (.bin | to_entries | .[0].value)}' \
package.json > tmp.json && mv tmp.json package.json
if [ -n "$DIST_TAG" ]; then
npm publish --access public --provenance --tag "$DIST_TAG"
else
npm publish --access public --provenance
TAG_ARG=""
if [ -n "${{ inputs.dist_tag }}" ]; then
TAG_ARG="--tag ${{ inputs.dist_tag }}"
fi
npm publish --access public --provenance $TAG_ARG
env:
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
timeout-minutes: 15

View File

@@ -61,33 +61,16 @@ jobs:
bun test src/features/opencode-skill-loader/loader.test.ts
bun test src/hooks/anthropic-context-window-limit-recovery/recovery-hook.test.ts
bun test src/hooks/anthropic-context-window-limit-recovery/executor.test.ts
# src/shared mock-heavy files (mock.module pollutes connected-providers-cache and legacy-plugin-warning)
bun test src/shared/model-capabilities.test.ts
bun test src/shared/log-legacy-plugin-startup-warning.test.ts
bun test src/shared/model-error-classifier.test.ts
bun test src/shared/opencode-message-dir.test.ts
# session-recovery mock isolation (recover-tool-result-missing mocks ./storage)
bun test src/hooks/session-recovery/recover-tool-result-missing.test.ts
# legacy-plugin-toast mock isolation (hook.test.ts mocks ./auto-migrate)
bun test src/hooks/legacy-plugin-toast/hook.test.ts
- name: Run remaining tests
run: |
# Enumerate subdirectories/files explicitly to EXCLUDE mock-heavy files
# that were already run in isolation above.
# Excluded from src/shared: model-capabilities, log-legacy-plugin-startup-warning, model-error-classifier, opencode-message-dir
# Excluded from src/cli: doctor/formatter.test.ts, doctor/format-default.test.ts
# Excluded from src/tools: call-omo-agent/sync-executor.test.ts, call-omo-agent/session-creator.test.ts, session-manager (all)
# Excluded from src/hooks/anthropic-context-window-limit-recovery: recovery-hook.test.ts, executor.test.ts
# Build src/shared file list excluding mock-heavy files already run in isolation
SHARED_FILES=$(find src/shared -name '*.test.ts' \
! -name 'model-capabilities.test.ts' \
! -name 'log-legacy-plugin-startup-warning.test.ts' \
! -name 'model-error-classifier.test.ts' \
! -name 'opencode-message-dir.test.ts' \
| sort | tr '\n' ' ')
bun test bin script src/config src/mcp src/index.test.ts \
src/agents $SHARED_FILES \
src/agents src/shared \
src/cli/run src/cli/config-manager src/cli/mcp-oauth \
src/cli/index.test.ts src/cli/install.test.ts src/cli/model-fallback.test.ts \
src/cli/config-manager.test.ts \
@@ -100,8 +83,6 @@ jobs:
src/tools/call-omo-agent/background-executor.test.ts \
src/tools/call-omo-agent/subagent-session-creator.test.ts \
src/hooks/anthropic-context-window-limit-recovery/empty-content-recovery-sdk.test.ts src/hooks/anthropic-context-window-limit-recovery/parser.test.ts src/hooks/anthropic-context-window-limit-recovery/pruning-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/recovery-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/storage.test.ts \
src/hooks/session-recovery/detect-error-type.test.ts src/hooks/session-recovery/index.test.ts src/hooks/session-recovery/recover-empty-content-message-sdk.test.ts src/hooks/session-recovery/resume.test.ts src/hooks/session-recovery/storage \
src/hooks/legacy-plugin-toast/auto-migrate.test.ts \
src/hooks/claude-code-compatibility \
src/hooks/context-injection \
src/hooks/provider-toast \
@@ -167,47 +148,33 @@ jobs:
- name: Calculate version
id: version
env:
RAW_VERSION: ${{ inputs.version }}
BUMP: ${{ inputs.bump }}
run: |
VERSION="$RAW_VERSION"
VERSION="${{ inputs.version }}"
if [ -z "$VERSION" ]; then
PREV=$(curl -s https://registry.npmjs.org/oh-my-opencode/latest | jq -r '.version // "0.0.0"')
BASE="${PREV%%-*}"
IFS='.' read -r MAJOR MINOR PATCH <<< "$BASE"
case "$BUMP" in
case "${{ inputs.bump }}" in
major) VERSION="$((MAJOR+1)).0.0" ;;
minor) VERSION="${MAJOR}.$((MINOR+1)).0" ;;
*) VERSION="${MAJOR}.${MINOR}.$((PATCH+1))" ;;
esac
fi
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+(\.[0-9A-Za-z]+)*)?$ ]]; then
echo "::error::Invalid version: $VERSION"
exit 1
fi
echo "version=$VERSION" >> $GITHUB_OUTPUT
if [[ "$VERSION" == *"-"* ]]; then
DIST_TAG=$(printf '%s' "$VERSION" | cut -d'-' -f2 | cut -d'.' -f1)
if ! [[ "$DIST_TAG" =~ ^[a-z][a-z0-9-]*$ ]]; then
echo "::error::Invalid dist_tag: $DIST_TAG"
exit 1
fi
DIST_TAG=$(echo "$VERSION" | cut -d'-' -f2 | cut -d'.' -f1)
echo "dist_tag=${DIST_TAG:-next}" >> $GITHUB_OUTPUT
else
echo "dist_tag=" >> $GITHUB_OUTPUT
fi
echo "Version: $VERSION"
- name: Check if already published
id: check
env:
VERSION: ${{ steps.version.outputs.version }}
run: |
VERSION="${{ steps.version.outputs.version }}"
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-opencode/${VERSION}")
if [ "$STATUS" = "200" ]; then
echo "skip=true" >> $GITHUB_OUTPUT
@@ -218,16 +185,15 @@ jobs:
- name: Update version
if: steps.check.outputs.skip != 'true'
env:
VERSION: ${{ steps.version.outputs.version }}
run: |
VERSION="${{ steps.version.outputs.version }}"
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
for platform in darwin-arm64 darwin-x64 darwin-x64-baseline linux-x64 linux-x64-baseline linux-arm64 linux-x64-musl linux-x64-musl-baseline linux-arm64-musl windows-x64 windows-x64-baseline; do
jq --arg v "$VERSION" '.version = $v' "packages/${platform}/package.json" > tmp.json
mv tmp.json "packages/${platform}/package.json"
done
jq --arg v "$VERSION" '.optionalDependencies = (.optionalDependencies | to_entries | map(.value = $v) | from_entries)' package.json > tmp.json && mv tmp.json package.json
- name: Build main package
@@ -240,22 +206,20 @@ jobs:
- name: Publish oh-my-opencode
if: steps.check.outputs.skip != 'true'
run: |
TAG_ARG=""
if [ -n "${{ steps.version.outputs.dist_tag }}" ]; then
TAG_ARG="--tag ${{ steps.version.outputs.dist_tag }}"
fi
npm publish --access public --provenance $TAG_ARG
env:
DIST_TAG: ${{ steps.version.outputs.dist_tag }}
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
run: |
if [ -n "$DIST_TAG" ]; then
npm publish --access public --provenance --tag "$DIST_TAG"
else
npm publish --access public --provenance
fi
- name: Check if oh-my-openagent already published
id: check-openagent
env:
VERSION: ${{ steps.version.outputs.version }}
run: |
VERSION="${{ steps.version.outputs.version }}"
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent/${VERSION}")
if [ "$STATUS" = "200" ]; then
echo "skip=true" >> $GITHUB_OUTPUT
@@ -266,12 +230,9 @@ jobs:
- name: Publish oh-my-openagent
if: steps.check-openagent.outputs.skip != 'true'
env:
VERSION: ${{ steps.version.outputs.version }}
DIST_TAG: ${{ steps.version.outputs.dist_tag }}
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
run: |
VERSION="${{ steps.version.outputs.version }}"
# Update package name, version, and optionalDependencies for oh-my-openagent
jq --arg v "$VERSION" '
.name = "oh-my-openagent" |
@@ -282,31 +243,39 @@ jobs:
from_entries
)
' package.json > tmp.json && mv tmp.json package.json
if [ -n "$DIST_TAG" ]; then
npm publish --access public --provenance --tag "$DIST_TAG"
else
npm publish --access public --provenance
TAG_ARG=""
if [ -n "${{ steps.version.outputs.dist_tag }}" ]; then
TAG_ARG="--tag ${{ steps.version.outputs.dist_tag }}"
fi
npm publish --access public --provenance $TAG_ARG || echo "::warning::oh-my-openagent publish failed"
env:
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
NPM_CONFIG_PROVENANCE: true
- name: Restore package.json
if: always() && steps.check-openagent.outputs.skip != 'true'
if: steps.check-openagent.outputs.skip != 'true'
run: |
git checkout -- package.json
publish-platform:
trigger-platform:
runs-on: ubuntu-latest
needs: publish-main
if: inputs.skip_platform != true
uses: ./.github/workflows/publish-platform.yml
with:
version: ${{ needs.publish-main.outputs.version }}
dist_tag: ${{ needs.publish-main.outputs.dist_tag }}
secrets: inherit
steps:
- name: Trigger platform publish workflow
run: |
gh workflow run publish-platform.yml \
--repo ${{ github.repository }} \
--ref ${{ github.ref }} \
-f version=${{ needs.publish-main.outputs.version }} \
-f dist_tag=${{ needs.publish-main.outputs.dist_tag }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
release:
runs-on: ubuntu-latest
needs: [publish-main, publish-platform]
if: always() && needs.publish-main.result == 'success' && (inputs.skip_platform == true || needs.publish-platform.result == 'success')
needs: publish-main
steps:
- uses: actions/checkout@v4
with:
@@ -330,53 +299,13 @@ jobs:
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Apply release version to source tree
env:
VERSION: ${{ needs.publish-main.outputs.version }}
run: |
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
for platform in darwin-arm64 darwin-x64 darwin-x64-baseline linux-x64 linux-x64-baseline linux-arm64 linux-x64-musl linux-x64-musl-baseline linux-arm64-musl windows-x64 windows-x64-baseline; do
jq --arg v "$VERSION" '.version = $v' "packages/${platform}/package.json" > tmp.json
mv tmp.json "packages/${platform}/package.json"
done
jq --arg v "$VERSION" '.optionalDependencies = (.optionalDependencies | to_entries | map(.value = $v) | from_entries)' package.json > tmp.json && mv tmp.json package.json
- name: Commit version bump
env:
VERSION: ${{ needs.publish-main.outputs.version }}
run: |
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git add package.json packages/*/package.json
git diff --cached --quiet || git commit -m "release: v${VERSION}"
- name: Create release tag
env:
VERSION: ${{ needs.publish-main.outputs.version }}
run: |
if git rev-parse "v${VERSION}" >/dev/null 2>&1; then
echo "::error::Tag v${VERSION} already exists"
exit 1
fi
git tag "v${VERSION}"
- name: Push release state
env:
VERSION: ${{ needs.publish-main.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git push origin HEAD
git push origin "v${VERSION}"
- name: Create GitHub release
env:
VERSION: ${{ needs.publish-main.outputs.version }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
VERSION="${{ needs.publish-main.outputs.version }}"
gh release view "v${VERSION}" >/dev/null 2>&1 || \
gh release create "v${VERSION}" --title "v${VERSION}" --notes-file /tmp/changelog.md
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Delete draft release
run: gh release delete next --yes 2>/dev/null || true
@@ -385,13 +314,13 @@ jobs:
- name: Merge to master
continue-on-error: true
env:
VERSION: ${{ needs.publish-main.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
VERSION="${{ needs.publish-main.outputs.version }}"
git stash --include-untracked || true
git checkout master
git reset --hard "v${VERSION}"
git push -f origin master || echo "::warning::Failed to push to master"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -282,18 +282,6 @@ Once all three gates pass:
gh pr merge "$PR_NUMBER" --squash --delete-branch
```
### Sync .sisyphus state back to main repo
Before removing the worktree, copy `.sisyphus/` state back. When `.sisyphus/` is gitignored, files written there during worktree execution are not committed or merged — they would be lost on worktree removal.
```bash
# Sync .sisyphus state from worktree to main repo (preserves task state, plans, notepads)
if [ -d "$WORKTREE_PATH/.sisyphus" ]; then
mkdir -p "$ORIGINAL_DIR/.sisyphus"
cp -r "$WORKTREE_PATH/.sisyphus/"* "$ORIGINAL_DIR/.sisyphus/" 2>/dev/null || true
fi
```
### Clean up the worktree
The worktree served its purpose — remove it to avoid disk bloat:

View File

@@ -168,7 +168,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
**Sisyphus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) はあなたのメインのオーケストレーターです。計画を立て、専門家に委任し、攻撃的な並列実行でタスクを完了まで推進します。途中で投げ出すことはありません。
**Hephaestus** (`gpt-5.4`) はあなたの自律的なディープワーカーです。レシピではなく、目標を与えてください。手取り足取り教えなくても、コードベースを探索し、パターンを研究し、端から端まで実行します。*正当なる職人 (The Legitimate Craftsman).*
**Hephaestus** (`gpt-5.3-codex`) はあなたの自律的なディープワーカーです。レシピではなく、目標を与えてください。手取り足取り教えなくても、コードベースを探索し、パターンを研究し、端から端まで実行します。*正当なる職人 (The Legitimate Craftsman).*
**Prometheus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) はあなたの戦略プランナーです。インタビューモードで動作し、コードに触れる前に質問をしてスコープを特定し、詳細な計画を構築します。
@@ -176,7 +176,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
> Anthropicが[私たちのせいでOpenCodeをブロックしました。](https://x.com/thdxr/status/2010149530486911014) だからこそHephaestusは「正当なる職人 (The Legitimate Craftsman)」と呼ばれているのです。皮肉を込めています。
>
> Opusで最もよく動きますが、Kimi K2.5 + GPT-5.4の組み合わせだけでも、バニラのClaude Codeを軽く凌駕します。設定は一切不要です。
> Opusで最もよく動きますが、Kimi K2.5 + GPT-5.3 Codexの組み合わせだけでも、バニラのClaude Codeを軽く凌駕します。設定は一切不要です。
### エージェントのオーケストレーション

View File

@@ -162,7 +162,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
**Sisyphus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**)는 당신의 메인 오케스트레이터입니다. 공격적인 병렬 실행으로 계획을 세우고, 전문가들에게 위임하며, 완료될 때까지 밀어붙입니다. 중간에 포기하는 법이 없습니다.
**Hephaestus** (`gpt-5.4`)는 당신의 자율 딥 워커입니다. 레시피가 아니라 목표를 주세요. 베이비시터 없이 알아서 코드베이스를 탐색하고, 패턴을 연구하며, 끝에서 끝까지 전부 해냅니다. *진정한 장인(The Legitimate Craftsman).*
**Hephaestus** (`gpt-5.3-codex`)는 당신의 자율 딥 워커입니다. 레시피가 아니라 목표를 주세요. 베이비시터 없이 알아서 코드베이스를 탐색하고, 패턴을 연구하며, 끝에서 끝까지 전부 해냅니다. *진정한 장인(The Legitimate Craftsman).*
**Prometheus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**)는 당신의 전략 플래너입니다. 인터뷰 모드로 작동합니다. 코드 한 줄 만지기 전에 질문을 던져 스코프를 파악하고 상세한 계획부터 세웁니다.
@@ -170,7 +170,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
> Anthropic이 [우리 때문에 OpenCode를 막아버렸습니다.](https://x.com/thdxr/status/2010149530486911014) 그래서 Hephaestus의 별명이 "진정한 장인(The Legitimate Craftsman)"인 겁니다. (어디서 많이 들어본 이름이죠?) 아이러니를 노렸습니다.
>
> Opus에서 제일 잘 돌아가긴 하지만, Kimi K2.5 + GPT-5.4 조합만으로도 바닐라 Claude Code는 가볍게 바릅니다. 설정도 필요 없습니다.
> Opus에서 제일 잘 돌아가긴 하지만, Kimi K2.5 + GPT-5.3 Codex 조합만으로도 바닐라 Claude Code는 가볍게 바릅니다. 설정도 필요 없습니다.
### 에이전트 오케스트레이션

View File

@@ -111,8 +111,6 @@ Fetch the installation guide and follow it:
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
```
**Note**: Use the published package and binary name `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`, while legacy `oh-my-opencode` entries still load with a warning. Plugin config files still commonly use `oh-my-opencode.json` or `oh-my-opencode.jsonc`, and both legacy and renamed basenames are recognized during the transition.
---
## Skip This README
@@ -166,7 +164,7 @@ Even only with following subscriptions, ultrawork will work well (this project i
**Sisyphus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`** ) is your main orchestrator. He plans, delegates to specialists, and drives tasks to completion with aggressive parallel execution. He does not stop halfway.
**Hephaestus** (`gpt-5.4`) is your autonomous deep worker. Give him a goal, not a recipe. He explores the codebase, researches patterns, and executes end-to-end without hand-holding. *The Legitimate Craftsman.*
**Hephaestus** (`gpt-5.3-codex`) is your autonomous deep worker. Give him a goal, not a recipe. He explores the codebase, researches patterns, and executes end-to-end without hand-holding. *The Legitimate Craftsman.*
**Prometheus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`** ) is your strategic planner. Interview mode: it questions, identifies scope, and builds a detailed plan before a single line of code is touched.
@@ -174,7 +172,7 @@ Every agent is tuned to its model's specific strengths. No manual model-juggling
> Anthropic [blocked OpenCode because of us.](https://x.com/thdxr/status/2010149530486911014) That's why Hephaestus is called "The Legitimate Craftsman." The irony is intentional.
>
> We run best on Opus, but Kimi K2.5 + GPT-5.4 already beats vanilla Claude Code. Zero config needed.
> We run best on Opus, but Kimi K2.5 + GPT-5.3 Codex already beats vanilla Claude Code. Zero config needed.
### Agent Orchestration
@@ -275,11 +273,11 @@ To remove oh-my-opencode:
1. **Remove the plugin from your OpenCode config**
Edit `~/.config/opencode/opencode.json` (or `opencode.jsonc`) and remove either `"oh-my-openagent"` or the legacy `"oh-my-opencode"` entry from the `plugin` array:
Edit `~/.config/opencode/opencode.json` (or `opencode.jsonc`) and remove `"oh-my-opencode"` from the `plugin` array:
```bash
# Using jq
jq '.plugin = [.plugin[] | select(. != "oh-my-openagent" and . != "oh-my-opencode")]' \
jq '.plugin = [.plugin[] | select(. != "oh-my-opencode")]' \
~/.config/opencode/opencode.json > /tmp/oc.json && \
mv /tmp/oc.json ~/.config/opencode/opencode.json
```
@@ -287,13 +285,11 @@ To remove oh-my-opencode:
2. **Remove configuration files (optional)**
```bash
# Remove plugin config files recognized during the compatibility window
rm -f ~/.config/opencode/oh-my-openagent.jsonc ~/.config/opencode/oh-my-openagent.json \
~/.config/opencode/oh-my-opencode.jsonc ~/.config/opencode/oh-my-opencode.json
# Remove user config
rm -f ~/.config/opencode/oh-my-opencode.json ~/.config/opencode/oh-my-opencode.jsonc
# Remove project config (if exists)
rm -f .opencode/oh-my-openagent.jsonc .opencode/oh-my-openagent.json \
.opencode/oh-my-opencode.jsonc .opencode/oh-my-opencode.json
rm -f .opencode/oh-my-opencode.json .opencode/oh-my-opencode.jsonc
```
3. **Verify removal**
@@ -319,10 +315,6 @@ See full [Features Documentation](docs/reference/features.md).
- **Built-in MCPs**: websearch (Exa), context7 (docs), grep_app (GitHub search)
- **Session Tools**: List, read, search, and analyze session history
- **Productivity Features**: Ralph Loop, Todo Enforcer, Comment Checker, Think Mode, and more
- **Doctor Command**: Built-in diagnostics (`bunx oh-my-opencode doctor`) verify plugin registration, config, models, and environment
- **Model Fallbacks**: `fallback_models` can mix plain model strings with per-fallback object settings in the same array
- **File Prompts**: Load prompts from files with `file://` support in agent configurations
- **Session Recovery**: Automatic recovery from session errors, context window limits, and API failures
- **Model Setup**: Agent-model matching is built into the [Installation Guide](docs/guide/installation.md#step-5-understand-your-model-setup)
## Configuration
@@ -332,7 +324,7 @@ Opinionated defaults, adjustable if you insist.
See [Configuration Documentation](docs/reference/configuration.md).
**Quick Overview:**
- **Config Locations**: The compatibility layer recognizes both `oh-my-openagent.json[c]` and legacy `oh-my-opencode.json[c]` plugin config files. Existing installs still commonly use the legacy basename.
- **Config Locations**: `.opencode/oh-my-opencode.jsonc` or `.opencode/oh-my-opencode.json` (project), `~/.config/opencode/oh-my-opencode.jsonc` or `~/.config/opencode/oh-my-opencode.json` (user)
- **JSONC Support**: Comments and trailing commas supported
- **Agents**: Override models, temperatures, prompts, and permissions for any agent
- **Built-in Skills**: `playwright` (browser automation), `git-master` (atomic commits)

View File

@@ -152,7 +152,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
**Sisyphus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) — главный оркестратор. Он планирует, делегирует задачи специалистам и доводит их до завершения с агрессивным параллельным выполнением. Он не останавливается на полпути.
**Hephaestus** (`gpt-5.4`) — автономный глубокий исполнитель. Дайте ему цель, а не рецепт. Он исследует кодовую базу, изучает паттерны и выполняет задачи сквозным образом без лишних подсказок. *Законный Мастер.*
**Hephaestus** (`gpt-5.3-codex`) — автономный глубокий исполнитель. Дайте ему цель, а не рецепт. Он исследует кодовую базу, изучает паттерны и выполняет задачи сквозным образом без лишних подсказок. *Законный Мастер.*
**Prometheus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) — стратегический планировщик. Режим интервью: задаёт вопросы, определяет объём работ и формирует детальный план до того, как написана хотя бы одна строка кода.
@@ -160,7 +160,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
> Anthropic [заблокировал OpenCode из-за нас.](https://x.com/thdxr/status/2010149530486911014) Именно поэтому Hephaestus зовётся «Законным Мастером». Ирония намеренная.
>
> Мы работаем лучше всего на Opus, но Kimi K2.5 + GPT-5.4 уже превосходят ванильный Claude Code. Никакой настройки не требуется.
> Мы работаем лучше всего на Opus, но Kimi K2.5 + GPT-5.3 Codex уже превосходят ванильный Claude Code. Никакой настройки не требуется.
### Оркестрация агентов

View File

@@ -169,7 +169,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
**Sisyphus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) 是你的主指挥官。他负责制定计划、分配任务给专家团队,并以极其激进的并行策略推动任务直至完成。他从不半途而废。
**Hephaestus** (`gpt-5.4`) 是你的自主深度工作者。你只需要给他目标,不要给他具体做法。他会自动探索代码库模式,从头到尾独立执行任务,绝不会中途要你当保姆。*名副其实的正牌工匠。*
**Hephaestus** (`gpt-5.3-codex`) 是你的自主深度工作者。你只需要给他目标,不要给他具体做法。他会自动探索代码库模式,从头到尾独立执行任务,绝不会中途要你当保姆。*名副其实的正牌工匠。*
**Prometheus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) 是你的战略规划师。他通过访谈模式,在动一行代码之前,先通过提问确定范围并构建详尽的执行计划。
@@ -177,7 +177,7 @@ Read this and tell me why it's not just another boilerplate: https://raw.githubu
> Anthropic [因为我们屏蔽了 OpenCode](https://x.com/thdxr/status/2010149530486911014)。这就是为什么我们将 Hephaestus 命名为“正牌工匠 (The Legitimate Craftsman)”。这是一个故意的讽刺。
>
> 我们在 Opus 上运行得最好,但仅仅使用 Kimi K2.5 + GPT-5.4 就足以碾压原版的 Claude Code。完全不需要配置。
> 我们在 Opus 上运行得最好,但仅仅使用 Kimi K2.5 + GPT-5.3 Codex 就足以碾压原版的 Claude Code。完全不需要配置。
### 智能体调度机制

View File

@@ -4423,11 +4423,6 @@
},
"model_fallback_title": {
"type": "boolean"
},
"max_tools": {
"type": "integer",
"minimum": 1,
"maximum": 9007199254740991
}
},
"additionalProperties": false
@@ -4661,14 +4656,6 @@
"type": "number",
"minimum": 60000
},
"taskTtlMs": {
"type": "number",
"minimum": 300000
},
"sessionGoneTimeoutMs": {
"type": "number",
"minimum": 10000
},
"syncPollTimeoutMs": {
"type": "number",
"minimum": 60000
@@ -4885,11 +4872,6 @@
"additionalProperties": false
},
"git_master": {
"default": {
"commit_footer": true,
"include_co_authored_by": true,
"git_env_prefix": "GIT_MASTER=1"
},
"type": "object",
"properties": {
"commit_footer": {
@@ -5040,8 +5022,5 @@
}
}
},
"required": [
"git_master"
],
"additionalProperties": false
}

View File

@@ -71,19 +71,9 @@ function getSignalExitCode(signal) {
return 128 + (signalCodeByName[signal] ?? 1);
}
function getPackageBaseName() {
try {
const packageJson = JSON.parse(readFileSync(new URL("../package.json", import.meta.url), "utf8"));
return packageJson.name || "oh-my-opencode";
} catch {
return "oh-my-opencode";
}
}
function main() {
const { platform, arch } = process;
const libcFamily = getLibcFamily();
const packageBaseName = getPackageBaseName();
const avx2Supported = supportsAvx2();
let packageCandidates;
@@ -93,7 +83,6 @@ function main() {
arch,
libcFamily,
preferBaseline: avx2Supported === false,
packageBaseName,
});
} catch (error) {
console.error(`\noh-my-opencode: ${error.message}\n`);

View File

@@ -3,11 +3,11 @@
/**
* Get the platform-specific package name
* @param {{ platform: string, arch: string, libcFamily?: string | null, packageBaseName?: string }} options
* @param {{ platform: string, arch: string, libcFamily?: string | null }} options
* @returns {string} Package name like "oh-my-opencode-darwin-arm64"
* @throws {Error} If libc cannot be detected on Linux
*/
export function getPlatformPackage({ platform, arch, libcFamily, packageBaseName = "oh-my-opencode" }) {
export function getPlatformPackage({ platform, arch, libcFamily }) {
let suffix = "";
if (platform === "linux") {
if (libcFamily === null || libcFamily === undefined) {
@@ -23,13 +23,13 @@ export function getPlatformPackage({ platform, arch, libcFamily, packageBaseName
// Map platform names: win32 -> windows (for package name)
const os = platform === "win32" ? "windows" : platform;
return `${packageBaseName}-${os}-${arch}${suffix}`;
return `oh-my-opencode-${os}-${arch}${suffix}`;
}
/** @param {{ platform: string, arch: string, libcFamily?: string | null, preferBaseline?: boolean, packageBaseName?: string }} options */
export function getPlatformPackageCandidates({ platform, arch, libcFamily, preferBaseline = false, packageBaseName = "oh-my-opencode" }) {
const primaryPackage = getPlatformPackage({ platform, arch, libcFamily, packageBaseName });
const baselinePackage = getBaselinePlatformPackage({ platform, arch, libcFamily, packageBaseName });
/** @param {{ platform: string, arch: string, libcFamily?: string | null, preferBaseline?: boolean }} options */
export function getPlatformPackageCandidates({ platform, arch, libcFamily, preferBaseline = false }) {
const primaryPackage = getPlatformPackage({ platform, arch, libcFamily });
const baselinePackage = getBaselinePlatformPackage({ platform, arch, libcFamily });
if (!baselinePackage) {
return [primaryPackage];
@@ -38,18 +38,18 @@ export function getPlatformPackageCandidates({ platform, arch, libcFamily, prefe
return preferBaseline ? [baselinePackage, primaryPackage] : [primaryPackage, baselinePackage];
}
/** @param {{ platform: string, arch: string, libcFamily?: string | null, packageBaseName?: string }} options */
function getBaselinePlatformPackage({ platform, arch, libcFamily, packageBaseName = "oh-my-opencode" }) {
/** @param {{ platform: string, arch: string, libcFamily?: string | null }} options */
function getBaselinePlatformPackage({ platform, arch, libcFamily }) {
if (arch !== "x64") {
return null;
}
if (platform === "darwin") {
return `${packageBaseName}-darwin-x64-baseline`;
return "oh-my-opencode-darwin-x64-baseline";
}
if (platform === "win32") {
return `${packageBaseName}-windows-x64-baseline`;
return "oh-my-opencode-windows-x64-baseline";
}
if (platform === "linux") {
@@ -61,10 +61,10 @@ function getBaselinePlatformPackage({ platform, arch, libcFamily, packageBaseNam
}
if (libcFamily === "musl") {
return `${packageBaseName}-linux-x64-musl-baseline`;
return "oh-my-opencode-linux-x64-musl-baseline";
}
return `${packageBaseName}-linux-x64-baseline`;
return "oh-my-opencode-linux-x64-baseline";
}
return null;

View File

@@ -190,21 +190,6 @@ describe("getPlatformPackageCandidates", () => {
]);
});
test("supports renamed package family via packageBaseName override", () => {
// #given Linux x64 with glibc and renamed package base
const input = { platform: "linux", arch: "x64", libcFamily: "glibc", packageBaseName: "oh-my-openagent" };
// #when getting package candidates
const result = getPlatformPackageCandidates(input);
// #then returns renamed package family candidates
expect(result).toEqual([
"oh-my-openagent-linux-x64",
"oh-my-openagent-linux-x64-baseline",
]);
});
test("returns only one candidate for ARM64", () => {
// #given non-x64 platform
const input = { platform: "linux", arch: "arm64", libcFamily: "glibc" };

View File

@@ -1,88 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
// Optimized for intensive coding sessions.
// Prioritizes deep implementation agents and fast feedback loops.
"agents": {
// Primary orchestrator: aggressive parallel delegation
"sisyphus": {
"model": "kimi-for-coding/k2p5",
"ultrawork": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
"prompt_append": "Delegate heavily to hephaestus for implementation. Parallelize exploration.",
},
// Heavy lifter: maximum autonomy for coding tasks
"hephaestus": {
"model": "openai/gpt-5.4",
"prompt_append": "You are the primary implementation agent. Own the codebase. Explore, decide, execute. Use LSP and AST-grep aggressively.",
"permission": { "edit": "allow", "bash": { "git": "allow", "test": "allow" } },
},
// Lightweight planner: quick planning for coding tasks
"prometheus": {
"model": "opencode/gpt-5-nano",
"prompt_append": "Keep plans concise. Focus on file structure and key decisions.",
},
// Debugging and architecture
"oracle": { "model": "openai/gpt-5.4", "variant": "high" },
// Fast docs lookup
"librarian": { "model": "github-copilot/grok-code-fast-1" },
// Rapid codebase navigation
"explore": { "model": "github-copilot/grok-code-fast-1" },
// Frontend and visual work
"multimodal-looker": { "model": "google/gemini-3.1-pro" },
// Plan review: minimal overhead
"metis": { "model": "opencode/gpt-5-nano" },
// Code review focus
"momus": { "prompt_append": "Focus on code quality, edge cases, and test coverage." },
// Long-running coding sessions
"atlas": {},
// Quick fixes and small tasks
"sisyphus-junior": { "model": "opencode/gpt-5-nano" },
},
"categories": {
// Trivial changes: fastest possible
"quick": { "model": "opencode/gpt-5-nano" },
// Standard coding tasks: good quality, fast
"unspecified-low": { "model": "anthropic/claude-sonnet-4-6" },
// Complex refactors: best quality
"unspecified-high": { "model": "openai/gpt-5.3-codex" },
// Visual work
"visual-engineering": { "model": "google/gemini-3.1-pro", "variant": "high" },
// Deep autonomous work
"deep": { "model": "openai/gpt-5.3-codex" },
// Architecture decisions
"ultrabrain": { "model": "openai/gpt-5.4", "variant": "xhigh" },
},
// High concurrency for parallel agent work
"background_task": {
"defaultConcurrency": 8,
"providerConcurrency": {
"anthropic": 5,
"openai": 5,
"google": 10,
"github-copilot": 10,
"opencode": 15,
},
},
// Enable all coding aids
"hashline_edit": true,
"experimental": { "aggressive_truncation": true, "task_system": true },
}

View File

@@ -1,71 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
// Balanced defaults for general development.
// Tuned for reliability across diverse tasks without overspending.
"agents": {
// Main orchestrator: handles delegation and drives tasks to completion
"sisyphus": {
"model": "anthropic/claude-opus-4-6",
"ultrawork": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
},
// Deep autonomous worker: end-to-end implementation
"hephaestus": {
"model": "openai/gpt-5.4",
"prompt_append": "Explore thoroughly, then implement. Prefer small, testable changes.",
},
// Strategic planner: interview mode before execution
"prometheus": {
"prompt_append": "Always interview first. Validate scope before planning.",
},
// Architecture consultant: complex design and debugging
"oracle": { "model": "openai/gpt-5.4", "variant": "high" },
// Documentation and code search
"librarian": { "model": "google/gemini-3-flash" },
// Fast codebase exploration
"explore": { "model": "github-copilot/grok-code-fast-1" },
// Visual tasks: UI/UX, images, diagrams
"multimodal-looker": { "model": "google/gemini-3.1-pro" },
// Plan consultant: reviews and improves plans
"metis": {},
// Critic and reviewer
"momus": {},
// Continuation and long-running task handler
"atlas": {},
// Lightweight task executor for simple jobs
"sisyphus-junior": { "model": "opencode/gpt-5-nano" },
},
"categories": {
"quick": { "model": "opencode/gpt-5-nano" },
"unspecified-low": { "model": "anthropic/claude-sonnet-4-6" },
"unspecified-high": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
"writing": { "model": "google/gemini-3-flash" },
"visual-engineering": { "model": "google/gemini-3.1-pro", "variant": "high" },
"deep": { "model": "openai/gpt-5.3-codex" },
"ultrabrain": { "model": "openai/gpt-5.4", "variant": "xhigh" },
},
// Conservative concurrency for cost control
"background_task": {
"providerConcurrency": {
"anthropic": 3,
"openai": 3,
"google": 5,
"opencode": 10,
},
},
"experimental": { "aggressive_truncation": true },
}

View File

@@ -1,112 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
// Optimized for strategic planning, architecture, and complex project design.
// Prioritizes deep thinking agents and thorough analysis before execution.
"agents": {
// Orchestrator: delegates to planning agents first
"sisyphus": {
"model": "anthropic/claude-opus-4-6",
"ultrawork": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
"prompt_append": "Always consult prometheus and atlas for planning. Never rush to implementation.",
},
// Implementation: uses planning outputs
"hephaestus": {
"model": "openai/gpt-5.4",
"prompt_append": "Follow established plans precisely. Ask for clarification when plans are ambiguous.",
},
// Primary planner: deep interview mode
"prometheus": {
"model": "anthropic/claude-opus-4-6",
"thinking": { "type": "enabled", "budgetTokens": 160000 },
"prompt_append": "Interview extensively. Question assumptions. Build exhaustive plans with milestones, risks, and contingencies. Use deep & quick agents heavily in parallel for research.",
},
// Architecture consultant
"oracle": {
"model": "openai/gpt-5.4",
"variant": "xhigh",
"thinking": { "type": "enabled", "budgetTokens": 120000 },
},
// Research and documentation
"librarian": { "model": "google/gemini-3-flash" },
// Exploration for research phase
"explore": { "model": "github-copilot/grok-code-fast-1" },
// Visual planning and diagrams
"multimodal-looker": { "model": "google/gemini-3.1-pro", "variant": "high" },
// Plan review and refinement: heavily utilized
"metis": {
"model": "anthropic/claude-opus-4-6",
"prompt_append": "Critically evaluate plans. Identify gaps, risks, and improvements. Be thorough.",
},
// Critic: challenges assumptions
"momus": {
"model": "openai/gpt-5.4",
"prompt_append": "Challenge all assumptions in plans. Look for edge cases, failure modes, and overlooked requirements.",
},
// Long-running planning sessions
"atlas": {
"prompt_append": "Preserve context across long planning sessions. Track evolving decisions.",
},
// Quick research tasks
"sisyphus-junior": { "model": "opencode/gpt-5-nano" },
},
"categories": {
"quick": { "model": "opencode/gpt-5-nano" },
"unspecified-low": { "model": "anthropic/claude-sonnet-4-6" },
// High-effort planning tasks: maximum reasoning
"unspecified-high": {
"model": "openai/gpt-5.4",
"variant": "xhigh",
},
// Documentation from plans
"writing": { "model": "google/gemini-3-flash" },
// Visual architecture
"visual-engineering": { "model": "google/gemini-3.1-pro", "variant": "high" },
// Deep research and analysis
"deep": { "model": "openai/gpt-5.3-codex" },
// Strategic reasoning
"ultrabrain": { "model": "openai/gpt-5.4", "variant": "xhigh" },
// Creative approaches to problems
"artistry": { "model": "google/gemini-3.1-pro", "variant": "high" },
},
// Moderate concurrency: planning is sequential by nature
"background_task": {
"defaultConcurrency": 5,
"staleTimeoutMs": 300000,
"providerConcurrency": {
"anthropic": 3,
"openai": 3,
},
"modelConcurrency": {
"anthropic/claude-opus-4-6": 2,
"openai/gpt-5.4": 2,
},
},
"sisyphus_agent": {
"planner_enabled": true,
"replace_plan": true,
},
"experimental": { "aggressive_truncation": true },
}

View File

@@ -27,7 +27,7 @@ Using Sisyphus with older GPT models would be like taking your best project mana
Hephaestus is the developer who stays in their room coding all day. Doesn't talk much. Might seem socially awkward. But give them a hard technical problem and they'll emerge three hours later with a solution nobody else could have found.
**This is why Hephaestus uses GPT-5.4.** GPT-5.4 is built for exactly this:
**This is why Hephaestus uses GPT-5.3 Codex.** Codex is built for exactly this:
- Deep, autonomous exploration without hand-holding
- Multi-file reasoning across complex codebases
@@ -64,8 +64,8 @@ These agents have Claude-optimized prompts — long, detailed, mechanics-driven.
| Agent | Role | Fallback Chain | Notes |
| ------------ | ----------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------- |
| **Sisyphus** | Main orchestrator | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/kimi-k2.5 → kimi-for-coding/k2p5 → opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → zai-coding-plan\|opencode/glm-5 → opencode/big-pickle | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Metis** | Plan gap analyzer | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → kimi-for-coding/k2p5 | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Sisyphus** | Main orchestrator | Claude Opus → opencode-go/kimi-k2.5 → K2P5 → Kimi K2.5 → GPT-5.4 → GLM-5 → Big Pickle | Claude-family first. GPT-5.4 has dedicated prompt support. Kimi available through multiple providers. |
| **Metis** | Plan gap analyzer | Claude Opus → GPT-5.4 → opencode-go/glm-5 → K2P5 | Claude preferred. GPT-5.4 as secondary before GLM-5 fallback. |
### Dual-Prompt Agents → Claude preferred, GPT supported
@@ -73,8 +73,8 @@ These agents ship separate prompts for Claude and GPT families. They auto-detect
| Agent | Role | Fallback Chain | Notes |
| -------------- | ----------------- | -------------------------------------- | -------------------------------------------------------------------- |
| **Prometheus** | Strategic planner | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → google\|github-copilot\|opencode/gemini-3.1-pro | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Atlas** | Todo orchestrator | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → opencode-go/minimax-m2.7 | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Prometheus** | Strategic planner | Claude Opus → GPT-5.4 → opencode-go/glm-5 → Gemini 3.1 Pro | Interview-mode planning. GPT prompt is compact and principle-driven. |
| **Atlas** | Todo orchestrator | Claude Sonnet → opencode-go/kimi-k2.5 → GPT-5.4 | Claude first, opencode-go as intermediate, GPT-5.4 as last resort. |
### Deep Specialists → GPT
@@ -82,9 +82,9 @@ These agents are built for GPT's principle-driven style. Their prompts assume au
| Agent | Role | Fallback Chain | Notes |
| -------------- | ----------------------- | -------------------------------------- | ------------------------------------------------ |
| **Hephaestus** | Autonomous deep worker | GPT-5.4 (medium) | Requires a GPT-capable provider. The craftsman. |
| **Oracle** | Architecture consultant | openai\|github-copilot\|opencode/gpt-5.4 (high) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Momus** | Ruthless reviewer | openai\|github-copilot\|opencode/gpt-5.4 (xhigh) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → opencode-go/glm-5 | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Hephaestus** | Autonomous deep worker | GPT-5.3 Codex → GPT-5.4 (Copilot) | Requires GPT access. GPT-5.4 via Copilot as fallback. The craftsman. |
| **Oracle** | Architecture consultant | GPT-5.4 → Gemini 3.1 Pro → Claude Opus → opencode-go/glm-5 | Read-only high-IQ consultation. |
| **Momus** | Ruthless reviewer | GPT-5.4 → Claude Opus → Gemini 3.1 Pro → opencode-go/glm-5 | Verification and plan review. GPT-5.4 uses xhigh variant. |
### Utility Runners → Speed over Intelligence
@@ -92,10 +92,10 @@ These agents do grep, search, and retrieval. They intentionally use the fastest,
| Agent | Role | Fallback Chain | Notes |
| --------------------- | ------------------ | ---------------------------------------------- | ----------------------------------------------------- |
| **Explore** | Fast codebase grep | github-copilot\|xai/grok-code-fast-1 → opencode-go/minimax-m2.7-highspeed → opencode/minimax-m2.7 → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Librarian** | Docs/code search | opencode-go/minimax-m2.7 → opencode/minimax-m2.7-highspeed → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Multimodal Looker** | Vision/screenshots | openai\|opencode/gpt-5.4 (medium) → opencode-go/kimi-k2.5 → zai-coding-plan/glm-4.6v → openai\|github-copilot\|opencode/gpt-5-nano | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Sisyphus-Junior** | Category executor | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → opencode-go/minimax-m2.7 → opencode/big-pickle | Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Explore** | Fast codebase grep | Grok Code Fast → opencode-go/minimax-m2.7-highspeed → MiniMax M2.7 → Haiku → GPT-5-Nano | Speed is everything. Fire 10 in parallel. |
| **Librarian** | Docs/code search | opencode-go/minimax-m2.7 → MiniMax M2.7-highspeed → Haiku → GPT-5-Nano | Doc retrieval doesn't need deep reasoning. |
| **Multimodal Looker** | Vision/screenshots | GPT-5.4 → opencode-go/kimi-k2.5 → GLM-4.6v → GPT-5-Nano | Uses the first available multimodal-capable fallback. |
| **Sisyphus-Junior** | Category executor | Claude Sonnet → opencode-go/kimi-k2.5 → GPT-5.4 → MiniMax M2.7 → Big Pickle | Handles delegated category tasks. Sonnet-tier default. |
---
@@ -119,7 +119,7 @@ Principle-driven, explicit reasoning, deep technical capability. Best for agents
| Model | Strengths |
| ----------------- | ----------------------------------------------------------------------------------------------- |
| **GPT-5.3 Codex** | Deep coding powerhouse. Autonomous exploration. Still available for deep category and explicit overrides. |
| **GPT-5.3 Codex** | Deep coding powerhouse. Autonomous exploration. Required for Hephaestus. |
| **GPT-5.4** | High intelligence, strategic reasoning. Default for Oracle, Momus, and a key fallback for Prometheus / Atlas. Uses xhigh variant for Momus. |
| **GPT-5.4 Mini** | Fast + strong reasoning. Good for lightweight autonomous tasks. Default for quick category. |
| **GPT-5-Nano** | Ultra-cheap, fast. Good for simple utility tasks. |
@@ -131,8 +131,8 @@ Principle-driven, explicit reasoning, deep technical capability. Best for agents
| **Gemini 3.1 Pro** | Excels at visual/frontend tasks. Different reasoning style. Default for `visual-engineering` and `artistry`. |
| **Gemini 3 Flash** | Fast. Good for doc search and light tasks. |
| **Grok Code Fast 1** | Blazing fast code grep. Default for Explore agent. |
| **MiniMax M2.7** | Fast and smart. Used in OpenCode Go and OpenCode Zen utility fallback chains. |
| **MiniMax M2.7 Highspeed** | High-speed OpenCode catalog entry used in utility fallback chains that prefer the fastest available MiniMax path. |
| **MiniMax M2.7** | Fast and smart. Good for utility tasks and search/retrieval. Upgraded from M2.5 with better reasoning. |
| **MiniMax M2.7 Highspeed** | Ultra-fast variant. Optimized for latency-sensitive tasks like codebase grep. |
### OpenCode Go
@@ -144,12 +144,11 @@ A premium subscription tier ($10/month) that provides reliable access to Chinese
| ------------------------ | --------------------------------------------------------------------- |
| **opencode-go/kimi-k2.5** | Vision-capable, Claude-like reasoning. Used by Sisyphus, Atlas, Sisyphus-Junior, Multimodal Looker. |
| **opencode-go/glm-5** | Text-only orchestration model. Used by Oracle, Prometheus, Metis, Momus. |
| **opencode-go/minimax-m2.7** | Ultra-cheap, fast responses. Used by Librarian, Atlas, and Sisyphus-Junior for utility work. |
| **opencode-go/minimax-m2.7-highspeed** | Even faster OpenCode Go MiniMax entry used by Explore when the high-speed catalog entry is available. |
| **opencode-go/minimax-m2.7** | Ultra-cheap, fast responses. Used by Librarian, Explore, Atlas, Sisyphus-Junior for utility work. |
**When It Gets Used:**
OpenCode Go models appear throughout the fallback chains as intermediate options. Depending on the agent, they can sit before GPT, after GPT, or act as the last structured-model fallback before cheaper utility paths.
OpenCode Go models appear in fallback chains as intermediate options. They bridge the gap between premium Claude access and free-tier alternatives. The system tries OpenCode Go models before falling back to free tiers (MiniMax M2.7-highspeed, Big Pickle) or GPT alternatives.
**Go-Only Scenarios:**
@@ -157,7 +156,7 @@ Some model identifiers like `k2p5` (paid Kimi K2.5) and `glm-5` may only be avai
### About Free-Tier Fallbacks
You may see model names like `kimi-k2.5-free`, `minimax-m2.7`, `minimax-m2.7-highspeed`, or `big-pickle` (GLM 4.6) in the source code or logs. These are provider-specific or speed-optimized entries in fallback chains.
You may see model names like `kimi-k2.5-free`, `minimax-m2.7-highspeed`, or `big-pickle` (GLM 4.6) in the source code or logs. These are free-tier or speed-optimized versions of the same model families. They exist as lower-priority entries in fallback chains.
You don't need to configure them. The system includes them so it degrades gracefully when you don't have every paid subscription. If you have the paid version, the paid version is always preferred.
@@ -169,14 +168,14 @@ When agents delegate work, they don't pick a model name — they pick a **catego
| Category | When Used | Fallback Chain |
| -------------------- | -------------------------- | -------------------------------------------- |
| `visual-engineering` | Frontend, UI, CSS, design | google\|github-copilot\|opencode/gemini-3.1-pro (high) → zai-coding-plan\|opencode/glm-5 → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 → kimi-for-coding/k2p5 |
| `ultrabrain` | Maximum reasoning needed | openai\|opencode/gpt-5.4 (xhigh) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 |
| `deep` | Deep coding, complex logic | openai\|opencode/gpt-5.3-codex (medium) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → google\|github-copilot\|opencode/gemini-3.1-pro (high) |
| `artistry` | Creative, novel approaches | google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 |
| `quick` | Simple, fast tasks | openai\|github-copilot\|opencode/gpt-5.4-mini → anthropic\|github-copilot\|opencode/claude-haiku-4-5 → google\|github-copilot\|opencode/gemini-3-flash → opencode-go/minimax-m2.7 → opencode/gpt-5-nano |
| `unspecified-high` | General complex work | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → zai-coding-plan\|opencode/glm-5 → kimi-for-coding/k2p5 → opencode-go/glm-5 → opencode/kimi-k2.5 → opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5 |
| `unspecified-low` | General standard work | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → openai\|opencode/gpt-5.3-codex (medium) → opencode-go/kimi-k2.5 → google\|github-copilot\|opencode/gemini-3-flash → opencode-go/minimax-m2.7 |
| `writing` | Text, docs, prose | google\|github-copilot\|opencode/gemini-3-flash → opencode-go/kimi-k2.5 → anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/minimax-m2.7 |
| `visual-engineering` | Frontend, UI, CSS, design | Gemini 3.1 Pro → GLM 5 → Claude Opus → opencode-go/glm-5 → K2P5 |
| `ultrabrain` | Maximum reasoning needed | GPT-5.4 → Gemini 3.1 Pro → Claude Opus → opencode-go/glm-5 |
| `deep` | Deep coding, complex logic | GPT-5.3 Codex → Claude Opus → Gemini 3.1 Pro |
| `artistry` | Creative, novel approaches | Gemini 3.1 Pro → Claude Opus → GPT-5.4 |
| `quick` | Simple, fast tasks | GPT-5.4 Mini → Claude Haiku → Gemini Flash → opencode-go/minimax-m2.7 → GPT-5-Nano |
| `unspecified-high` | General complex work | Claude Opus → GPT-5.4 → GLM 5 → K2P5 → opencode-go/glm-5 → Kimi K2.5 |
| `unspecified-low` | General standard work | Claude Sonnet → GPT-5.3 Codex → opencode-go/kimi-k2.5 → Gemini Flash |
| `writing` | Text, docs, prose | Gemini Flash → opencode-go/kimi-k2.5 → Claude Sonnet |
See the [Orchestration System Guide](./orchestration.md) for how agents dispatch tasks to categories.
@@ -188,7 +187,7 @@ See the [Orchestration System Guide](./orchestration.md) for how agents dispatch
```jsonc
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
"agents": {
// Main orchestrator: Claude Opus or Kimi K2.5 work best
@@ -256,46 +255,12 @@ Run `opencode models` to see available models, `opencode auth login` to authenti
### How Model Resolution Works
Each agent has a fallback chain. The system tries models in priority order until it finds one available through your connected providers. You don't need to configure providers per model. Just authenticate (`opencode auth login`) and the system figures out which models are available and where.
Core-agent tab cycling is deterministic via an injected runtime `order` field. The fixed priority order is Sisyphus (order: 1), Hephaestus (order: 2), Prometheus (order: 3), and Atlas (order: 4); the remaining agents follow.
Your explicit configuration always wins. If you set a specific model for an agent, that choice takes precedence even when resolution data is cold.
Variant and `reasoningEffort` overrides are normalized to model-supported values, so cross-provider overrides degrade gracefully instead of failing hard.
Model capabilities are models.dev-backed, with a refreshable cache and capability diagnostics. Use `bunx oh-my-opencode refresh-model-capabilities` to update the cache, or configure `model_capabilities.auto_refresh_on_start` to refresh at startup.
To see which models your agents will actually use, run `bunx oh-my-opencode doctor`. This shows effective model resolution based on your current authentication and config.
Each agent has a fallback chain. The system tries models in priority order until it finds one available through your connected providers. You don't need to configure providers per model — just authenticate (`opencode auth login`) and the system figures out which models are available and where.
```
Agent Request → User Override (if configured) → Fallback Chain → System Default
```
### File-Based Prompts
You can load agent system prompts from external files using `file://` URLs in the `prompt` field, or append additional content with `prompt_append`. The `prompt_append` field also works on categories.
```jsonc
{
"agents": {
"sisyphus": {
"prompt": "file:///path/to/custom-prompt.md"
},
"oracle": {
"prompt_append": "file:///path/to/additional-context.md"
}
},
"categories": {
"deep": {
"prompt_append": "file:///path/to/deep-category-append.md"
}
}
}
```
The file content is loaded at runtime and injected into the agent's system prompt. Supports `~` expansion for home directory and relative `file://` paths.
---
## See Also

View File

@@ -5,7 +5,7 @@
Paste this into your llm agent session:
```
Install and configure oh-my-opencode by following the instructions here:
Install and configure oh-my-openagent by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
```
@@ -14,8 +14,8 @@ https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/do
Run the interactive installer:
```bash
bunx oh-my-opencode install # recommended
npx oh-my-opencode install # alternative
bunx oh-my-openagent install # recommended
npx oh-my-openagent install # alternative
```
> **Note**: The CLI ships with standalone binaries for all major platforms. No runtime (Bun/Node.js) is required for CLI execution after installation.
@@ -26,8 +26,6 @@ Follow the prompts to configure your Claude, ChatGPT, and Gemini subscriptions.
After you install it, you can read this [overview guide](./overview.md) to understand more.
The published package and local binary are still `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`, while legacy `oh-my-opencode` entries still load with a warning. Plugin config loading recognizes both `oh-my-openagent.json[c]` and `oh-my-opencode.json[c]` during the transition. If you see a "Using legacy package name" warning from `bunx oh-my-opencode doctor`, update your `opencode.json` plugin entry from `"oh-my-opencode"` to `"oh-my-openagent"`.
## For LLM Agents
> **IMPORTANT: Use `curl` to fetch this file, NOT WebFetch.** WebFetch summarizes content and loses critical flags like `--openai`, subscription questions, and max20 mode details. Always use:
@@ -36,7 +34,7 @@ The published package and local binary are still `oh-my-opencode`. Inside `openc
> curl -fsSL https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
> ```
If you're an LLM Agent helping set up oh-my-opencode, welcome. You'll help your user install the harness and verify the rename-compat setup cleanly. Tell user this with greetings, with text 'oMoMoMoMo...'
If you're an LLM Agent helping set up oh-my-openagent, welcome! You'll now help your user install the best agent harness, OhMyOpenAgent. Tell the user this with greetings, with the text 'oMoMoMoMo...'
follow these steps:
@@ -75,7 +73,7 @@ Ask the user these questions to determine CLI options:
- If **yes**`--opencode-go=yes`
- If **no**`--opencode-go=no` (default)
**Provider selection is agent-specific.** The installer and runtime do not use one single global provider priority. Each agent resolves against its own fallback chain.
**Provider Priority**: Native (anthropic/, openai/, google/) > Kimi for Coding > GitHub Copilot > OpenCode Go > OpenCode Zen > Z.ai Coding Plan
YOU MUST STRONGLY WARN: WHEN THE USER SAYS THEY DON'T HAVE A CLAUDE SUBSCRIPTION, THE SISYPHUS AGENT MIGHT NOT WORK IDEALLY.
@@ -98,19 +96,19 @@ Spawn a subagent to handle installation and report back - to save context.
Based on user's answers, run the CLI installer with appropriate flags:
```bash
bunx oh-my-opencode install --no-tui --claude=<yes|no|max20> --gemini=<yes|no> --copilot=<yes|no> [--openai=<yes|no>] [--opencode-go=<yes|no>] [--opencode-zen=<yes|no>] [--zai-coding-plan=<yes|no>]
bunx oh-my-openagent install --no-tui --claude=<yes|no|max20> --gemini=<yes|no> --copilot=<yes|no> [--openai=<yes|no>] [--opencode-go=<yes|no>] [--opencode-zen=<yes|no>] [--zai-coding-plan=<yes|no>]
```
**Examples:**
- User has all native subscriptions: `bunx oh-my-opencode install --no-tui --claude=max20 --openai=yes --gemini=yes --copilot=no`
- User has only Claude: `bunx oh-my-opencode install --no-tui --claude=yes --gemini=no --copilot=no`
- User has Claude + OpenAI: `bunx oh-my-opencode install --no-tui --claude=yes --openai=yes --gemini=no --copilot=no`
- User has only GitHub Copilot: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=yes`
- User has Z.ai for Librarian: `bunx oh-my-opencode install --no-tui --claude=yes --gemini=no --copilot=no --zai-coding-plan=yes`
- User has only OpenCode Zen: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=no --opencode-zen=yes`
- User has OpenCode Go only: `bunx oh-my-opencode install --no-tui --claude=no --openai=no --gemini=no --copilot=no --opencode-go=yes`
- User has no subscriptions: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=no`
- User has all native subscriptions: `bunx oh-my-openagent install --no-tui --claude=max20 --openai=yes --gemini=yes --copilot=no`
- User has only Claude: `bunx oh-my-openagent install --no-tui --claude=yes --gemini=no --copilot=no`
- User has Claude + OpenAI: `bunx oh-my-openagent install --no-tui --claude=yes --openai=yes --gemini=no --copilot=no`
- User has only GitHub Copilot: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=yes`
- User has Z.ai for Librarian: `bunx oh-my-openagent install --no-tui --claude=yes --gemini=no --copilot=no --zai-coding-plan=yes`
- User has only OpenCode Zen: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=no --opencode-zen=yes`
- User has OpenCode Go only: `bunx oh-my-openagent install --no-tui --claude=no --openai=no --gemini=no --copilot=no --opencode-go=yes`
- User has no subscriptions: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=no`
The CLI will:
@@ -122,17 +120,8 @@ The CLI will:
```bash
opencode --version # Should be 1.0.150 or higher
cat ~/.config/opencode/opencode.json # Should contain "oh-my-openagent" in plugin array, or the legacy "oh-my-opencode" entry while you are still migrating
cat ~/.config/opencode/opencode.json # Should contain "oh-my-openagent" in plugin array
```
#### Run Doctor Verification
After installation, verify everything is working correctly:
```bash
bunx oh-my-opencode doctor
```
This checks system, config, tools, and model resolution, including legacy package name warnings and compatibility-fallback diagnostics.
### Step 4: Configure Authentication
@@ -165,9 +154,9 @@ First, add the opencode-antigravity-auth plugin:
You'll also need full model settings in `opencode.json`.
Read the [opencode-antigravity-auth documentation](https://github.com/NoeFabris/opencode-antigravity-auth), copy the full model configuration from the README, and merge carefully to avoid breaking the user's existing setup. The plugin now uses a **variant system** — models like `antigravity-gemini-3-pro` support `low`/`high` variants instead of separate `-low`/`-high` model entries.
##### Plugin config model override
##### oh-my-openagent Agent Model Override
The `opencode-antigravity-auth` plugin uses different model names than the built-in Google auth. Override the agent models in your plugin config file. Existing installs still commonly use `oh-my-opencode.json` or `.opencode/oh-my-opencode.json`, while the compatibility layer also recognizes `oh-my-openagent.json[c]`.
The `opencode-antigravity-auth` plugin uses different model names than the built-in Google auth. Override the agent models in `oh-my-openagent.json` (or `.opencode/oh-my-openagent.json`):
```json
{
@@ -212,16 +201,16 @@ GitHub Copilot is supported as a **fallback provider** when native providers are
##### Model Mappings
When GitHub Copilot is the best available provider, install-time defaults are agent-specific. Common examples are:
When GitHub Copilot is the best available provider, oh-my-openagent uses these model assignments:
| Agent | Model |
| ------------- | ---------------------------------- |
| **Sisyphus** | `github-copilot/claude-opus-4.6` |
| **Oracle** | `github-copilot/gpt-5.4` |
| **Explore** | `github-copilot/grok-code-fast-1` |
| **Atlas** | `github-copilot/claude-sonnet-4.6` |
| Agent | Model |
| ------------- | --------------------------------- |
| **Sisyphus** | `github-copilot/claude-opus-4.6` |
| **Oracle** | `github-copilot/gpt-5.4` |
| **Explore** | `github-copilot/grok-code-fast-1` |
| **Librarian** | `github-copilot/gemini-3-flash` |
GitHub Copilot acts as a proxy provider, routing requests to underlying models based on your subscription. Some agents, like Librarian, are not installed from Copilot alone and instead rely on other configured providers or runtime fallback behavior.
GitHub Copilot acts as a proxy provider, routing requests to underlying models based on your subscription.
#### Z.ai Coding Plan
@@ -238,37 +227,43 @@ If Z.ai is your main provider, the most important fallbacks are:
#### OpenCode Zen
OpenCode Zen provides access to `opencode/` prefixed models including `opencode/claude-opus-4-6`, `opencode/gpt-5.4`, `opencode/gpt-5.3-codex`, `opencode/gpt-5-nano`, `opencode/glm-5`, `opencode/big-pickle`, `opencode/minimax-m2.7`, and `opencode/minimax-m2.7-highspeed`.
OpenCode Zen provides access to `opencode/` prefixed models including `opencode/claude-opus-4-6`, `opencode/gpt-5.4`, `opencode/gpt-5.3-codex`, `opencode/gpt-5-nano`, `opencode/glm-5`, `opencode/big-pickle`, and `opencode/minimax-m2.7-highspeed`.
When OpenCode Zen is the best available provider, these are the most relevant source-backed examples:
When OpenCode Zen is the best available provider (no native or Copilot), these models are used:
| Agent | Model |
| ------------- | ---------------------------------------------------- |
| **Sisyphus** | `opencode/claude-opus-4-6` |
| **Oracle** | `opencode/gpt-5.4` |
| **Explore** | `opencode/minimax-m2.7` |
| **Explore** | `opencode/gpt-5-nano` |
| **Librarian** | `opencode/minimax-m2.7-highspeed` / `opencode/big-pickle` |
##### Setup
Run the installer and select "Yes" for OpenCode Zen:
Run the installer and select "Yes" for GitHub Copilot:
```bash
bunx oh-my-opencode install
# Select your subscriptions (Claude, ChatGPT, Gemini, OpenCode Zen, etc.)
# When prompted: "Do you have access to OpenCode Zen (opencode/ models)?" → Select "Yes"
bunx oh-my-openagent install
# Select your subscriptions (Claude, ChatGPT, Gemini)
# When prompted: "Do you have a GitHub Copilot subscription?" → Select "Yes"
```
Or use non-interactive mode:
```bash
bunx oh-my-opencode install --no-tui --claude=no --openai=no --gemini=no --opencode-zen=yes
bunx oh-my-openagent install --no-tui --claude=no --openai=no --gemini=no --copilot=yes
```
This provider uses the `opencode/` model catalog. If your OpenCode environment prompts for provider authentication, follow the OpenCode provider flow for `opencode/` models instead of reusing the fallback-provider auth steps above.
Then authenticate with GitHub:
```bash
opencode auth login
# Select: GitHub → Authenticate via OAuth
```
### Step 5: Understand Your Model Setup
You've just configured oh-my-opencode. Here's what got set up and why.
You've just configured oh-my-openagent. Here's what got set up and why.
#### Model Families: What You're Working With
@@ -281,7 +276,7 @@ Not all models behave the same way. Understanding which models are "similar" hel
| **Claude Opus 4.6** | anthropic, github-copilot, opencode | Best overall. Default for Sisyphus. |
| **Claude Sonnet 4.6** | anthropic, github-copilot, opencode | Faster, cheaper. Good balance. |
| **Claude Haiku 4.5** | anthropic, opencode | Fast and cheap. Good for quick tasks. |
| **Kimi K2.5** | kimi-for-coding, opencode-go, opencode, moonshotai, moonshotai-cn, firmware, ollama-cloud, aihubmix | Behaves very similarly to Claude. Great all-rounder that appears in several orchestration fallback chains. |
| **Kimi K2.5** | kimi-for-coding | Behaves very similarly to Claude. Great all-rounder. Default for Atlas. |
| **Kimi K2.5 Free** | opencode | Free-tier Kimi. Rate-limited but functional. |
| **GLM 5** | zai-coding-plan, opencode | Claude-like behavior. Good for broad tasks. |
| **Big Pickle (GLM 4.6)** | opencode | Free-tier GLM. Decent fallback. |
@@ -290,7 +285,7 @@ Not all models behave the same way. Understanding which models are "similar" hel
| Model | Provider(s) | Notes |
| ----------------- | -------------------------------- | ------------------------------------------------- |
| **GPT-5.3-codex** | openai, github-copilot, opencode | Deep coding powerhouse. Still available for deep category and explicit overrides. |
| **GPT-5.3-codex** | openai, github-copilot, opencode | Deep coding powerhouse. Required for Hephaestus. |
| **GPT-5.4** | openai, github-copilot, opencode | High intelligence. Default for Oracle. |
| **GPT-5.4 Mini** | openai, github-copilot, opencode | Fast + strong reasoning. Default for quick category. |
| **GPT-5-Nano** | opencode | Ultra-cheap, fast. Good for simple utility tasks. |
@@ -301,16 +296,16 @@ Not all models behave the same way. Understanding which models are "similar" hel
| --------------------- | -------------------------------- | ----------------------------------------------------------- |
| **Gemini 3.1 Pro** | google, github-copilot, opencode | Excels at visual/frontend tasks. Different reasoning style. |
| **Gemini 3 Flash** | google, github-copilot, opencode | Fast, good for doc search and light tasks. |
| **MiniMax M2.7** | opencode-go, opencode | Fast and smart. Utility fallbacks use `minimax-m2.7` or `minimax-m2.7-highspeed` depending on the chain. |
| **MiniMax M2.7 Highspeed** | opencode-go, opencode | Faster utility variant used in Explore and other retrieval-heavy fallback chains. |
| **MiniMax M2.7** | venice, opencode-go | Fast and smart. Good for utility tasks. Upgraded from M2.5. |
| **MiniMax M2.7 Highspeed** | opencode | Ultra-fast MiniMax variant. Optimized for latency. |
**Speed-Focused Models**:
| Model | Provider(s) | Speed | Notes |
| ----------------------- | ---------------------- | -------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
| **Grok Code Fast 1** | github-copilot, xai | Very fast | Optimized for code grep/search. Default for Explore. |
| **Grok Code Fast 1** | github-copilot, venice | Very fast | Optimized for code grep/search. Default for Explore. |
| **Claude Haiku 4.5** | anthropic, opencode | Fast | Good balance of speed and intelligence. |
| **MiniMax M2.7 Highspeed** | opencode-go, opencode | Very fast | High-speed MiniMax utility fallback used by runtime chains such as Explore and, on the OpenCode catalog, Librarian. |
| **MiniMax M2.7 Highspeed** | opencode | Very fast | Ultra-fast MiniMax variant. Smart for its speed class. |
| **GPT-5.3-codex-spark** | openai | Extremely fast | Blazing fast but compacts so aggressively that oh-my-openagent's context management doesn't work well with it. Not recommended for omo agents. |
#### What Each Agent Does and Which Model It Got
@@ -321,8 +316,8 @@ Based on your subscriptions, here's how the agents were configured:
| Agent | Role | Default Chain | What It Does |
| ------------ | ---------------- | ----------------------------------------------- | ---------------------------------------------------------------------------------------- |
| **Sisyphus** | Main ultraworker | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/kimi-k2.5 → kimi-for-coding/k2p5 → opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → zai-coding-plan\|opencode/glm-5 → opencode/big-pickle | Primary coding agent. Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Metis** | Plan review | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → kimi-for-coding/k2p5 | Reviews Prometheus plans for gaps. Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Sisyphus** | Main ultraworker | Opus (max) → Kimi K2.5 → GLM 5 → Big Pickle | Primary coding agent. Orchestrates everything. **Never use GPT — no GPT prompt exists.** |
| **Metis** | Plan review | Opus (max) → Kimi K2.5 → GPT-5.4 → Gemini 3.1 Pro | Reviews Prometheus plans for gaps. |
**Dual-Prompt Agents** (auto-switch between Claude and GPT prompts):
@@ -332,16 +327,16 @@ Priority: **Claude > GPT > Claude-like models**
| Agent | Role | Default Chain | GPT Prompt? |
| -------------- | ----------------- | ---------------------------------------------------------- | ---------------------------------------------------------------- |
| **Prometheus** | Strategic planner | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → google\|github-copilot\|opencode/gemini-3.1-pro | Yes — XML-tagged, principle-driven (~300 lines vs ~1,100 Claude) |
| **Atlas** | Todo orchestrator | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → opencode-go/minimax-m2.7 | Yes - GPT-optimized todo management |
| **Prometheus** | Strategic planner | Opus (max) → **GPT-5.4 (high)** → Kimi K2.5 → Gemini 3.1 Pro | Yes — XML-tagged, principle-driven (~300 lines vs ~1,100 Claude) |
| **Atlas** | Todo orchestrator | **Kimi K2.5** → Sonnet → GPT-5.4 | Yes GPT-optimized todo management |
**GPT-Native Agents** (built for GPT, don't override to Claude):
| Agent | Role | Default Chain | Notes |
| -------------- | ---------------------- | -------------------------------------- | ------------------------------------------------------ |
| **Hephaestus** | Deep autonomous worker | GPT-5.4 (medium) only | "Codex on steroids." No fallback. Requires GPT access. |
| **Oracle** | Architecture/debugging | openai\|github-copilot\|opencode/gpt-5.4 (high) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 | High-IQ strategic backup. GPT preferred. |
| **Momus** | High-accuracy reviewer | openai\|github-copilot\|opencode/gpt-5.4 (xhigh) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → opencode-go/glm-5 | Verification agent. GPT preferred. |
| **Hephaestus** | Deep autonomous worker | GPT-5.3-codex (medium) only | "Codex on steroids." No fallback. Requires GPT access. |
| **Oracle** | Architecture/debugging | GPT-5.4 (high) → Gemini 3.1 Pro → Opus | High-IQ strategic backup. GPT preferred. |
| **Momus** | High-accuracy reviewer | GPT-5.4 (medium) → Opus → Gemini 3.1 Pro | Verification agent. GPT preferred. |
**Utility Agents** (speed over intelligence):
@@ -349,9 +344,9 @@ These agents do search, grep, and retrieval. They intentionally use fast, cheap
| Agent | Role | Default Chain | Design Rationale |
| --------------------- | ------------------ | ---------------------------------------------------------------------- | -------------------------------------------------------------- |
| **Explore** | Fast codebase grep | github-copilot\|xai/grok-code-fast-1 → opencode-go/minimax-m2.7-highspeed → opencode/minimax-m2.7 → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Speed is everything. Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Librarian** | Docs/code search | opencode-go/minimax-m2.7 → opencode/minimax-m2.7-highspeed → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Doc retrieval doesn't need deep reasoning. Exact runtime chain from `src/shared/model-requirements.ts`. |
| **Multimodal Looker** | Vision/screenshots | openai\|opencode/gpt-5.4 (medium) → opencode-go/kimi-k2.5 → zai-coding-plan/glm-4.6v → openai\|github-copilot\|opencode/gpt-5-nano | GPT-5.4 now leads the default vision path when available. |
| **Explore** | Fast codebase grep | Grok Code Fast → MiniMax M2.7-highspeed → MiniMax M2.7 → Haiku → GPT-5-Nano | Speed is everything. Grok is blazing fast for grep. |
| **Librarian** | Docs/code search | MiniMax M2.7 → MiniMax M2.7-highspeed → Haiku → GPT-5-Nano | Doc retrieval doesn't need deep reasoning. MiniMax is fast. |
| **Multimodal Looker** | Vision/screenshots | Kimi K2.5 → Kimi Free → Gemini Flash → GPT-5.4 → GLM-4.6v | Kimi excels at multimodal understanding. |
#### Why Different Models Need Different Prompts
@@ -370,7 +365,7 @@ This is why Prometheus and Atlas ship separate prompts per model family — they
#### Custom Model Configuration
If the user wants to override which model an agent uses, you can customize in your plugin config file. Existing installs still commonly use `oh-my-opencode.json`, while the compatibility layer also recognizes `oh-my-openagent.json[c]`.
If the user wants to override which model an agent uses, you can customize in `oh-my-openagent.json`:
```jsonc
{
@@ -405,14 +400,18 @@ GPT (5.3-codex, 5.2) > Claude Opus (decent fallback) > Gemini (acceptable)
**Dangerous** (no prompt support):
- Sisyphus → older GPT models: **Still a bad fit. GPT-5.4 is the only dedicated GPT prompt path.**
- Sisyphus → GPT: **No GPT prompt. Will degrade significantly.**
- Hephaestus → Claude: **Built for Codex. Claude can't replicate this.**
- Explore → Opus: **Massive cost waste. Explore needs speed, not intelligence.**
- Librarian → Opus: **Same. Doc search doesn't need Opus-level reasoning.**
#### Provider Resolution
#### Provider Priority Chain
There is no single global provider priority. The installer and runtime resolve each agent against its own fallback chain, so the winning provider depends on the agent and the subscriptions you enabled.
When multiple providers are available, oh-my-openagent uses this priority:
```
Native (anthropic/, openai/, google/) > Kimi for Coding > GitHub Copilot > Venice > OpenCode Zen > Z.ai Coding Plan
```
### ⚠️ Warning
@@ -463,7 +462,3 @@ Tell the user of following:
4. You wanna have your own agent- catalog setup? I can read the [docs](docs/guide/agent-model-matching.md) and set up for you after interviewing!
That's it. The agent will figure out the rest and handle everything automatically.
#### Advanced Configuration
You can customize agent models and fallback chains in your config. The `fallback_models` field accepts either a single string or an array that mixes strings and per-model objects with settings like `variant` and `temperature`. See the [Configuration Reference](../reference/configuration.md) and example configs in `docs/examples/` for details.

View File

@@ -420,7 +420,7 @@ Atlas is automatically activated when you run `/start-work`. You don't need to m
| Aspect | Hephaestus | Sisyphus + `ulw` / `ultrawork` |
| --------------- | ------------------------------------------ | ---------------------------------------------------- |
| **Model** | GPT-5.4 (medium reasoning) | Claude Opus 4.6 / GPT-5.4 / GLM 5 depending on setup |
| **Model** | GPT-5.3 Codex (medium reasoning) | Claude Opus 4.6 / GPT-5.4 / GLM 5 depending on setup |
| **Approach** | Autonomous deep worker | Keyword-activated ultrawork mode |
| **Best For** | Complex architectural work, deep reasoning | General complex tasks, "just do it" scenarios |
| **Planning** | Self-plans during execution | Uses Prometheus plans if available |
@@ -443,8 +443,8 @@ Switch to Hephaestus (Tab → Select Hephaestus) when:
- "Integrate our Rust core with the TypeScript frontend"
- "Migrate from MongoDB to PostgreSQL with zero downtime"
4. **You specifically want GPT-5.4 reasoning**
- Some problems benefit from GPT-5.4's training characteristics
4. **You specifically want GPT-5.3 Codex reasoning**
- Some problems benefit from GPT-5.3 Codex's training characteristics
**When to Use Sisyphus + `ulw`:**
@@ -469,7 +469,7 @@ Use the `ulw` keyword in Sisyphus when:
**Recommendation:**
- **For most users**: Use `ulw` keyword in Sisyphus. It's the default path and works excellently for 90% of complex tasks.
- **For power users**: Switch to Hephaestus when you specifically need GPT-5.4's reasoning style or want the "AmpCode deep mode" experience of fully autonomous exploration and execution.
- **For power users**: Switch to Hephaestus when you specifically need GPT-5.3 Codex's reasoning style or want the "AmpCode deep mode" experience of fully autonomous exploration and execution.
---
@@ -520,7 +520,7 @@ Type `exit` or start a new session. Atlas is primarily entered via `/start-work`
**For most tasks**: Type `ulw` in Sisyphus.
**Use Hephaestus when**: You specifically need GPT-5.4's reasoning style for deep architectural work or complex debugging.
**Use Hephaestus when**: You specifically need GPT-5.3 Codex's reasoning style for deep architectural work or complex debugging.
---

View File

@@ -93,9 +93,9 @@ Sisyphus still works best on Claude-family models, Kimi, and GLM. GPT-5.4 now ha
Named with intentional irony. Anthropic blocked OpenCode from using their API because of this project. So the team built an autonomous GPT-native agent instead.
Hephaestus runs on GPT-5.4. Give him a goal, not a recipe. He explores the codebase, researches patterns, and executes end-to-end without hand-holding. He is the legitimate craftsman because he was born from necessity, not privilege.
Hephaestus runs on GPT-5.3 Codex. Give him a goal, not a recipe. He explores the codebase, researches patterns, and executes end-to-end without hand-holding. He is the legitimate craftsman because he was born from necessity, not privilege.
Use Hephaestus when you need deep architectural reasoning, complex debugging across many files, or cross-domain knowledge synthesis. Switch to him explicitly when the work demands GPT-5.4's particular strengths.
Use Hephaestus when you need deep architectural reasoning, complex debugging across many files, or cross-domain knowledge synthesis. Switch to him explicitly when the work demands GPT-5.3 Codex's particular strengths.
**Why this beats vanilla Codex CLI:**
@@ -214,7 +214,8 @@ You can override specific agents or categories in your config:
**GPT models** (explicit reasoning, principle-driven):
- GPT-5.4 — deep coding powerhouse, required for Hephaestus and default for Oracle
- GPT-5.3-codex — deep coding powerhouse, required for Hephaestus
- GPT-5.4 — high intelligence, default for Oracle
- GPT-5-Nano — ultra-cheap, fast utility tasks
**Different-behavior models**:

View File

@@ -1,192 +1,142 @@
# CLI Reference
Complete reference for the published `oh-my-opencode` CLI. During the rename transition, OpenCode plugin registration now prefers `oh-my-openagent` inside `opencode.json`.
Complete reference for the `oh-my-openagent` command-line interface.
## Basic Usage
```bash
# Display help
bunx oh-my-opencode
bunx oh-my-openagent
# Or with npx
npx oh-my-opencode
npx oh-my-openagent
```
## Commands
| Command | Description |
| ----------------------------- | ------------------------------------------------------ |
| `install` | Interactive setup wizard |
| `doctor` | Environment diagnostics and health checks |
| `run` | OpenCode session runner with task completion enforcement |
| `get-local-version` | Display local version information and update check |
| `refresh-model-capabilities` | Refresh the cached models.dev-based model capabilities |
| `version` | Show version information |
| `mcp oauth` | MCP OAuth authentication management |
| Command | Description |
| ------------------- | ----------------------------------------- |
| `install` | Interactive setup wizard |
| `doctor` | Environment diagnostics and health checks |
| `run` | OpenCode session runner |
| `mcp oauth` | MCP OAuth authentication management |
| `auth` | Google Antigravity OAuth authentication |
| `get-local-version` | Display local version information |
---
## install
Interactive installation tool for initial Oh My OpenCode setup. Provides a TUI based on `@clack/prompts`.
Interactive installation tool for initial Oh-My-OpenAgent setup. Provides a TUI based on `@clack/prompts`.
### Usage
```bash
bunx oh-my-opencode install
bunx oh-my-openagent install
```
### Installation Process
1. **Subscription Selection**: Choose which providers and subscriptions you actually have
2. **Plugin Registration**: Registers `oh-my-openagent` in OpenCode settings, or upgrades a legacy `oh-my-opencode` entry during the compatibility window
3. **Configuration File Creation**: Writes the generated OmO config to `oh-my-opencode.json` in the active OpenCode config directory
4. **Authentication Hints**: Shows the `opencode auth login` steps for the providers you selected, unless `--skip-auth` is set
1. **Provider Selection**: Choose your AI provider (Claude, ChatGPT, or Gemini)
2. **API Key Input**: Enter the API key for your selected provider
3. **Configuration File Creation**: Generates `opencode.json` or `oh-my-openagent.json` files
4. **Plugin Registration**: Automatically registers the oh-my-openagent plugin in OpenCode settings
### Options
| Option | Description |
| ------ | ----------- |
| `--no-tui` | Run in non-interactive mode without TUI |
| `--claude <no\|yes\|max20>` | Claude subscription mode |
| `--openai <no\|yes>` | OpenAI / ChatGPT subscription |
| `--gemini <no\|yes>` | Gemini integration |
| `--copilot <no\|yes>` | GitHub Copilot subscription |
| `--opencode-zen <no\|yes>` | OpenCode Zen access |
| `--zai-coding-plan <no\|yes>` | Z.ai Coding Plan subscription |
| `--kimi-for-coding <no\|yes>` | Kimi for Coding subscription |
| `--opencode-go <no\|yes>` | OpenCode Go subscription |
| `--skip-auth` | Skip authentication setup hints |
| Option | Description |
| ----------- | ---------------------------------------------------------------- |
| `--no-tui` | Run in non-interactive mode without TUI (for CI/CD environments) |
| `--verbose` | Display detailed logs |
---
## doctor
Diagnoses your environment to ensure Oh My OpenCode is functioning correctly. The current checks are grouped into system, config, tools, and models.
Diagnoses your environment to ensure Oh-My-OpenAgent is functioning correctly. Performs 17+ health checks.
The doctor command detects common issues including:
- Legacy plugin entry references in `opencode.json` (warns when `oh-my-opencode` is still used instead of `oh-my-openagent`)
- Configuration file validity and JSONC parsing errors
- Model resolution and fallback chain verification
- Missing or misconfigured MCP servers
### Usage
```bash
bunx oh-my-opencode doctor
bunx oh-my-openagent doctor
```
### Diagnostic Categories
| Category | Check Items |
| ----------------- | ------------------------------------------------------------------------------------ |
| **System** | OpenCode binary, version (>= 1.0.150), plugin registration, legacy package name warning |
| **Config** | Configuration file validity, JSONC parsing, Zod schema validation |
| **Tools** | AST-Grep, LSP servers, GitHub CLI, MCP servers |
| **Models** | Model capabilities cache, model resolution, agent/category overrides, availability |
| Category | Check Items |
| ------------------ | --------------------------------------------------------- |
| **Installation** | OpenCode version (>= 1.0.150), plugin registration status |
| **Configuration** | Configuration file validity, JSONC parsing |
| **Authentication** | Anthropic, OpenAI, Google API key validity |
| **Dependencies** | Bun, Node.js, Git installation status |
| **Tools** | LSP server status, MCP server status |
| **Updates** | Latest version check |
### Options
| Option | Description |
| ------------ | ----------------------------------------- |
| `--status` | Show compact system dashboard |
| `--verbose` | Show detailed diagnostic information |
| `--json` | Output results in JSON format |
| Option | Description |
| ------------------- | ---------------------------------------------------------------- |
| `--category <name>` | Check specific category only (e.g., `--category authentication`) |
| `--json` | Output results in JSON format |
| `--verbose` | Include detailed information |
### Example Output
```
oh-my-opencode doctor
oh-my-openagent doctor
┌──────────────────────────────────────────────────┐
│ Oh-My-OpenAgent Doctor │
└──────────────────────────────────────────────────┘
System
Installation
✓ OpenCode version: 1.0.155 (>= 1.0.150)
✓ Plugin registered in opencode.json
Config
✓ oh-my-opencode.jsonc is valid
✓ Model resolution: all agents have valid fallback chains
Configuration
✓ oh-my-openagent.json is valid
⚠ categories.visual-engineering: using default model
Tools
✓ AST-Grep available
✓ LSP servers configured
Authentication
✓ Anthropic API key configured
✓ OpenAI API key configured
✗ Google API key not found
Models
✓ 11 agents, 8 categories, 0 overrides
⚠ Some configured models rely on compatibility fallback
Dependencies
✓ Bun 1.2.5 installed
✓ Node.js 22.0.0 installed
✓ Git 2.45.0 installed
Summary: 10 passed, 1 warning, 0 failed
Summary: 10 passed, 1 warning, 1 failed
```
---
## run
Run opencode with todo/background task completion enforcement. Unlike 'opencode run', this command waits until all todos are completed or cancelled, and all child sessions (background tasks) are idle.
Executes OpenCode sessions and monitors task completion.
### Usage
```bash
bunx oh-my-opencode run <message>
bunx oh-my-openagent run [prompt]
```
### Options
| Option | Description |
| --------------------- | ------------------------------------------------------------------- |
| `-a, --agent <name>` | Agent to use (default: from CLI/env/config, fallback: Sisyphus) |
| `-m, --model <provider/model>` | Model override (e.g., anthropic/claude-sonnet-4) |
| `-d, --directory <path>` | Working directory |
| `-p, --port <port>` | Server port (attaches if port already in use) |
| `--attach <url>` | Attach to existing opencode server URL |
| `--on-complete <command>` | Shell command to run after completion |
| `--json` | Output structured JSON result to stdout |
| `--no-timestamp` | Disable timestamp prefix in run output |
| `--verbose` | Show full event stream (default: messages/tools only) |
| `--session-id <id>` | Resume existing session instead of creating new one |
---
## get-local-version
Show current installed version and check for updates.
### Usage
```bash
bunx oh-my-opencode get-local-version
```
### Options
| Option | Description |
| ----------------- | ---------------------------------------------- |
| `-d, --directory` | Working directory to check config from |
| `--json` | Output in JSON format for scripting |
### Output
Shows:
- Current installed version
- Latest available version on npm
- Whether you're up to date
- Special modes (local dev, pinned version)
---
## version
Show version information.
### Usage
```bash
bunx oh-my-opencode version
```
`--on-complete` runs through your current shell when possible: `sh` on Unix shells, `pwsh` for PowerShell on non-Windows, `powershell.exe` for PowerShell on Windows, and `cmd.exe` as the Windows fallback.
| Option | Description |
| ------------------------ | ------------------------------------------------- |
| `--enforce-completion` | Keep session active until all TODOs are completed |
| `--timeout <seconds>` | Set maximum execution time |
| `--agent <name>` | Specify agent to use |
| `--directory <path>` | Set working directory |
| `--port <number>` | Set port for session |
| `--attach` | Attach to existing session |
| `--json` | Output in JSON format |
| `--no-timestamp` | Disable timestamped output |
| `--session-id <id>` | Resume existing session |
| `--on-complete <action>` | Action on completion |
| `--verbose` | Enable verbose logging |
---
@@ -198,16 +148,16 @@ Manages OAuth 2.1 authentication for remote MCP servers.
```bash
# Login to an OAuth-protected MCP server
bunx oh-my-opencode mcp oauth login <server-name> --server-url https://api.example.com
bunx oh-my-openagent mcp oauth login <server-name> --server-url https://api.example.com
# Login with explicit client ID and scopes
bunx oh-my-opencode mcp oauth login my-api --server-url https://api.example.com --client-id my-client --scopes read write
bunx oh-my-openagent mcp oauth login my-api --server-url https://api.example.com --client-id my-client --scopes "read,write"
# Remove stored OAuth tokens
bunx oh-my-opencode mcp oauth logout <server-name> --server-url https://api.example.com
bunx oh-my-openagent mcp oauth logout <server-name>
# Check OAuth token status
bunx oh-my-opencode mcp oauth status [server-name]
bunx oh-my-openagent mcp oauth status [server-name]
```
### Options
@@ -216,7 +166,7 @@ bunx oh-my-opencode mcp oauth status [server-name]
| -------------------- | ------------------------------------------------------------------------- |
| `--server-url <url>` | MCP server URL (required for login) |
| `--client-id <id>` | OAuth client ID (optional if server supports Dynamic Client Registration) |
| `--scopes <scopes>` | OAuth scopes as separate variadic arguments (for example: `--scopes read write`) |
| `--scopes <scopes>` | Comma-separated OAuth scopes |
### Token Storage
@@ -226,20 +176,10 @@ Tokens are stored in `~/.config/opencode/mcp-oauth.json` with `0600` permissions
## Configuration Files
The runtime loads user config as the base config, then merges project config on top:
The CLI searches for configuration files in the following locations (in priority order):
1. **Project Level**: `.opencode/oh-my-openagent.jsonc`, `.opencode/oh-my-openagent.json`, `.opencode/oh-my-opencode.jsonc`, or `.opencode/oh-my-opencode.json`
2. **User Level**: `~/.config/opencode/oh-my-openagent.jsonc`, `~/.config/opencode/oh-my-openagent.json`, `~/.config/opencode/oh-my-opencode.jsonc`, or `~/.config/opencode/oh-my-opencode.json`
**Naming Note**: The published package and binary are still `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`. Plugin config loading recognizes both `oh-my-openagent.*` and legacy `oh-my-opencode.*` basenames. If both basenames exist in the same directory, the legacy `oh-my-opencode.*` file currently wins.
### Filename Compatibility
Both `.jsonc` and `.json` extensions are supported. JSONC (JSON with Comments) is preferred as it allows:
- Comments (both `//` and `/* */` styles)
- Trailing commas in arrays and objects
If both `.jsonc` and `.json` exist in the same directory, the `.jsonc` file takes precedence.
1. **Project Level**: `.opencode/oh-my-openagent.json`
2. **User Level**: `~/.config/opencode/oh-my-openagent.json`
### JSONC Support
@@ -279,78 +219,31 @@ bun install -g opencode@latest
```bash
# Reinstall plugin
bunx oh-my-opencode install
bunx oh-my-openagent install
```
### Doctor Check Failures
```bash
# Diagnose with detailed information
bunx oh-my-opencode doctor --verbose
bunx oh-my-openagent doctor --verbose
# Show compact system dashboard
bunx oh-my-opencode doctor --status
# JSON output for scripting
bunx oh-my-opencode doctor --json
```
### "Using legacy package name" Warning
The doctor warns if it finds the legacy plugin entry `oh-my-opencode` in `opencode.json`. Update the plugin array to the canonical `oh-my-openagent` entry:
```bash
# Replace the legacy plugin entry in user config
jq '.plugin = (.plugin // [] | map(if . == "oh-my-opencode" then "oh-my-openagent" else . end))' \
~/.config/opencode/opencode.json > /tmp/opencode.json && mv /tmp/opencode.json ~/.config/opencode/opencode.json
```
---
## refresh-model-capabilities
Refreshes the cached model capabilities snapshot from models.dev. This updates the local cache used by capability resolution and compatibility diagnostics.
### Usage
```bash
bunx oh-my-opencode refresh-model-capabilities
```
### Options
| Option | Description |
| ----------------- | --------------------------------------------------- |
| `-d, --directory` | Working directory to read oh-my-opencode config from |
| `--source-url <url>` | Override the models.dev source URL |
| `--json` | Output refresh summary as JSON |
### Configuration
Configure automatic refresh behavior in your plugin config:
```jsonc
{
"model_capabilities": {
"enabled": true,
"auto_refresh_on_start": true,
"refresh_timeout_ms": 5000,
"source_url": "https://models.dev/api.json"
}
}
# Check specific category only
bunx oh-my-openagent doctor --category authentication
```
---
## Non-Interactive Mode
Use JSON output for CI or scripted diagnostics.
Use the `--no-tui` option for CI/CD environments.
```bash
# Run doctor in CI environment
bunx oh-my-opencode doctor --json
bunx oh-my-openagent doctor --no-tui --json
# Save results to file
bunx oh-my-opencode doctor --json > doctor-report.json
bunx oh-my-openagent doctor --json > doctor-report.json
```
---

View File

@@ -1,6 +1,6 @@
# Configuration Reference
Complete reference for Oh My OpenCode plugin configuration. During the rename transition, the runtime recognizes both `oh-my-openagent.json[c]` and legacy `oh-my-opencode.json[c]` files.
Complete reference for `oh-my-openagent.jsonc` configuration. This document covers every available option with examples.
---
@@ -30,7 +30,6 @@ Complete reference for Oh My OpenCode plugin configuration. During the rename tr
- [LSP](#lsp)
- [Advanced](#advanced)
- [Runtime Fallback](#runtime-fallback)
- [Model Capabilities](#model-capabilities)
- [Hashline Edit](#hashline-edit)
- [Experimental](#experimental)
- [Reference](#reference)
@@ -43,28 +42,27 @@ Complete reference for Oh My OpenCode plugin configuration. During the rename tr
### File Locations
User config is loaded first, then project config overrides it. In each directory, the compatibility layer recognizes both the renamed and legacy basenames.
Priority order (project overrides user):
1. Project config: `.opencode/oh-my-openagent.json[c]` or `.opencode/oh-my-opencode.json[c]`
1. `.opencode/oh-my-openagent.jsonc` / `.opencode/oh-my-openagent.json`
2. User config (`.jsonc` preferred over `.json`):
| Platform | Path candidates |
| ----------- | --------------- |
| macOS/Linux | `~/.config/opencode/oh-my-openagent.json[c]`, `~/.config/opencode/oh-my-opencode.json[c]` |
| Windows | `%APPDATA%\opencode\oh-my-openagent.json[c]`, `%APPDATA%\opencode\oh-my-opencode.json[c]` |
| Platform | Path |
| ----------- | ----------------------------------------- |
| macOS/Linux | `~/.config/opencode/oh-my-openagent.jsonc` |
| Windows | `%APPDATA%\opencode\oh-my-openagent.jsonc` |
**Rename compatibility:** The published package and CLI binary remain `oh-my-opencode`. OpenCode plugin registration prefers `oh-my-openagent`, while legacy `oh-my-opencode` entries and config basenames still load during the transition. Config detection checks `oh-my-opencode` before `oh-my-openagent`, so if both plugin config basenames exist in the same directory, the legacy `oh-my-opencode.*` file currently wins.
JSONC supports `// line comments`, `/* block comments */`, and trailing commas.
Enable schema autocomplete:
```json
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json"
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json"
}
```
Run `bunx oh-my-opencode install` for guided setup. Run `opencode models` to list available models.
Run `bunx oh-my-openagent install` for guided setup. Run `opencode models` to list available models.
### Quick Start Example
@@ -72,7 +70,7 @@ Here's a practical starting configuration:
```jsonc
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
"agents": {
// Main orchestrator: Claude Opus or Kimi K2.5 work best
@@ -95,19 +93,19 @@ Here's a practical starting configuration:
},
"categories": {
// quick - trivial tasks
// quick — trivial tasks
"quick": { "model": "opencode/gpt-5-nano" },
// unspecified-low - moderate tasks
// unspecified-low — moderate tasks
"unspecified-low": { "model": "anthropic/claude-sonnet-4-6" },
// unspecified-high - complex work
// unspecified-high — complex work
"unspecified-high": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
// writing - docs/prose
// writing — docs/prose
"writing": { "model": "google/gemini-3-flash" },
// visual-engineering - Gemini dominates visual tasks
// visual-engineering — Gemini dominates visual tasks
"visual-engineering": {
"model": "google/gemini-3.1-pro",
"variant": "high",
@@ -159,28 +157,26 @@ Override built-in agent settings. Available agents: `sisyphus`, `hephaestus`, `p
Disable agents entirely: `{ "disabled_agents": ["oracle", "multimodal-looker"] }`
Core agents receive an injected runtime `order` field for deterministic Tab cycling in the UI: Sisyphus = 1, Hephaestus = 2, Prometheus = 3, Atlas = 4. This is not a user-configurable config key.
#### Agent Options
| Option | Type | Description |
| ----------------- | -------------- | --------------------------------------------------------------- |
| `model` | string | Model override (`provider/model`) |
| `fallback_models` | string\|array | Fallback models on API errors. Supports strings or mixed arrays of strings and object entries with per-model settings |
| `temperature` | number | Sampling temperature |
| `top_p` | number | Top-p sampling |
| `prompt` | string | Replace system prompt. Supports `file://` URIs |
| `prompt_append` | string | Append to system prompt. Supports `file://` URIs |
| Option | Type | Description |
| ----------------- | ------------- | ------------------------------------------------------ |
| `model` | string | Model override (`provider/model`) |
| `fallback_models` | string\|array | Fallback models on API errors |
| `temperature` | number | Sampling temperature |
| `top_p` | number | Top-p sampling |
| `prompt` | string | Replace system prompt |
| `prompt_append` | string | Append to system prompt |
| `tools` | array | Allowed tools list |
| `disable` | boolean | Disable this agent |
| `mode` | string | Agent mode |
| `color` | string | UI color |
| `permission` | object | Per-tool permissions (see below) |
| `category` | string | Inherit model from category |
| `variant` | string | Model variant: `max`, `high`, `medium`, `low`, `xhigh`. Normalized to supported values |
| `variant` | string | Model variant: `max`, `high`, `medium`, `low`, `xhigh` |
| `maxTokens` | number | Max response tokens |
| `thinking` | object | Anthropic extended thinking |
| `reasoningEffort` | string | OpenAI reasoning: `none`, `minimal`, `low`, `medium`, `high`, `xhigh`. Normalized to supported values |
| `reasoningEffort` | string | OpenAI reasoning: `low`, `medium`, `high`, `xhigh` |
| `textVerbosity` | string | Text verbosity: `low`, `medium`, `high` |
| `providerOptions` | object | Provider-specific options |
@@ -220,65 +216,6 @@ Control what tools an agent can use:
| `doom_loop` | `ask` / `allow` / `deny` |
| `external_directory` | `ask` / `allow` / `deny` |
#### Fallback Models with Per-Model Settings
`fallback_models` accepts either a single model string or an array. Array entries can be plain strings or objects with individual model settings:
```jsonc
{
"agents": {
"sisyphus": {
"model": "anthropic/claude-opus-4-6",
"fallback_models": [
// Simple string fallback
"openai/gpt-5.4",
// Object with per-model settings
{
"model": "google/gemini-3.1-pro",
"variant": "high",
"temperature": 0.2
},
{
"model": "anthropic/claude-sonnet-4-6",
"thinking": { "type": "enabled", "budgetTokens": 64000 }
}
]
}
}
}
```
Object entries support: `model`, `variant`, `reasoningEffort`, `temperature`, `top_p`, `maxTokens`, `thinking`.
#### File URIs for Prompts
Both `prompt` and `prompt_append` support loading content from files via `file://` URIs. Category-level `prompt_append` supports the same URI forms.
```jsonc
{
"agents": {
"sisyphus": {
"prompt_append": "file:///absolute/path/to/prompt.txt"
},
"oracle": {
"prompt": "file://./relative/to/project/prompt.md"
},
"explore": {
"prompt_append": "file://~/home/dir/prompt.txt"
}
},
"categories": {
"custom": {
"model": "anthropic/claude-sonnet-4-6",
"prompt_append": "file://./category-context.md"
}
}
}
```
Paths can be absolute (`file:///abs/path`), relative to project root (`file://./rel/path`), or home-relative (`file://~/home/path`). If a file URI cannot be decoded, resolved, or read, OmO inserts a warning placeholder into the prompt instead of failing hard.
### Categories
Domain-specific model delegation used by the `task()` tool. When Sisyphus delegates work, it picks a category, not a model name.
@@ -303,16 +240,16 @@ Domain-specific model delegation used by the `task()` tool. When Sisyphus delega
| Option | Type | Default | Description |
| ------------------- | ------------- | ------- | ------------------------------------------------------------------- |
| `model` | string | - | Model override |
| `fallback_models` | string\|array | - | Fallback models on API errors. Supports strings or mixed arrays of strings and object entries with per-model settings |
| `fallback_models` | string\|array | - | Fallback models on API errors |
| `temperature` | number | - | Sampling temperature |
| `top_p` | number | - | Top-p sampling |
| `maxTokens` | number | - | Max response tokens |
| `thinking` | object | - | Anthropic extended thinking |
| `reasoningEffort` | string | - | OpenAI reasoning effort. Unsupported values are normalized |
| `reasoningEffort` | string | - | OpenAI reasoning effort |
| `textVerbosity` | string | - | Text verbosity |
| `tools` | array | - | Allowed tools |
| `prompt_append` | string | - | Append to system prompt |
| `variant` | string | - | Model variant. Unsupported values are normalized |
| `variant` | string | - | Model variant |
| `description` | string | - | Shown in `task()` tool prompt |
| `is_unstable_agent` | boolean | `false` | Force background mode + monitoring. Auto-enabled for Gemini models. |
@@ -320,66 +257,41 @@ Disable categories: `{ "disabled_categories": ["ultrabrain"] }`
### Model Resolution
Runtime priority:
1. **UI-selected model** - model chosen in the OpenCode UI, for primary agents
2. **User override** - model set in config → used exactly as-is. Even on cold cache, explicit user configuration takes precedence over hardcoded fallback chains
3. **Category default** - model inherited from the assigned category config
4. **User `fallback_models`** - user-configured fallback list is tried before built-in fallback chains
5. **Provider fallback chain** - built-in provider/model chain from OmO source
6. **System default** - OpenCode's configured default model
#### Model Settings Compatibility
Model settings are compatibility-normalized against model capabilities instead of failing hard.
Normalized fields:
- `variant` - downgraded to the closest supported value
- `reasoningEffort` - downgraded to the closest supported value, or removed if unsupported
- `temperature` - removed if unsupported by the model metadata
- `top_p` - removed if unsupported by the model metadata
- `maxTokens` - capped to the model's reported max output limit
- `thinking` - removed if the target model does not support thinking
Examples:
- Claude models do not support `reasoningEffort` - it is removed automatically
- GPT-4.1 does not support reasoning - `reasoningEffort` is removed
- o-series models support `none` through `high` - `xhigh` is downgraded to `high`
- GPT-5 supports `none`, `minimal`, `low`, `medium`, `high`, `xhigh` - all pass through
Capability data comes from provider runtime metadata first. OmO also ships bundled models.dev-backed capability data, supports a refreshable local models.dev cache, and falls back to heuristic family detection plus alias rules when exact metadata is unavailable. `bunx oh-my-opencode doctor` surfaces capability diagnostics and warns when a configured model relies on compatibility fallback.
3-step priority at runtime:
1. **User override** — model set in config → used exactly as-is
2. **Provider fallback chain** — tries each provider in priority order until available
3. **System default** — falls back to OpenCode's configured default model
#### Agent Provider Chains
| Agent | Default Model | Provider Priority |
| --------------------- | ------------------- | ---------------------------------------------------------------------------- |
| **Sisyphus** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/kimi-k2.5` → `kimi-for-coding/k2p5` → `opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `zai-coding-plan\|opencode/glm-5` → `opencode/big-pickle` |
| **Hephaestus** | `gpt-5.4` | `gpt-5.4 (medium)` |
| **oracle** | `gpt-5.4` | `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5` |
| **librarian** | `minimax-m2.7` | `opencode-go/minimax-m2.7` → `opencode/minimax-m2.7-highspeed` → `anthropic\|opencode/claude-haiku-4-5` → `opencode/gpt-5-nano` |
| **explore** | `grok-code-fast-1` | `github-copilot\|xai/grok-code-fast-1` → `opencode-go/minimax-m2.7-highspeed` → `opencode/minimax-m2.7` → `anthropic\|opencode/claude-haiku-4-5` → `opencode/gpt-5-nano` |
| **multimodal-looker** | `gpt-5.4` | `openai\|opencode/gpt-5.4 (medium)` → `opencode-go/kimi-k2.5` → `zai-coding-plan/glm-4.6v` → `openai\|github-copilot\|opencode/gpt-5-nano` |
| **Prometheus** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `opencode-go/glm-5` → `google\|github-copilot\|opencode/gemini-3.1-pro` |
| **Metis** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `opencode-go/glm-5` → `kimi-for-coding/k2p5` |
| **Momus** | `gpt-5.4` | `openai\|github-copilot\|opencode/gpt-5.4 (xhigh)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `opencode-go/glm-5` |
| **Atlas** | `claude-sonnet-4-6` | `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `opencode-go/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `opencode-go/minimax-m2.7` |
| **Sisyphus** | `claude-opus-4-6` | `claude-opus-4-6` → `glm-5` → `big-pickle` |
| **Hephaestus** | `gpt-5.3-codex` | `gpt-5.3-codex` → `gpt-5.4` (GitHub Copilot fallback) |
| **oracle** | `gpt-5.4` | `gpt-5.4` → `gemini-3.1-pro` → `claude-opus-4-6` |
| **librarian** | `minimax-m2.7` | `minimax-m2.7` → `minimax-m2.7-highspeed` → `claude-haiku-4-5` → `gpt-5-nano` |
| **explore** | `grok-code-fast-1` | `grok-code-fast-1` → `minimax-m2.7-highspeed` → `minimax-m2.7` → `claude-haiku-4-5` → `gpt-5-nano` |
| **multimodal-looker** | `gpt-5.3-codex` | `gpt-5.3-codex` → `k2p5` → `gemini-3-flash` → `glm-4.6v` → `gpt-5-nano` |
| **Prometheus** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4` → `gemini-3.1-pro` |
| **Metis** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4` → `gemini-3.1-pro` |
| **Momus** | `gpt-5.4` | `gpt-5.4` → `claude-opus-4-6` → `gemini-3.1-pro` |
| **Atlas** | `claude-sonnet-4-6` | `claude-sonnet-4-6` → `gpt-5.4` |
#### Category Provider Chains
| Category | Default Model | Provider Priority |
| ---------------------- | ------------------- | -------------------------------------------------------------- |
| **visual-engineering** | `gemini-3.1-pro` | `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `zai-coding-plan\|opencode/glm-5` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5` → `kimi-for-coding/k2p5` |
| **ultrabrain** | `gpt-5.4` | `openai\|opencode/gpt-5.4 (xhigh)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5` |
| **deep** | `gpt-5.3-codex` | `openai\|opencode/gpt-5.3-codex (medium)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` |
| **artistry** | `gemini-3.1-pro` | `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4` |
| **quick** | `gpt-5.4-mini` | `openai\|github-copilot\|opencode/gpt-5.4-mini` → `anthropic\|github-copilot\|opencode/claude-haiku-4-5` → `google\|github-copilot\|opencode/gemini-3-flash` → `opencode-go/minimax-m2.7` → `opencode/gpt-5-nano` |
| **unspecified-low** | `claude-sonnet-4-6` | `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `openai\|opencode/gpt-5.3-codex (medium)` → `opencode-go/kimi-k2.5` → `google\|github-copilot\|opencode/gemini-3-flash` → `opencode-go/minimax-m2.7` |
| **unspecified-high** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `zai-coding-plan\|opencode/glm-5` → `kimi-for-coding/k2p5` → `opencode-go/glm-5` → `opencode/kimi-k2.5` → `opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5` |
| **writing** | `gemini-3-flash` | `google\|github-copilot\|opencode/gemini-3-flash` → `opencode-go/kimi-k2.5` → `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `opencode-go/minimax-m2.7` |
| **visual-engineering** | `gemini-3.1-pro` | `gemini-3.1-pro` → `glm-5` → `claude-opus-4-6` |
| **ultrabrain** | `gpt-5.4` | `gpt-5.4` → `gemini-3.1-pro` → `claude-opus-4-6` |
| **deep** | `gpt-5.3-codex` | `gpt-5.3-codex` → `claude-opus-4-6` → `gemini-3.1-pro` |
| **artistry** | `gemini-3.1-pro` | `gemini-3.1-pro` → `claude-opus-4-6` → `gpt-5.4` |
| **quick** | `gpt-5.4-mini` | `gpt-5.4-mini` → `claude-haiku-4-5` → `gemini-3-flash` → `minimax-m2.7` → `gpt-5-nano` |
| **unspecified-low** | `claude-sonnet-4-6` | `claude-sonnet-4-6` → `gpt-5.3-codex` → `gemini-3-flash` → `minimax-m2.7` |
| **unspecified-high** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4 (high)` → `glm-5` → `k2p5` → `kimi-k2.5` |
| **writing** | `gemini-3-flash` | `gemini-3-flash` → `claude-sonnet-4-6` → `minimax-m2.7` |
Run `bunx oh-my-opencode doctor --verbose` to see effective model resolution for your config.
Run `bunx oh-my-openagent doctor --verbose` to see effective model resolution for your config.
---
@@ -513,10 +425,9 @@ Available hooks: `todo-continuation-enforcer`, `context-window-monitor`, `sessio
**Notes:**
- `directory-agents-injector` - auto-disabled on OpenCode 1.1.37+ (native AGENTS.md support)
- `no-sisyphus-gpt` - **do not disable**. It blocks incompatible GPT models for Sisyphus while allowing the dedicated GPT-5.4 prompt path.
- `directory-agents-injector` — auto-disabled on OpenCode 1.1.37+ (native AGENTS.md support)
- `no-sisyphus-gpt` — **do not disable**. It blocks incompatible GPT models for Sisyphus while allowing the dedicated GPT-5.4 prompt path.
- `startup-toast` is a sub-feature of `auto-update-checker`. Disable just the toast by adding `startup-toast` to `disabled_hooks`.
- `session-recovery` - automatically recovers from recoverable session errors (missing tool results, unavailable tools, thinking block violations). Shows toast notifications during recovery. Enable `experimental.auto_resume` for automatic retry after recovery.
### Commands
@@ -593,7 +504,7 @@ Force-enable session notifications:
{ "notification": { "force_enable": true } }
```
`force_enable` (`false`) - force session-notification even if external notification plugins are detected.
`force_enable` (`false`) — force session-notification even if external notification plugins are detected.
### MCPs
@@ -679,233 +590,12 @@ Define `fallback_models` per agent or category:
"agents": {
"sisyphus": {
"model": "anthropic/claude-opus-4-6",
"fallback_models": [
"openai/gpt-5.4",
{
"model": "google/gemini-3.1-pro",
"variant": "high"
}
]
"fallback_models": ["openai/gpt-5.4", "google/gemini-3.1-pro"]
}
}
}
```
`fallback_models` also supports object-style entries so you can attach settings to a specific fallback model:
```json
{
"agents": {
"sisyphus": {
"model": "anthropic/claude-opus-4-6",
"fallback_models": [
"openai/gpt-5.4",
{
"model": "anthropic/claude-sonnet-4-6",
"variant": "high",
"thinking": { "type": "enabled", "budgetTokens": 12000 }
},
{
"model": "openai/gpt-5.3-codex",
"reasoningEffort": "high",
"temperature": 0.2,
"top_p": 0.95,
"maxTokens": 8192
}
]
}
}
}
```
Mixed arrays are allowed, so string entries and object entries can appear together in the same fallback chain.
#### Object-style `fallback_models`
Object entries use the following shape:
| Field | Type | Description |
| ----- | ---- | ----------- |
| `model` | string | Fallback model ID. Provider prefix is optional when OmO can inherit the current/default provider. |
| `variant` | string | Explicit variant override for this fallback entry. |
| `reasoningEffort` | string | OpenAI reasoning effort override for this fallback entry. |
| `temperature` | number | Temperature applied if this fallback model becomes active. |
| `top_p` | number | Top-p applied if this fallback model becomes active. |
| `maxTokens` | number | Max response tokens applied if this fallback model becomes active. |
| `thinking` | object | Anthropic thinking config applied if this fallback model becomes active. |
Per-model settings are **fallback-only**. They are promoted only when that specific fallback model is actually selected, so they do not override your primary model settings when the primary model resolves successfully.
`thinking` uses the same shape as the normal agent/category option:
| Field | Type | Description |
| ----- | ---- | ----------- |
| `type` | string | `enabled` or `disabled` |
| `budgetTokens` | number | Optional Anthropic thinking budget |
Object entries can also omit the provider prefix when OmO can infer it from the current/default provider. If you provide both inline variant syntax in `model` and an explicit `variant` field, the explicit `variant` field wins.
#### Full examples
**1. Simple string chain**
Use strings when you only need an ordered fallback chain:
```json
{
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-6",
"fallback_models": [
"anthropic/claude-haiku-4-5",
"openai/gpt-5.4",
"google/gemini-3.1-pro"
]
}
}
}
```
**2. Same-provider shorthand**
If the primary model already establishes the provider, fallback entries can omit the prefix:
```json
{
"agents": {
"atlas": {
"model": "openai/gpt-5.4",
"fallback_models": [
"gpt-5.4-mini",
{
"model": "gpt-5.3-codex",
"reasoningEffort": "medium",
"maxTokens": 4096
}
]
}
}
}
```
In this example OmO treats `gpt-5.4-mini` and `gpt-5.3-codex` as OpenAI fallback entries because the current/default provider is already `openai`.
**3. Mixed cross-provider chain**
Mix string entries and object entries when only some fallback models need special settings:
```json
{
"agents": {
"sisyphus": {
"model": "anthropic/claude-opus-4-6",
"fallback_models": [
"openai/gpt-5.4",
{
"model": "anthropic/claude-sonnet-4-6",
"variant": "high",
"thinking": { "type": "enabled", "budgetTokens": 12000 }
},
{
"model": "google/gemini-3.1-pro",
"variant": "high"
}
]
}
}
}
```
**4. Category-level fallback chain**
`fallback_models` works the same way under `categories`:
```json
{
"categories": {
"deep": {
"model": "openai/gpt-5.3-codex",
"fallback_models": [
{
"model": "openai/gpt-5.4",
"reasoningEffort": "xhigh",
"maxTokens": 12000
},
{
"model": "anthropic/claude-opus-4-6",
"variant": "max",
"temperature": 0.2
},
"google/gemini-3.1-pro(high)"
]
}
}
}
```
**5. Full object entry with every supported field**
This shows every supported object-style parameter in one place:
```json
{
"agents": {
"oracle": {
"model": "openai/gpt-5.4",
"fallback_models": [
{
"model": "openai/gpt-5.3-codex(low)",
"variant": "xhigh",
"reasoningEffort": "high",
"temperature": 0.3,
"top_p": 0.9,
"maxTokens": 8192,
"thinking": {
"type": "disabled"
}
}
]
}
}
}
```
In this example the explicit `"variant": "xhigh"` overrides the inline `(low)` suffix in `"model"`.
This final example is a **complete shape reference**. In real configs, prefer provider-appropriate settings:
- use `reasoningEffort` for OpenAI reasoning models
- use `thinking` for Anthropic thinking-capable models
- use `variant`, `temperature`, `top_p`, and `maxTokens` only when that fallback model supports them
### Model Capabilities
OmO can refresh a local models.dev capability snapshot on startup. This cache is controlled by `model_capabilities`.
```jsonc
{
"model_capabilities": {
"enabled": true,
"auto_refresh_on_start": true,
"refresh_timeout_ms": 5000,
"source_url": "https://models.dev/api.json"
}
}
```
| Option | Default behavior | Description |
| ------ | ---------------- | ----------- |
| `enabled` | enabled unless explicitly set to `false` | Master switch for model capability refresh behavior |
| `auto_refresh_on_start` | refresh on startup unless explicitly set to `false` | Refresh the local models.dev cache during startup checks |
| `refresh_timeout_ms` | `5000` | Timeout for the startup refresh attempt |
| `source_url` | `https://models.dev/api.json` | Override the models.dev source URL |
Notes:
- Startup refresh runs through the auto-update checker hook.
- Manual refresh is available via `bunx oh-my-opencode refresh-model-capabilities`.
- Provider runtime metadata still takes priority when OmO resolves capabilities for compatibility checks.
### Hashline Edit
Replaces the built-in `Edit` tool with a hash-anchored version using `LINE#ID` references to prevent stale-line edits. Disabled by default.
@@ -925,7 +615,7 @@ When enabled, two companion hooks are active: `hashline-read-enhancer` (annotate
"aggressive_truncation": false,
"auto_resume": false,
"disable_omo_env": false,
"task_system": true,
"task_system": false,
"dynamic_context_pruning": {
"enabled": false,
"notification": "detailed",
@@ -955,7 +645,7 @@ When enabled, two companion hooks are active: `hashline-read-enhancer` (annotate
| `aggressive_truncation` | `false` | Aggressively truncate when token limit exceeded |
| `auto_resume` | `false` | Auto-resume after thinking block recovery |
| `disable_omo_env` | `false` | Disable auto-injected `<omo-env>` block (date/time/locale). Improves cache hit rate. |
| `task_system` | `true` | Enable Sisyphus task system |
| `task_system` | `false` | Enable Sisyphus task system |
| `dynamic_context_pruning.enabled` | `false` | Auto-prune old tool outputs to manage context window |
| `dynamic_context_pruning.notification` | `detailed` | Pruning notifications: `off` / `minimal` / `detailed` |
| `turn_protection.turns` | `3` | Recent turns protected from pruning (1–10) |

View File

@@ -6,30 +6,29 @@ Oh-My-OpenAgent provides 11 specialized AI agents. Each has distinct expertise,
### Core Agents
Core-agent tab cycling is deterministic via injected runtime order field. The fixed priority order is Sisyphus (order: 1), Hephaestus (order: 2), Prometheus (order: 3), and Atlas (order: 4). Remaining agents follow after that stable core ordering.
| Agent | Model | Purpose |
| --------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Sisyphus** | `claude-opus-4-6` | The default orchestrator. Plans, delegates, and executes complex tasks using specialized subagents with aggressive parallel execution. Todo-driven workflow with extended thinking (32k budget). Fallback: `opencode-go/kimi-k2.5``kimi-for-coding/k2p5``opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5``openai\|github-copilot\|opencode/gpt-5.4 (medium)``zai-coding-plan\|opencode/glm-5``opencode/big-pickle`. |
| **Hephaestus** | `gpt-5.4` | The Legitimate Craftsman. Autonomous deep worker inspired by AmpCode's deep mode. Goal-oriented execution with thorough research before action. Explores codebase patterns, completes tasks end-to-end without premature stopping. Named after the Greek god of forge and craftsmanship. Requires a GPT-capable provider. |
| **Oracle** | `gpt-5.4` | Architecture decisions, code review, debugging. Read-only consultation with stellar logical reasoning and deep analysis. Inspired by AmpCode. Fallback: `google\|github-copilot\|opencode/gemini-3.1-pro (high)``anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)``opencode-go/glm-5`. |
| **Librarian** | `minimax-m2.7` | Multi-repo analysis, documentation lookup, OSS implementation examples. Deep codebase understanding with evidence-based answers. Fallback: `opencode/minimax-m2.7-highspeed``anthropic\|opencode/claude-haiku-4-5``opencode/gpt-5-nano`. |
| **Explore** | `grok-code-fast-1` | Fast codebase exploration and contextual grep. Fallback: `opencode-go/minimax-m2.7-highspeed``opencode/minimax-m2.7``anthropic\|opencode/claude-haiku-4-5``opencode/gpt-5-nano`. |
| **Multimodal-Looker** | `gpt-5.4` | Visual content specialist. Analyzes PDFs, images, diagrams to extract information. Fallback: `opencode-go/kimi-k2.5``zai-coding-plan/glm-4.6v``openai\|github-copilot\|opencode/gpt-5-nano`. |
| **Sisyphus** | `claude-opus-4-6` | The default orchestrator. Plans, delegates, and executes complex tasks using specialized subagents with aggressive parallel execution. Todo-driven workflow with extended thinking (32k budget). Fallback: `glm-5``big-pickle`. |
| **Hephaestus** | `gpt-5.3-codex` | The Legitimate Craftsman. Autonomous deep worker inspired by AmpCode's deep mode. Goal-oriented execution with thorough research before action. Explores codebase patterns, completes tasks end-to-end without premature stopping. Named after the Greek god of forge and craftsmanship. Fallback: `gpt-5.4` on GitHub Copilot. Requires a GPT-capable provider. |
| **Oracle** | `gpt-5.4` | Architecture decisions, code review, debugging. Read-only consultation with stellar logical reasoning and deep analysis. Inspired by AmpCode. Fallback: `gemini-3.1-pro``claude-opus-4-6`. |
| **Librarian** | `minimax-m2.7` | Multi-repo analysis, documentation lookup, OSS implementation examples. Deep codebase understanding with evidence-based answers. Fallback: `minimax-m2.7-highspeed``claude-haiku-4-5``gpt-5-nano`. |
| **Explore** | `grok-code-fast-1` | Fast codebase exploration and contextual grep. Fallback: `minimax-m2.7-highspeed``minimax-m2.7``claude-haiku-4-5``gpt-5-nano`. |
| **Multimodal-Looker** | `gpt-5.3-codex` | Visual content specialist. Analyzes PDFs, images, diagrams to extract information. Fallback: `k2p5``gemini-3-flash``glm-4.6v``gpt-5-nano`. |
### Planning Agents
| Agent | Model | Purpose |
| -------------- | ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Prometheus** | `claude-opus-4-6` | Strategic planner with interview mode. Creates detailed work plans through iterative questioning. Fallback: `openai\|github-copilot\|opencode/gpt-5.4 (high)``opencode-go/glm-5``google\|github-copilot\|opencode/gemini-3.1-pro`. |
| **Metis** | `claude-opus-4-6` | Plan consultant — pre-planning analysis. Identifies hidden intentions, ambiguities, and AI failure points. Fallback: `openai\|github-copilot\|opencode/gpt-5.4 (high)``opencode-go/glm-5``kimi-for-coding/k2p5`. |
| **Momus** | `gpt-5.4` | Plan reviewer — validates plans against clarity, verifiability, and completeness standards. Fallback: `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)``google\|github-copilot\|opencode/gemini-3.1-pro (high)``opencode-go/glm-5`. |
| **Prometheus** | `claude-opus-4-6` | Strategic planner with interview mode. Creates detailed work plans through iterative questioning. Fallback: `gpt-5.4``gemini-3.1-pro`. |
| **Metis** | `claude-opus-4-6` | Plan consultant — pre-planning analysis. Identifies hidden intentions, ambiguities, and AI failure points. Fallback: `gpt-5.4``gemini-3.1-pro`. |
| **Momus** | `gpt-5.4` | Plan reviewer — validates plans against clarity, verifiability, and completeness standards. Fallback: `claude-opus-4-6``gemini-3.1-pro`. |
### Orchestration Agents
| Agent | Model | Purpose |
| ------------------- | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Atlas** | `claude-sonnet-4-6` | Todo-list orchestrator. Executes planned tasks systematically, managing todo items and coordinating work. Fallback: `opencode-go/kimi-k2.5``openai\|github-copilot\|opencode/gpt-5.4 (medium)``opencode-go/minimax-m2.7`. |
| **Sisyphus-Junior** | _(category-dependent)_ | Category-spawned executor. Model is selected automatically based on the task category (visual-engineering, quick, deep, etc.). Its built-in general fallback chain is `anthropic\|github-copilot\|opencode/claude-sonnet-4-6``opencode-go/kimi-k2.5``openai\|github-copilot\|opencode/gpt-5.4 (medium)``opencode-go/minimax-m2.7``opencode/big-pickle`. |
| **Atlas** | `claude-sonnet-4-6` | Todo-list orchestrator. Executes planned tasks systematically, managing todo items and coordinating work. Fallback: `gpt-5.4` (medium). |
| **Sisyphus-Junior** | _(category-dependent)_ | Category-spawned executor. Model is selected automatically based on the task category (visual-engineering, quick, deep, etc.). Used when the main agent delegates work via the `task` tool. |
### Invoking Agents
@@ -90,9 +89,8 @@ When running inside tmux:
- Watch multiple agents work in real-time
- Each pane shows agent output live
- Auto-cleanup when agents complete
- **Stable agent ordering**: core-agent tab cycling is deterministic via injected runtime order field (Sisyphus: 1, Hephaestus: 2, Prometheus: 3, Atlas: 4)
Customize agent models, prompts, and permissions in `oh-my-opencode.jsonc`.
Customize agent models, prompts, and permissions in `oh-my-openagent.json`.
## Category System
@@ -131,7 +129,7 @@ task({
### Custom Categories
You can define custom categories in your plugin config file. During the rename transition, both `oh-my-openagent.json[c]` and legacy `oh-my-opencode.json[c]` basenames are recognized.
You can define custom categories in `oh-my-openagent.json`.
#### Category Configuration Schema
@@ -190,75 +188,6 @@ When you use a Category, a special agent called **Sisyphus-Junior** performs the
- **Characteristic**: Cannot **re-delegate** tasks to other agents.
- **Purpose**: Prevents infinite delegation loops and ensures focus on the assigned task.
## Advanced Configuration
### Rename Compatibility
The published package and binary remain `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`, while legacy `oh-my-opencode` entries still load with a warning. Plugin config files (`oh-my-openagent.json[c]` or legacy `oh-my-opencode.json[c]`) are recognized during the transition. Run `bunx oh-my-opencode doctor` to check for legacy package name warnings.
### Fallback Models
Configure per-agent fallback chains with arrays that can mix plain model strings and per-model objects:
```jsonc
{
"agents": {
"sisyphus": {
"fallback_models": [
"opencode/glm-5",
{ "model": "openai/gpt-5.4", "variant": "high" },
{ "model": "anthropic/claude-sonnet-4-6", "thinking": { "type": "enabled", "budgetTokens": 64000 } }
]
}
}
}
```
When a model errors, the runtime can move through the configured fallback array. Object entries let you tune the backup model itself instead of only swapping the model name.
### File-Based Prompts
Load agent system prompts from external files using `file://` URLs in the `prompt` field, or append additional content with `prompt_append`. The `prompt_append` field also works on categories.
```jsonc
{
"agents": {
"sisyphus": {
"prompt": "file:///path/to/custom-prompt.md"
},
"oracle": {
"prompt_append": "file:///path/to/additional-context.md"
}
},
"categories": {
"deep": {
"prompt_append": "file:///path/to/deep-category-append.md"
}
}
}
```
Supports `~` expansion for home directory and relative `file://` paths.
Useful for:
- Version controlling prompts separately from config
- Sharing prompts across projects
- Keeping configuration files concise
- Adding category-specific context without duplicating base prompts
The file content is loaded at runtime and injected into the agent's system prompt.
### Session Recovery
The system automatically recovers from common session failures without user intervention:
- **Missing tool results**: reconstructs recoverable tool state and skips invalid tool-part IDs instead of failing the whole recovery pass
- **Thinking block violations**: Recovers from API thinking block mismatches
- **Empty messages**: Reconstructs message history when content is missing
- **Context window limits**: Gracefully handles Claude context window exceeded errors with intelligent compaction
- **JSON parse errors**: Recovers from malformed tool outputs
Recovery happens transparently during agent execution. You see the result, not the failure.
## Skills
Skills provide specialized workflows with embedded MCP servers and detailed instructions. A Skill is a mechanism that injects **specialized knowledge (Context)** and **tools (MCP)** for specific domains into agents.
@@ -915,41 +844,9 @@ When a skill MCP has `oauth` configured:
Pre-authenticate via CLI:
```bash
bunx oh-my-opencode mcp oauth login <server-name> --server-url https://api.example.com
bunx oh-my-openagent mcp oauth login <server-name> --server-url https://api.example.com
```
## Model Capabilities
Model capabilities are models.dev-backed, with a refreshable cache and compatibility diagnostics. The system combines bundled models.dev snapshot data, optional refreshed cache data, provider runtime metadata, and heuristics when exact metadata is unavailable.
### Refreshing Capabilities
Update the local cache with the latest model information:
```bash
bunx oh-my-opencode refresh-model-capabilities
```
Configure automatic refresh at startup:
```jsonc
{
"model_capabilities": {
"enabled": true,
"auto_refresh_on_start": true,
"refresh_timeout_ms": 5000,
"source_url": "https://models.dev/api.json"
}
}
```
### Capability Diagnostics
Run `bunx oh-my-opencode doctor` to see capability diagnostics including:
- effective model resolution for agents and categories
- warnings when configured models rely on compatibility fallback
- override compatibility details alongside model resolution output
## Context Injection
### Directory AGENTS.md

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode",
"version": "3.14.0",
"version": "3.11.0",
"description": "The Best AI Agent Harness - Batteries-Included OpenCode Plugin with Multi-Model Orchestration, Parallel Background Agents, and Crafted LSP/AST Tools",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -78,17 +78,17 @@
"typescript": "^5.7.3"
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.14.0",
"oh-my-opencode-darwin-x64": "3.14.0",
"oh-my-opencode-darwin-x64-baseline": "3.14.0",
"oh-my-opencode-linux-arm64": "3.14.0",
"oh-my-opencode-linux-arm64-musl": "3.14.0",
"oh-my-opencode-linux-x64": "3.14.0",
"oh-my-opencode-linux-x64-baseline": "3.14.0",
"oh-my-opencode-linux-x64-musl": "3.14.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.14.0",
"oh-my-opencode-windows-x64": "3.14.0",
"oh-my-opencode-windows-x64-baseline": "3.14.0"
"oh-my-opencode-darwin-arm64": "3.11.0",
"oh-my-opencode-darwin-x64": "3.11.0",
"oh-my-opencode-darwin-x64-baseline": "3.11.0",
"oh-my-opencode-linux-arm64": "3.11.0",
"oh-my-opencode-linux-arm64-musl": "3.11.0",
"oh-my-opencode-linux-x64": "3.11.0",
"oh-my-opencode-linux-x64-baseline": "3.11.0",
"oh-my-opencode-linux-x64-musl": "3.11.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.11.0",
"oh-my-opencode-windows-x64": "3.11.0",
"oh-my-opencode-windows-x64-baseline": "3.11.0"
},
"overrides": {
"@opencode-ai/sdk": "^1.2.24"

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-arm64",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64-baseline",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64-musl",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-baseline",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl-baseline",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64-baseline",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (windows-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64",
"version": "3.14.0",
"version": "3.11.0",
"description": "Platform-specific binary for oh-my-opencode (windows-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,7 +1,6 @@
// postinstall.mjs
// Runs after npm install to verify platform binary is available
import { readFileSync } from "node:fs";
import { createRequire } from "node:module";
import { getPlatformPackageCandidates, getBinaryPath } from "./bin/platform.js";
@@ -23,26 +22,15 @@ function getLibcFamily() {
}
}
function getPackageBaseName() {
  // Resolve the installed package name from the package.json that sits next
  // to this script. Falls back to the canonical name when the manifest is
  // missing or unparseable, so postinstall never crashes on a broken install.
  const fallbackName = "oh-my-opencode";
  try {
    const manifestUrl = new URL("./package.json", import.meta.url);
    const manifest = JSON.parse(readFileSync(manifestUrl, "utf8"));
    return manifest.name || fallbackName;
  } catch {
    return fallbackName;
  }
}
function main() {
const { platform, arch } = process;
const libcFamily = getLibcFamily();
const packageBaseName = getPackageBaseName();
try {
const packageCandidates = getPlatformPackageCandidates({
platform,
arch,
libcFamily,
packageBaseName,
});
const resolvedPackage = packageCandidates.find((pkg) => {

View File

@@ -34,72 +34,6 @@ async function generateChangelog(previousTag: string): Promise<string[]> {
return notes
}
async function getChangedFiles(previousTag: string): Promise<string[]> {
  // List file paths touched between the previous release tag and HEAD.
  // Any git failure (unknown tag, shallow clone) degrades to an empty list
  // so release-note generation can proceed without framing.
  try {
    const output = await $`git diff --name-only ${previousTag}..HEAD`.text()
    const changed: string[] = []
    for (const rawLine of output.split("\n")) {
      const trimmed = rawLine.trim()
      if (trimmed) changed.push(trimmed)
    }
    return changed
  } catch {
    return []
  }
}
function touchesAnyPath(files: string[], candidates: string[]): boolean {
  // True when any changed file equals a candidate path or lives under it
  // (prefix match is anchored at a "/" boundary, so "docsx" never matches "docs").
  for (const file of files) {
    for (const candidate of candidates) {
      if (file === candidate) return true
      if (file.startsWith(candidate + "/")) return true
    }
  }
  return false
}
function buildReleaseFraming(files: string[]): string[] {
  // Build the release-note framing section from the set of changed files.
  // Each path group that was touched contributes one human-readable bullet;
  // an untouched release (no group matches) yields no framing at all.
  const framingRules: Array<{ paths: string[]; bullet: string }> = [
    {
      paths: [
        "src/index.ts",
        "src/plugin-config.ts",
        "bin/platform.js",
        "postinstall.mjs",
        "docs",
      ],
      bullet:
        "Rename transition updates across package detection, plugin/config compatibility, and install surfaces.",
    },
    {
      paths: ["src/tools/delegate-task", "src/plugin/tool-registry.ts"],
      bullet:
        "Task and tool behavior updates, including delegate-task contract and runtime registration behavior.",
    },
    {
      paths: [
        "src/plugin/tool-registry.ts",
        "src/plugin-handlers/agent-config-handler.ts",
        "src/plugin-handlers/tool-config-handler.ts",
        "src/hooks/tasks-todowrite-disabler",
      ],
      bullet:
        "Task-system default behavior alignment so omitted configuration behaves consistently across runtime paths.",
    },
    {
      paths: [".github/workflows", "docs/guide/installation.md", "postinstall.mjs"],
      bullet:
        "Install and publish workflow hardening, including safer release sequencing and package/install fixes.",
    },
  ]
  const bullets = framingRules
    .filter((rule) => touchesAnyPath(files, rule.paths))
    .map((rule) => rule.bullet)
  if (bullets.length === 0) {
    return []
  }
  return [
    "## Minor Compatibility and Stability Release",
    "",
    "This release carries compatibility-facing behavior changes and operational hardening. Read the summary below before upgrading or publishing.",
    "",
    ...bullets.map((bullet) => `- ${bullet}`),
    "",
    "## Commit Summary",
    "",
  ]
}
async function getContributors(previousTag: string): Promise<string[]> {
const notes: string[] = []
@@ -144,11 +78,9 @@ async function main() {
process.exit(0)
}
const changedFiles = await getChangedFiles(previousTag)
const changelog = await generateChangelog(previousTag)
const contributors = await getContributors(previousTag)
const framing = buildReleaseFraming(changedFiles)
const notes = [...framing, ...changelog, ...contributors]
const notes = [...changelog, ...contributors]
if (notes.length === 0) {
console.log("No notable changes")

View File

@@ -2327,78 +2327,6 @@
"created_at": "2026-03-25T23:11:32Z",
"repoId": 1108837393,
"pullRequestNo": 2840
},
{
"name": "kuitos",
"id": 5206843,
"comment_id": 4133207953,
"created_at": "2026-03-26T09:55:49Z",
"repoId": 1108837393,
"pullRequestNo": 2833
},
{
"name": "Jholly2008",
"id": 29773273,
"comment_id": 4139918265,
"created_at": "2026-03-27T03:37:00Z",
"repoId": 1108837393,
"pullRequestNo": 2871
},
{
"name": "WhiteGiverMa",
"id": 152406589,
"comment_id": 4140294245,
"created_at": "2026-03-27T05:26:37Z",
"repoId": 1108837393,
"pullRequestNo": 2877
},
{
"name": "codivedev",
"id": 249558739,
"comment_id": 4142164072,
"created_at": "2026-03-27T12:11:45Z",
"repoId": 1108837393,
"pullRequestNo": 2888
},
{
"name": "AlexDochioiu",
"id": 38853913,
"comment_id": 4147980685,
"created_at": "2026-03-28T12:20:42Z",
"repoId": 1108837393,
"pullRequestNo": 2916
},
{
"name": "ryandielhenn",
"id": 35785891,
"comment_id": 4148508024,
"created_at": "2026-03-28T17:46:50Z",
"repoId": 1108837393,
"pullRequestNo": 2919
},
{
"name": "lorenzo-dallamuta",
"id": 66994937,
"comment_id": 4148848505,
"created_at": "2026-03-28T21:30:15Z",
"repoId": 1108837393,
"pullRequestNo": 2925
},
{
"name": "quangtran88",
"id": 107824159,
"comment_id": 4149327240,
"created_at": "2026-03-29T03:21:39Z",
"repoId": 1108837393,
"pullRequestNo": 2929
},
{
"name": "HOYALIM",
"id": 166576253,
"comment_id": 4149626853,
"created_at": "2026-03-29T07:31:36Z",
"repoId": 1108837393,
"pullRequestNo": 2935
}
]
}

View File

@@ -11,7 +11,7 @@ Agent factories following `createXXXAgent(model) → AgentConfig` pattern. Each
| Agent | Model | Temp | Mode | Fallback Chain | Purpose |
|-------|-------|------|------|----------------|---------|
| **Sisyphus** | claude-opus-4-6 max | 0.1 | all | k2p5 → kimi-k2.5 → gpt-5.4 medium → glm-5 → big-pickle | Main orchestrator, plans + delegates |
| **Hephaestus** | gpt-5.4 medium | 0.1 | all | | Autonomous deep worker |
| **Hephaestus** | gpt-5.3-codex medium | 0.1 | all | gpt-5.4 medium (copilot) | Autonomous deep worker |
| **Oracle** | gpt-5.4 high | 0.1 | subagent | gemini-3.1-pro high → claude-opus-4-6 max | Read-only consultation |
| **Librarian** | minimax-m2.7 | 0.1 | subagent | minimax-m2.7-highspeed → claude-haiku-4-5 → gpt-5-nano | External docs/code search |
| **Explore** | grok-code-fast-1 | 0.1 | subagent | minimax-m2.7-highspeed → minimax-m2.7 → claude-haiku-4-5 → gpt-5-nano | Contextual grep |

View File

@@ -78,16 +78,12 @@ export function collectPendingBuiltinAgents(input: {
})
if (!resolution) {
if (override?.model) {
// User explicitly configured a model but resolution failed (e.g., cold cache).
// Honor the user's choice directly instead of falling back to hardcoded chain.
log("[agent-registration] User-configured model not resolved, using as-is", {
log("[agent-registration] User-configured model could not be resolved, falling back", {
agent: agentName,
configuredModel: override.model,
})
resolution = { model: override.model, provenance: "override" as const }
} else {
resolution = getFirstFallbackModel(requirement)
}
resolution = getFirstFallbackModel(requirement)
}
if (!resolution) continue
const { model, variant: resolvedVariant } = resolution

View File

@@ -308,12 +308,6 @@ Briefly announce "Consulting Oracle for [reason]" before invocation.
**Collect Oracle results before your final answer. No exceptions.**
**Oracle-dependent implementation is BLOCKED until Oracle finishes.**
- If you asked Oracle for architecture/debugging direction that affects the fix, do not implement before Oracle result arrives.
- While waiting, only do non-overlapping prep work. Never ship implementation decisions Oracle was asked to decide.
- Never "time out and continue anyway" for Oracle-dependent tasks.
- Oracle takes minutes. When done with your own work: **end your response** — wait for the \`<system-reminder>\`.
- Do NOT poll \`background_output\` on a running Oracle. The notification will come.
- Never cancel Oracle.

View File

@@ -127,12 +127,6 @@ This verbalization anchors your routing decision and makes your reasoning transp
- **Open-ended** ("Improve", "Refactor", "Add feature") → Assess codebase first
- **Ambiguous** (unclear scope, multiple interpretations) → Ask ONE clarifying question
### Step 1.5: Turn-Local Intent Reset (MANDATORY)
- Reclassify intent from the CURRENT user message only. Never auto-carry "implementation mode" from prior turns.
- If current message is a question/explanation/investigation request, answer/analyze only. Do NOT create todos or edit files.
- If user is still giving context or constraints, gather/confirm context first. Do NOT start implementation yet.
### Step 2: Check for Ambiguity
- Single valid interpretation → Proceed
@@ -141,15 +135,6 @@ This verbalization anchors your routing decision and makes your reasoning transp
- Missing critical info (file, error, context) → **MUST ask**
- User's design seems flawed or suboptimal → **MUST raise concern** before implementing
### Step 2.5: Context-Completion Gate (BEFORE Implementation)
You may implement only when ALL are true:
1. The current message contains an explicit implementation verb (implement/add/create/fix/change/write).
2. Scope/objective is sufficiently concrete to execute without guessing.
3. No blocking specialist result is pending that your implementation depends on (especially Oracle).
If any condition fails, do research/clarification only, then wait.
### Step 3: Validate Before Acting
**Assumptions Check:**

View File

@@ -167,11 +167,6 @@ Complexity:
- Open-ended ("improve", "refactor") → assess codebase first, then propose
- Ambiguous (multiple interpretations with 2x+ effort difference) → ask ONE question
Turn-local reset (mandatory): classify from the CURRENT user message, not conversation momentum.
- Never carry implementation mode from prior turns.
- If current turn is question/explanation/investigation, answer or analyze only.
- If user appears to still be providing context, gather/confirm context first and wait.
Domain guess (provisional — finalized in ROUTE after exploration):
- Visual (UI, CSS, styling, layout, design, animation) → likely visual-engineering
- Logic (algorithms, architecture, complex business logic) → likely ultrabrain
@@ -189,11 +184,6 @@ Step 2 — Check before acting:
- Missing critical info → ask
- User's design seems flawed → raise concern concisely, propose alternative, ask if they want to proceed anyway
Context-completion gate before implementation:
- Implement only when the current message explicitly requests implementation (implement/add/create/fix/change/write),
scope is concrete enough to execute without guessing, and no blocking specialist result is pending.
- If any condition fails, continue with research/clarification only and wait.
<ask_gate>
Proceed unless:
(a) the action is irreversible,

View File

@@ -642,7 +642,7 @@ describe("createBuiltinAgents with requiresProvider gating (hephaestus)", () =>
// #then
expect(agents.hephaestus).toBeDefined()
expect(agents.hephaestus.model).toBe("openai/gpt-5.4")
expect(agents.hephaestus.model).toBe("openai/gpt-5.3-codex")
} finally {
cacheSpy.mockRestore()
fetchSpy.mockRestore()

View File

@@ -202,7 +202,7 @@ exports[`generateModelConfig single native provider uses OpenAI models when only
"variant": "medium",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
@@ -287,7 +287,7 @@ exports[`generateModelConfig single native provider uses OpenAI models with isMa
"variant": "medium",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
@@ -490,7 +490,7 @@ exports[`generateModelConfig all native providers uses preferred models from fal
"model": "anthropic/claude-haiku-4-5",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"metis": {
@@ -565,7 +565,7 @@ exports[`generateModelConfig all native providers uses preferred models with isM
"model": "anthropic/claude-haiku-4-5",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"metis": {
@@ -641,7 +641,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models when on
"model": "opencode/claude-haiku-4-5",
},
"hephaestus": {
"model": "opencode/gpt-5.4",
"model": "opencode/gpt-5.3-codex",
"variant": "medium",
},
"metis": {
@@ -716,7 +716,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models with is
"model": "opencode/claude-haiku-4-5",
},
"hephaestus": {
"model": "opencode/gpt-5.4",
"model": "opencode/gpt-5.3-codex",
"variant": "medium",
},
"metis": {
@@ -1049,7 +1049,7 @@ exports[`generateModelConfig mixed provider scenarios uses Claude + OpenCode Zen
"model": "anthropic/claude-haiku-4-5",
},
"hephaestus": {
"model": "opencode/gpt-5.4",
"model": "opencode/gpt-5.3-codex",
"variant": "medium",
},
"metis": {
@@ -1124,7 +1124,7 @@ exports[`generateModelConfig mixed provider scenarios uses OpenAI + Copilot comb
"model": "github-copilot/gpt-5-mini",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"metis": {
@@ -1329,7 +1329,7 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
"model": "opencode/claude-haiku-4-5",
},
"hephaestus": {
"model": "github-copilot/gpt-5.4",
"model": "opencode/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
@@ -1407,7 +1407,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
"model": "anthropic/claude-haiku-4-5",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
@@ -1485,7 +1485,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers with is
"model": "anthropic/claude-haiku-4-5",
},
"hephaestus": {
"model": "openai/gpt-5.4",
"model": "openai/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {

View File

@@ -1,5 +1,4 @@
import color from "picocolors"
import { PLUGIN_NAME } from "../shared"
import type { InstallArgs } from "./types"
import {
addPluginToOpenCodeConfig,
@@ -33,7 +32,7 @@ export async function runCliInstaller(args: InstallArgs, version: string): Promi
}
console.log()
printInfo(
`Usage: bunx ${PLUGIN_NAME} install --no-tui --claude=<no|yes|max20> --gemini=<no|yes> --copilot=<no|yes>`,
"Usage: bunx oh-my-opencode install --no-tui --claude=<no|yes|max20> --gemini=<no|yes> --copilot=<no|yes>",
)
console.log()
return 1
@@ -66,7 +65,7 @@ export async function runCliInstaller(args: InstallArgs, version: string): Promi
const config = argsToConfig(args)
printStep(step++, totalSteps, `Adding ${PLUGIN_NAME} plugin...`)
printStep(step++, totalSteps, "Adding oh-my-opencode plugin...")
const pluginResult = await addPluginToOpenCodeConfig(version)
if (!pluginResult.success) {
printError(`Failed: ${pluginResult.error}`)
@@ -76,7 +75,7 @@ export async function runCliInstaller(args: InstallArgs, version: string): Promi
`Plugin ${isUpdate ? "verified" : "added"} ${SYMBOLS.arrow} ${color.dim(pluginResult.configPath)}`,
)
printStep(step++, totalSteps, `Writing ${PLUGIN_NAME} configuration...`)
printStep(step++, totalSteps, "Writing oh-my-opencode configuration...")
const omoResult = writeOmoConfig(config)
if (!omoResult.success) {
printError(`Failed: ${omoResult.error}`)

View File

@@ -0,0 +1,300 @@
import { describe, expect, test, mock, afterEach } from "bun:test"
import { getPluginNameWithVersion, fetchNpmDistTags, generateOmoConfig } from "./config-manager"
import type { InstallConfig } from "./types"
describe("getPluginNameWithVersion", () => {
  const originalFetch = globalThis.fetch

  // Restore the genuine fetch after every test so stubs never leak across cases.
  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  // Stub globalThis.fetch to resolve with an ok JSON body carrying the given dist-tags.
  function stubDistTags(tags: Record<string, string>): void {
    globalThis.fetch = mock(() =>
      Promise.resolve({
        ok: true,
        json: () => Promise.resolve(tags),
      } as Response)
    ) as unknown as typeof fetch
  }

  // Stub globalThis.fetch to reject, simulating a network outage.
  function stubNetworkFailure(): void {
    globalThis.fetch = mock(() => Promise.reject(new Error("Network error"))) as unknown as typeof fetch
  }

  // Stub globalThis.fetch to resolve with a non-ok HTTP response of the given status.
  function stubHttpError(status: number): void {
    globalThis.fetch = mock(() =>
      Promise.resolve({
        ok: false,
        status,
      } as Response)
    ) as unknown as typeof fetch
  }

  test("returns @latest when current version matches latest tag", async () => {
    // #given npm dist-tags with latest=2.14.0
    stubDistTags({ latest: "2.14.0", beta: "3.0.0-beta.3" })

    // #when current version is 2.14.0
    const result = await getPluginNameWithVersion("2.14.0")

    // #then should use @latest tag
    expect(result).toBe("oh-my-opencode@latest")
  })

  test("returns @beta when current version matches beta tag", async () => {
    // #given npm dist-tags with beta=3.0.0-beta.3
    stubDistTags({ latest: "2.14.0", beta: "3.0.0-beta.3" })

    // #when current version is 3.0.0-beta.3
    const result = await getPluginNameWithVersion("3.0.0-beta.3")

    // #then should use @beta tag
    expect(result).toBe("oh-my-opencode@beta")
  })

  test("returns @next when current version matches next tag", async () => {
    // #given npm dist-tags with next=3.1.0-next.1
    stubDistTags({ latest: "2.14.0", beta: "3.0.0-beta.3", next: "3.1.0-next.1" })

    // #when current version is 3.1.0-next.1
    const result = await getPluginNameWithVersion("3.1.0-next.1")

    // #then should use @next tag
    expect(result).toBe("oh-my-opencode@next")
  })

  test("returns prerelease channel tag when no dist-tag matches prerelease version", async () => {
    // #given npm dist-tags with beta=3.0.0-beta.3
    stubDistTags({ latest: "2.14.0", beta: "3.0.0-beta.3" })

    // #when current version is old beta 3.0.0-beta.2
    const result = await getPluginNameWithVersion("3.0.0-beta.2")

    // #then should preserve prerelease channel
    expect(result).toBe("oh-my-opencode@beta")
  })

  test("returns prerelease channel tag when fetch fails", async () => {
    // #given network failure
    stubNetworkFailure()

    // #when current version is 3.0.0-beta.3
    const result = await getPluginNameWithVersion("3.0.0-beta.3")

    // #then should preserve prerelease channel
    expect(result).toBe("oh-my-opencode@beta")
  })

  test("returns bare package name when npm returns non-ok response for stable version", async () => {
    // #given npm returns 404
    stubHttpError(404)

    // #when current version is 2.14.0
    const result = await getPluginNameWithVersion("2.14.0")

    // #then should fall back to bare package entry
    expect(result).toBe("oh-my-opencode")
  })

  test("prioritizes latest over other tags when version matches multiple", async () => {
    // #given version matches both latest and beta (during release promotion)
    stubDistTags({ beta: "3.0.0", latest: "3.0.0", next: "3.1.0-alpha.1" })

    // #when current version matches both
    const result = await getPluginNameWithVersion("3.0.0")

    // #then should prioritize @latest
    expect(result).toBe("oh-my-opencode@latest")
  })
})
describe("fetchNpmDistTags", () => {
  const originalFetch = globalThis.fetch

  // Undo fetch stubbing after every test.
  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  // Install a fetch stub that resolves with the provided response shape.
  function stubFetchResponse(response: Partial<Response>): void {
    globalThis.fetch = mock(() => Promise.resolve(response as Response)) as unknown as typeof fetch
  }

  test("returns dist-tags on success", async () => {
    // #given npm returns dist-tags
    stubFetchResponse({
      ok: true,
      json: () => Promise.resolve({ latest: "2.14.0", beta: "3.0.0-beta.3" }),
    })

    // #when fetching dist-tags
    const result = await fetchNpmDistTags("oh-my-opencode")

    // #then should return the tags
    expect(result).toEqual({ latest: "2.14.0", beta: "3.0.0-beta.3" })
  })

  test("returns null on network failure", async () => {
    // #given network failure
    globalThis.fetch = mock(() => Promise.reject(new Error("Network error"))) as unknown as typeof fetch

    // #when fetching dist-tags
    const result = await fetchNpmDistTags("oh-my-opencode")

    // #then should return null
    expect(result).toBeNull()
  })

  test("returns null on non-ok response", async () => {
    // #given npm returns 404
    stubFetchResponse({
      ok: false,
      status: 404,
    })

    // #when fetching dist-tags
    const result = await fetchNpmDistTags("oh-my-opencode")

    // #then should return null
    expect(result).toBeNull()
  })
})
describe("generateOmoConfig - model fallback system", () => {
  // Baseline with every provider disabled; each test spreads in only the flags it exercises.
  const noProviders: InstallConfig = {
    hasClaude: false,
    isMax20: false,
    hasOpenAI: false,
    hasGemini: false,
    hasCopilot: false,
    hasOpencodeZen: false,
    hasZaiCodingPlan: false,
    hasKimiForCoding: false,
  }

  // Narrowing helper for reading agent model/variant assignments off the untyped result.
  const agentsOf = (result: ReturnType<typeof generateOmoConfig>) =>
    result.agents as Record<string, { model: string; variant?: string }>

  test("uses github-copilot sonnet fallback when only copilot available", () => {
    // #given user has only copilot (no max plan)
    const config: InstallConfig = { ...noProviders, hasCopilot: true }

    // #when generating config
    const result = generateOmoConfig(config)

    // #then Sisyphus uses Copilot (OR logic - copilot is in claude-opus-4-6 providers)
    expect(agentsOf(result).sisyphus.model).toBe("github-copilot/claude-opus-4.6")
  })

  test("uses ultimate fallback when no providers configured", () => {
    // #given user has no providers
    const config: InstallConfig = { ...noProviders }

    // #when generating config
    const result = generateOmoConfig(config)

    // #then Sisyphus is omitted (requires all fallback providers)
    expect(result.$schema).toBe("https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json")
    expect(agentsOf(result).sisyphus).toBeUndefined()
  })

  test("uses ZAI model for librarian when Z.ai is available", () => {
    // #given user has Z.ai and Claude max20
    const config: InstallConfig = { ...noProviders, hasClaude: true, isMax20: true, hasZaiCodingPlan: true }

    // #when generating config
    const result = generateOmoConfig(config)

    // #then librarian should use ZAI model
    expect(agentsOf(result).librarian.model).toBe("zai-coding-plan/glm-4.7")

    // #then Sisyphus uses Claude (OR logic)
    expect(agentsOf(result).sisyphus.model).toBe("anthropic/claude-opus-4-6")
  })

  test("uses native OpenAI models when only ChatGPT available", () => {
    // #given user has only ChatGPT subscription
    const config: InstallConfig = { ...noProviders, hasOpenAI: true }

    // #when generating config
    const result = generateOmoConfig(config)

    // #then Sisyphus resolves to gpt-5.4 medium (openai is now in sisyphus chain)
    expect(agentsOf(result).sisyphus.model).toBe("openai/gpt-5.4")
    expect(agentsOf(result).sisyphus.variant).toBe("medium")

    // #then Oracle should use native OpenAI (first fallback entry)
    expect(agentsOf(result).oracle.model).toBe("openai/gpt-5.4")

    // #then multimodal-looker should use native OpenAI (first fallback entry is gpt-5.4)
    expect(agentsOf(result)["multimodal-looker"].model).toBe("openai/gpt-5.4")
  })

  test("uses haiku for explore when Claude max20", () => {
    // #given user has Claude max20
    const config: InstallConfig = { ...noProviders, hasClaude: true, isMax20: true }

    // #when generating config
    const result = generateOmoConfig(config)

    // #then explore should use haiku (max20 plan uses Claude quota)
    expect(agentsOf(result).explore.model).toBe("anthropic/claude-haiku-4-5")
  })

  test("uses haiku for explore regardless of max20 flag", () => {
    // #given user has Claude but not max20
    const config: InstallConfig = { ...noProviders, hasClaude: true }

    // #when generating config
    const result = generateOmoConfig(config)

    // #then explore should use haiku (isMax20 doesn't affect explore anymore)
    expect(agentsOf(result).explore.model).toBe("anthropic/claude-haiku-4-5")
  })
})

View File

@@ -41,39 +41,37 @@ export async function addPluginToOpenCodeConfig(currentVersion: string): Promise
const config = parseResult.config
const plugins = config.plugin ?? []
const canonicalEntries = plugins.filter(
// Check for existing plugin (either current or legacy name)
const currentNameIndex = plugins.findIndex(
(plugin) => plugin === PLUGIN_NAME || plugin.startsWith(`${PLUGIN_NAME}@`)
)
const legacyEntries = plugins.filter(
const legacyNameIndex = plugins.findIndex(
(plugin) => plugin === LEGACY_PLUGIN_NAME || plugin.startsWith(`${LEGACY_PLUGIN_NAME}@`)
)
const otherPlugins = plugins.filter(
(plugin) => !(plugin === PLUGIN_NAME || plugin.startsWith(`${PLUGIN_NAME}@`))
&& !(plugin === LEGACY_PLUGIN_NAME || plugin.startsWith(`${LEGACY_PLUGIN_NAME}@`))
)
const normalizedPlugins = [...otherPlugins]
if (canonicalEntries.length > 0) {
normalizedPlugins.push(canonicalEntries[0])
} else if (legacyEntries.length > 0) {
const versionMatch = legacyEntries[0].match(/@(.+)$/)
const preservedVersion = versionMatch ? versionMatch[1] : null
normalizedPlugins.push(preservedVersion ? `${PLUGIN_NAME}@${preservedVersion}` : pluginEntry)
// If either name exists, update to new name
if (currentNameIndex !== -1) {
if (plugins[currentNameIndex] === pluginEntry) {
return { success: true, configPath: path }
}
plugins[currentNameIndex] = pluginEntry
} else if (legacyNameIndex !== -1) {
// Upgrade legacy name to new name
plugins[legacyNameIndex] = pluginEntry
} else {
normalizedPlugins.push(pluginEntry)
plugins.push(pluginEntry)
}
config.plugin = normalizedPlugins
config.plugin = plugins
if (format === "jsonc") {
const content = readFileSync(path, "utf-8")
const pluginArrayRegex = /((?:"plugin"|plugin)\s*:\s*)\[([\s\S]*?)\]/
const pluginArrayRegex = /"plugin"\s*:\s*\[([\s\S]*?)\]/
const match = content.match(pluginArrayRegex)
if (match) {
const formattedPlugins = normalizedPlugins.map((p) => `"${p}"`).join(",\n ")
const newContent = content.replace(pluginArrayRegex, `$1[\n ${formattedPlugins}\n ]`)
const formattedPlugins = plugins.map((p) => `"${p}"`).join(",\n ")
const newContent = content.replace(pluginArrayRegex, `"plugin": [\n ${formattedPlugins}\n ]`)
writeFileSync(path, newContent)
} else {
const newContent = content.replace(/(\{)/, `$1\n "plugin": ["${pluginEntry}"],`)

View File

@@ -1,142 +0,0 @@
/// <reference types="bun-types" />
import { describe, expect, test } from "bun:test"
import { generateOmoConfig } from "../config-manager"
import type { InstallConfig } from "../types"
describe("generateOmoConfig - model fallback system", () => {
  // Every provider flag off (this variant of InstallConfig includes hasOpencodeGo);
  // individual tests override only the flags they need.
  const disabledProviders: InstallConfig = {
    hasClaude: false,
    isMax20: false,
    hasOpenAI: false,
    hasGemini: false,
    hasCopilot: false,
    hasOpencodeZen: false,
    hasZaiCodingPlan: false,
    hasKimiForCoding: false,
    hasOpencodeGo: false,
  }

  // Narrowing helper for reading agent model/variant assignments off the untyped result.
  const agents = (result: ReturnType<typeof generateOmoConfig>) =>
    result.agents as Record<string, { model: string; variant?: string }>

  test("uses github-copilot sonnet fallback when only copilot available", () => {
    //#given
    const config: InstallConfig = { ...disabledProviders, hasCopilot: true }

    //#when
    const result = generateOmoConfig(config)

    //#then
    expect([
      "github-copilot/claude-opus-4.6",
      "github-copilot/claude-opus-4-6",
    ]).toContain(agents(result).sisyphus.model)
  })

  test("uses ultimate fallback when no providers configured", () => {
    //#given
    const config: InstallConfig = { ...disabledProviders }

    //#when
    const result = generateOmoConfig(config)

    //#then
    expect(result.$schema).toBe("https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json")
    expect(agents(result).sisyphus).toBeUndefined()
  })

  test("uses ZAI model for librarian when Z.ai is available", () => {
    //#given
    const config: InstallConfig = { ...disabledProviders, hasClaude: true, isMax20: true, hasZaiCodingPlan: true }

    //#when
    const result = generateOmoConfig(config)

    //#then
    expect(agents(result).librarian.model).toBe("zai-coding-plan/glm-4.7")
    expect(agents(result).sisyphus.model).toBe("anthropic/claude-opus-4-6")
  })

  test("uses native OpenAI models when only ChatGPT available", () => {
    //#given
    const config: InstallConfig = { ...disabledProviders, hasOpenAI: true }

    //#when
    const result = generateOmoConfig(config)

    //#then
    expect(agents(result).sisyphus.model).toBe("openai/gpt-5.4")
    expect(agents(result).sisyphus.variant).toBe("medium")
    expect(agents(result).oracle.model).toBe("openai/gpt-5.4")
    expect(agents(result)["multimodal-looker"].model).toBe("openai/gpt-5.4")
  })

  test("uses haiku for explore when Claude max20", () => {
    //#given
    const config: InstallConfig = { ...disabledProviders, hasClaude: true, isMax20: true }

    //#when
    const result = generateOmoConfig(config)

    //#then
    expect(agents(result).explore.model).toBe("anthropic/claude-haiku-4-5")
  })

  test("uses haiku for explore regardless of max20 flag", () => {
    //#given
    const config: InstallConfig = { ...disabledProviders, hasClaude: true }

    //#when
    const result = generateOmoConfig(config)

    //#then
    expect(agents(result).explore.model).toBe("anthropic/claude-haiku-4-5")
  })
})

View File

@@ -1,56 +0,0 @@
/// <reference types="bun-types" />
import { afterEach, describe, expect, mock, test } from "bun:test"
import { fetchNpmDistTags } from "../config-manager"
describe("fetchNpmDistTags", () => {
  const originalFetch = globalThis.fetch

  // Put the genuine fetch back after each test.
  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  // Install a fetch stub that resolves with the provided response shape.
  function stubFetchResponse(response: Partial<Response>): void {
    globalThis.fetch = mock(() => Promise.resolve(response as Response)) as unknown as typeof fetch
  }

  test("returns dist-tags on success", async () => {
    //#given
    stubFetchResponse({
      ok: true,
      json: () => Promise.resolve({ latest: "3.13.1", beta: "3.14.0-beta.1" }),
    })

    //#when
    const result = await fetchNpmDistTags("oh-my-openagent")

    //#then
    expect(result).toEqual({ latest: "3.13.1", beta: "3.14.0-beta.1" })
  })

  test("returns null on network failure", async () => {
    //#given
    globalThis.fetch = mock(() => Promise.reject(new Error("Network error"))) as unknown as typeof fetch

    //#when
    const result = await fetchNpmDistTags("oh-my-openagent")

    //#then
    expect(result).toBeNull()
  })

  test("returns null on non-ok response", async () => {
    //#given
    stubFetchResponse({
      ok: false,
      status: 404,
    })

    //#when
    const result = await fetchNpmDistTags("oh-my-openagent")

    //#then
    expect(result).toBeNull()
  })
})

View File

@@ -28,9 +28,10 @@ describe("detectCurrentConfig - single package detection", () => {
delete process.env.OPENCODE_CONFIG_DIR
})
it("detects both legacy and canonical plugin entries", () => {
it("detects oh-my-opencode in plugin array", () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-opencode", "oh-my-openagent@3.11.0"] }, null, 2) + "\n", "utf-8")
const config = { plugin: ["oh-my-opencode"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = detectCurrentConfig()
@@ -39,9 +40,58 @@ describe("detectCurrentConfig - single package detection", () => {
expect(result.isInstalled).toBe(true)
})
it("returns false when plugin not present with similar name", () => {
it("detects oh-my-opencode with version pin", () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-openagent-extra"] }, null, 2) + "\n", "utf-8")
const config = { plugin: ["oh-my-opencode@3.11.0"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = detectCurrentConfig()
// then
expect(result.isInstalled).toBe(true)
})
it("detects oh-my-openagent as installed (legacy name)", () => {
// given
const config = { plugin: ["oh-my-openagent"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = detectCurrentConfig()
// then
expect(result.isInstalled).toBe(true)
})
it("detects oh-my-openagent with version pin as installed (legacy name)", () => {
// given
const config = { plugin: ["oh-my-openagent@3.11.0"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = detectCurrentConfig()
// then
expect(result.isInstalled).toBe(true)
})
it("returns false when plugin not present", () => {
// given
const config = { plugin: ["some-other-plugin"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = detectCurrentConfig()
// then
expect(result.isInstalled).toBe(false)
})
it("returns false when plugin not present (even with similar name)", () => {
// given - not exactly oh-my-openagent
const config = { plugin: ["oh-my-openagent-extra"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = detectCurrentConfig()
@@ -53,7 +103,11 @@ describe("detectCurrentConfig - single package detection", () => {
it("detects OpenCode Go from the existing omo config", () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-opencode"] }, null, 2) + "\n", "utf-8")
writeFileSync(testOmoConfigPath, JSON.stringify({ agents: { atlas: { model: "opencode-go/kimi-k2.5" } } }, null, 2) + "\n", "utf-8")
writeFileSync(
testOmoConfigPath,
JSON.stringify({ agents: { atlas: { model: "opencode-go/kimi-k2.5" } } }, null, 2) + "\n",
"utf-8",
)
// when
const result = detectCurrentConfig()
@@ -83,9 +137,10 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
delete process.env.OPENCODE_CONFIG_DIR
})
it("writes canonical plugin entry for new installs", async () => {
it("keeps oh-my-opencode when it already exists", async () => {
// given
writeFileSync(testConfigPath, JSON.stringify({}, null, 2) + "\n", "utf-8")
const config = { plugin: ["oh-my-opencode"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = await addPluginToOpenCodeConfig("3.11.0")
@@ -93,12 +148,13 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
// then
expect(result.success).toBe(true)
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
expect(savedConfig.plugin).toEqual(["oh-my-openagent"])
expect(savedConfig.plugin).toContain("oh-my-opencode")
})
it("upgrades a bare legacy plugin entry to canonical", async () => {
it("replaces version-pinned oh-my-opencode@X.Y.Z", async () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-opencode"] }, null, 2) + "\n", "utf-8")
const config = { plugin: ["oh-my-opencode@3.10.0"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = await addPluginToOpenCodeConfig("3.11.0")
@@ -106,12 +162,14 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
// then
expect(result.success).toBe(true)
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
expect(savedConfig.plugin).toEqual(["oh-my-openagent"])
expect(savedConfig.plugin).toContain("oh-my-opencode")
expect(savedConfig.plugin).not.toContain("oh-my-opencode@3.10.0")
})
it("upgrades a version-pinned legacy entry to canonical", async () => {
it("recognizes oh-my-openagent as already installed (legacy name)", async () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-opencode@3.10.0"] }, null, 2) + "\n", "utf-8")
const config = { plugin: ["oh-my-openagent"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = await addPluginToOpenCodeConfig("3.11.0")
@@ -119,12 +177,15 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
// then
expect(result.success).toBe(true)
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
expect(savedConfig.plugin).toEqual(["oh-my-openagent@3.10.0"])
// Should upgrade to new name
expect(savedConfig.plugin).toContain("oh-my-opencode")
expect(savedConfig.plugin).not.toContain("oh-my-openagent")
})
it("removes stale legacy entry when canonical and legacy entries both exist", async () => {
it("replaces version-pinned oh-my-openagent@X.Y.Z with new name", async () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-openagent", "oh-my-opencode"] }, null, 2) + "\n", "utf-8")
const config = { plugin: ["oh-my-openagent@3.10.0"] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = await addPluginToOpenCodeConfig("3.11.0")
@@ -132,12 +193,15 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
// then
expect(result.success).toBe(true)
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
expect(savedConfig.plugin).toEqual(["oh-my-openagent"])
// Legacy should be replaced with new name
expect(savedConfig.plugin).toContain("oh-my-opencode")
expect(savedConfig.plugin).not.toContain("oh-my-openagent")
})
it("preserves a canonical entry when it already exists", async () => {
it("adds new plugin when none exists", async () => {
// given
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-openagent@3.10.0"] }, null, 2) + "\n", "utf-8")
const config = {}
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = await addPluginToOpenCodeConfig("3.11.0")
@@ -145,21 +209,20 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
// then
expect(result.success).toBe(true)
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
expect(savedConfig.plugin).toEqual(["oh-my-openagent@3.10.0"])
expect(savedConfig.plugin).toContain("oh-my-opencode")
})
it("rewrites quoted jsonc plugin field in place", async () => {
it("adds plugin when plugin array is empty", async () => {
// given
testConfigPath = join(testConfigDir, "opencode.jsonc")
writeFileSync(testConfigPath, '{\n "plugin": ["oh-my-opencode"]\n}\n', "utf-8")
const config = { plugin: [] }
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
// when
const result = await addPluginToOpenCodeConfig("3.11.0")
// then
expect(result.success).toBe(true)
const savedContent = readFileSync(testConfigPath, "utf-8")
expect(savedContent.includes('"plugin": [\n "oh-my-openagent"\n ]')).toBe(true)
expect(savedContent.includes("oh-my-opencode")).toBe(false)
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
expect(savedConfig.plugin).toContain("oh-my-opencode")
})
})

View File

@@ -1,56 +0,0 @@
/// <reference types="bun-types" />
import { afterEach, describe, expect, mock, test } from "bun:test"
import { getPluginNameWithVersion } from "../config-manager"
describe("getPluginNameWithVersion", () => {
  const originalFetch = globalThis.fetch

  // Undo fetch stubbing after every test so no stub outlives its case.
  afterEach(() => {
    globalThis.fetch = originalFetch
  })

  // Install a fetch stub that resolves with the provided response shape.
  function stubFetchResponse(response: Partial<Response>): void {
    globalThis.fetch = mock(() => Promise.resolve(response as Response)) as unknown as typeof fetch
  }

  test("returns the canonical latest tag when current version matches latest", async () => {
    //#given
    stubFetchResponse({
      ok: true,
      json: () => Promise.resolve({ latest: "3.13.1", beta: "3.14.0-beta.1" }),
    })

    //#when
    const result = await getPluginNameWithVersion("3.13.1")

    //#then
    expect(result).toBe("oh-my-openagent@latest")
  })

  test("preserves the canonical prerelease channel when fetch fails", async () => {
    //#given
    globalThis.fetch = mock(() => Promise.reject(new Error("Network error"))) as unknown as typeof fetch

    //#when
    const result = await getPluginNameWithVersion("3.14.0-beta.1")

    //#then
    expect(result).toBe("oh-my-openagent@beta")
  })

  test("returns the canonical bare package name for stable fallback", async () => {
    //#given
    stubFetchResponse({
      ok: false,
      status: 404,
    })

    //#when
    const result = await getPluginNameWithVersion("3.13.1")

    //#then
    expect(result).toBe("oh-my-openagent")
  })
})

View File

@@ -1,7 +1,6 @@
import { PLUGIN_NAME } from "../../shared"
import { fetchNpmDistTags } from "./npm-dist-tags"
const DEFAULT_PACKAGE_NAME = PLUGIN_NAME
const DEFAULT_PACKAGE_NAME = "oh-my-opencode"
const PRIORITIZED_TAGS = ["latest", "beta", "next"] as const
function getFallbackEntry(version: string, packageName: string): string {

View File

@@ -142,6 +142,48 @@ describe("model-resolution check", () => {
snapshot: { source: "bundled-snapshot" },
})
})
it("keeps provider-prefixed overrides for transport while capability diagnostics use pattern aliases", async () => {
const { getModelResolutionInfoWithOverrides } = await import("./model-resolution")
const info = getModelResolutionInfoWithOverrides({
categories: {
"visual-engineering": { model: "google/gemini-3.1-pro-high" },
},
})
const visual = info.categories.find((category) => category.name === "visual-engineering")
expect(visual).toBeDefined()
expect(visual!.effectiveModel).toBe("google/gemini-3.1-pro-high")
expect(visual!.capabilityDiagnostics).toMatchObject({
resolutionMode: "alias-backed",
canonicalization: {
source: "pattern-alias",
ruleID: "gemini-3.1-pro-tier-alias",
},
})
})
it("keeps provider-prefixed Claude overrides for transport while capability diagnostics canonicalize to bare IDs", async () => {
const { getModelResolutionInfoWithOverrides } = await import("./model-resolution")
const info = getModelResolutionInfoWithOverrides({
agents: {
oracle: { model: "anthropic/claude-opus-4-6-thinking" },
},
})
const oracle = info.agents.find((agent) => agent.name === "oracle")
expect(oracle).toBeDefined()
expect(oracle!.effectiveModel).toBe("anthropic/claude-opus-4-6-thinking")
expect(oracle!.capabilityDiagnostics).toMatchObject({
resolutionMode: "alias-backed",
canonicalization: {
source: "pattern-alias",
ruleID: "claude-thinking-legacy-alias",
},
})
})
})
describe("checkModelResolution", () => {

View File

@@ -9,7 +9,7 @@ import { buildEffectiveResolution, getEffectiveModel } from "./model-resolution-
import type { AgentResolutionInfo, CategoryResolutionInfo, ModelResolutionInfo, OmoConfig } from "./model-resolution-types"
function parseProviderModel(value: string): { providerID: string; modelID: string } | null {
const slashIndex = value.lastIndexOf("/")
const slashIndex = value.indexOf("/")
if (slashIndex <= 0 || slashIndex === value.length - 1) {
return null
}

View File

@@ -1,19 +1,9 @@
/// <reference types="bun-types" />
import { beforeEach, describe, expect, it, mock } from "bun:test"
import { PLUGIN_NAME } from "../../../shared"
import type { PluginInfo } from "./system-plugin"
type SystemModule = typeof import("./system")
async function importFreshSystemModule(): Promise<SystemModule> {
return import(`./system?test=${Date.now()}-${Math.random()}`)
}
const mockFindOpenCodeBinary = mock(async () => ({ path: "/usr/local/bin/opencode" }))
const mockGetOpenCodeVersion = mock(async () => "1.0.200")
const mockCompareVersions = mock((_leftVersion?: string, _rightVersion?: string) => true)
const mockGetPluginInfo = mock((): PluginInfo => ({
const mockCompareVersions = mock(() => true)
const mockGetPluginInfo = mock(() => ({
registered: true,
entry: "oh-my-opencode",
isPinned: false,
@@ -28,8 +18,7 @@ const mockGetLoadedPluginVersion = mock(() => ({
expectedVersion: "3.0.0",
loadedVersion: "3.1.0",
}))
const mockGetLatestPluginVersion = mock(async (_currentVersion: string | null) => null as string | null)
const mockGetSuggestedInstallTag = mock(() => "latest")
const mockGetLatestPluginVersion = mock(async () => null)
mock.module("./system-binary", () => ({
findOpenCodeBinary: mockFindOpenCodeBinary,
@@ -44,9 +33,10 @@ mock.module("./system-plugin", () => ({
mock.module("./system-loaded-version", () => ({
getLoadedPluginVersion: mockGetLoadedPluginVersion,
getLatestPluginVersion: mockGetLatestPluginVersion,
getSuggestedInstallTag: mockGetSuggestedInstallTag,
}))
const { checkSystem } = await import("./system?test")
describe("system check", () => {
beforeEach(() => {
mockFindOpenCodeBinary.mockReset()
@@ -55,7 +45,6 @@ describe("system check", () => {
mockGetPluginInfo.mockReset()
mockGetLoadedPluginVersion.mockReset()
mockGetLatestPluginVersion.mockReset()
mockGetSuggestedInstallTag.mockReset()
mockFindOpenCodeBinary.mockResolvedValue({ path: "/usr/local/bin/opencode" })
mockGetOpenCodeVersion.mockResolvedValue("1.0.200")
@@ -76,14 +65,10 @@ describe("system check", () => {
loadedVersion: "3.1.0",
})
mockGetLatestPluginVersion.mockResolvedValue(null)
mockGetSuggestedInstallTag.mockReturnValue("latest")
})
describe("#given cache directory contains spaces", () => {
it("uses a quoted cache directory in mismatch fix command", async () => {
//#given
const { checkSystem } = await importFreshSystemModule()
//#when
const result = await checkSystem()
@@ -102,11 +87,9 @@ describe("system check", () => {
loadedVersion: "3.0.0-canary.1",
})
mockGetLatestPluginVersion.mockResolvedValue("3.0.0-canary.2")
mockGetSuggestedInstallTag.mockReturnValue("canary")
mockCompareVersions.mockImplementation((leftVersion?: string, rightVersion?: string) => {
mockCompareVersions.mockImplementation((leftVersion: string, rightVersion: string) => {
return !(leftVersion === "3.0.0-canary.1" && rightVersion === "3.0.0-canary.2")
})
const { checkSystem } = await importFreshSystemModule()
//#when
const result = await checkSystem()
@@ -114,94 +97,8 @@ describe("system check", () => {
//#then
const outdatedIssue = result.issues.find((issue) => issue.title === "Loaded plugin is outdated")
expect(outdatedIssue?.fix).toBe(
`Update: cd "/Users/test/Library/Caches/opencode with spaces" && bun add ${PLUGIN_NAME}@canary`
'Update: cd "/Users/test/Library/Caches/opencode with spaces" && bun add oh-my-opencode@canary'
)
})
})
describe("#given OpenCode plugin entry uses legacy package name", () => {
it("adds a warning for a bare legacy entry", async () => {
//#given
mockGetPluginInfo.mockReturnValue({
registered: true,
entry: "oh-my-opencode",
isPinned: false,
pinnedVersion: null,
configPath: null,
isLocalDev: false,
})
const { checkSystem } = await importFreshSystemModule()
//#when
const result = await checkSystem()
//#then
const legacyEntryIssue = result.issues.find((issue) => issue.title === "Using legacy package name")
expect(legacyEntryIssue?.severity).toBe("warning")
expect(legacyEntryIssue?.fix).toBe(
'Update your opencode.json plugin entry: "oh-my-opencode" → "oh-my-openagent"'
)
})
it("adds a warning for a version-pinned legacy entry", async () => {
//#given
mockGetPluginInfo.mockReturnValue({
registered: true,
entry: "oh-my-opencode@3.0.0",
isPinned: true,
pinnedVersion: "3.0.0",
configPath: null,
isLocalDev: false,
})
const { checkSystem } = await importFreshSystemModule()
//#when
const result = await checkSystem()
//#then
const legacyEntryIssue = result.issues.find((issue) => issue.title === "Using legacy package name")
expect(legacyEntryIssue?.severity).toBe("warning")
expect(legacyEntryIssue?.fix).toBe(
'Update your opencode.json plugin entry: "oh-my-opencode@3.0.0" → "oh-my-openagent@3.0.0"'
)
})
it("does not warn for a canonical plugin entry", async () => {
//#given
mockGetPluginInfo.mockReturnValue({
registered: true,
entry: PLUGIN_NAME,
isPinned: false,
pinnedVersion: null,
configPath: null,
isLocalDev: false,
})
const { checkSystem } = await importFreshSystemModule()
//#when
const result = await checkSystem()
//#then
expect(result.issues.some((issue) => issue.title === "Using legacy package name")).toBe(false)
})
it("does not warn for a local-dev legacy entry", async () => {
//#given
mockGetPluginInfo.mockReturnValue({
registered: true,
entry: "oh-my-opencode",
isPinned: false,
pinnedVersion: null,
configPath: null,
isLocalDev: true,
})
const { checkSystem } = await importFreshSystemModule()
//#when
const result = await checkSystem()
//#then
expect(result.issues.some((issue) => issue.title === "Using legacy package name")).toBe(false)
})
})
})

View File

@@ -6,7 +6,6 @@ import { findOpenCodeBinary, getOpenCodeVersion, compareVersions } from "./syste
import { getPluginInfo } from "./system-plugin"
import { getLatestPluginVersion, getLoadedPluginVersion, getSuggestedInstallTag } from "./system-loaded-version"
import { parseJsonc } from "../../../shared"
import { PLUGIN_NAME, LEGACY_PLUGIN_NAME } from "../../../shared/plugin-identity"
function isConfigValid(configPath: string | null): boolean {
if (!configPath) return true
@@ -83,30 +82,14 @@ export async function checkSystem(): Promise<CheckResult> {
if (!pluginInfo.registered) {
issues.push({
title: `${PLUGIN_NAME} is not registered`,
title: "oh-my-opencode is not registered",
description: "Plugin entry is missing from OpenCode configuration.",
fix: `Run: bunx ${PLUGIN_NAME} install`,
fix: "Run: bunx oh-my-opencode install",
severity: "error",
affects: ["all agents"],
})
}
if (pluginInfo.entry && !pluginInfo.isLocalDev) {
const isLegacyName = pluginInfo.entry === LEGACY_PLUGIN_NAME
|| pluginInfo.entry.startsWith(`${LEGACY_PLUGIN_NAME}@`)
if (isLegacyName) {
const suggestedEntry = pluginInfo.entry.replace(LEGACY_PLUGIN_NAME, PLUGIN_NAME)
issues.push({
title: "Using legacy package name",
description: `Your opencode.json references "${LEGACY_PLUGIN_NAME}" which has been renamed to "${PLUGIN_NAME}". The old name may stop working in a future release.`,
fix: `Update your opencode.json plugin entry: "${pluginInfo.entry}" → "${suggestedEntry}"`,
severity: "warning",
affects: ["plugin loading"],
})
}
}
if (loadedInfo.expectedVersion && loadedInfo.loadedVersion && loadedInfo.expectedVersion !== loadedInfo.loadedVersion) {
issues.push({
title: "Loaded plugin version mismatch",
@@ -125,7 +108,7 @@ export async function checkSystem(): Promise<CheckResult> {
issues.push({
title: "Loaded plugin is outdated",
description: `Loaded ${systemInfo.loadedVersion}, latest ${latestVersion}.`,
fix: `Update: cd "${loadedInfo.cacheDir}" && bun add ${PLUGIN_NAME}@${installTag}`,
fix: `Update: cd "${loadedInfo.cacheDir}" && bun add oh-my-opencode@${installTag}`,
severity: "warning",
affects: ["plugin features"],
})

View File

@@ -1,5 +1,4 @@
import color from "picocolors"
import { PLUGIN_NAME } from "../../shared"
export const SYMBOLS = {
check: color.green("\u2713"),
@@ -39,6 +38,6 @@ export const EXIT_CODES = {
export const MIN_OPENCODE_VERSION = "1.0.150"
export const PACKAGE_NAME = PLUGIN_NAME
export const PACKAGE_NAME = "oh-my-opencode"
export const OPENCODE_BINARIES = ["opencode", "opencode-desktop"] as const

View File

@@ -113,10 +113,10 @@ describe("install CLI - binary check behavior", () => {
const configPath = join(tempDir, "opencode.json")
expect(existsSync(configPath)).toBe(true)
// then opencode.json should have plugin entry
const config = JSON.parse(readFileSync(configPath, "utf-8"))
expect(config.plugin).toBeDefined()
expect(config.plugin.some((p: string) => p.includes("oh-my-openagent"))).toBe(true)
expect(config.plugin.some((p: string) => p.includes("oh-my-opencode"))).toBe(false)
expect(config.plugin.some((p: string) => p.includes("oh-my-opencode"))).toBe(true)
// then exit code should be 0 (success)
expect(exitCode).toBe(0)

View File

@@ -458,7 +458,7 @@ describe("generateModelConfig", () => {
const result = generateModelConfig(config)
// #then
expect(result.agents?.hephaestus?.model).toBe("openai/gpt-5.4")
expect(result.agents?.hephaestus?.model).toBe("openai/gpt-5.3-codex")
expect(result.agents?.hephaestus?.variant).toBe("medium")
})
@@ -484,7 +484,7 @@ describe("generateModelConfig", () => {
const result = generateModelConfig(config)
// #then
expect(result.agents?.hephaestus?.model).toBe("opencode/gpt-5.4")
expect(result.agents?.hephaestus?.model).toBe("opencode/gpt-5.3-codex")
expect(result.agents?.hephaestus?.variant).toBe("medium")
})

View File

@@ -159,15 +159,8 @@ describe("integration: --session-id", () => {
describe("integration: --on-complete", () => {
let spawnSpy: ReturnType<typeof spyOn>
let originalPlatform: NodeJS.Platform
let originalEnv: Record<string, string | undefined>
beforeEach(() => {
originalPlatform = process.platform
originalEnv = {
SHELL: process.env.SHELL,
PSModulePath: process.env.PSModulePath,
}
spyOn(console, "error").mockImplementation(() => {})
spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue({
exited: Promise.resolve(0),
@@ -179,22 +172,11 @@ describe("integration: --on-complete", () => {
})
afterEach(() => {
Object.defineProperty(process, "platform", { value: originalPlatform })
for (const [key, value] of Object.entries(originalEnv)) {
if (value !== undefined) {
process.env[key] = value
} else {
delete process.env[key]
}
}
spawnSpy.mockRestore()
})
it("passes all 4 env vars as strings to spawned process", async () => {
// given
Object.defineProperty(process, "platform", { value: "linux" })
process.env.SHELL = "/bin/bash"
delete process.env.PSModulePath
spawnSpy.mockClear()
// when
@@ -224,15 +206,8 @@ describe("integration: option combinations", () => {
let mockStdout: MockWriteStream
let mockStderr: MockWriteStream
let spawnSpy: ReturnType<typeof spyOn>
let originalPlatform: NodeJS.Platform
let originalEnv: Record<string, string | undefined>
beforeEach(() => {
originalPlatform = process.platform
originalEnv = {
SHELL: process.env.SHELL,
PSModulePath: process.env.PSModulePath,
}
spyOn(console, "log").mockImplementation(() => {})
spyOn(console, "error").mockImplementation(() => {})
mockStdout = createMockWriteStream()
@@ -247,22 +222,11 @@ describe("integration: option combinations", () => {
})
afterEach(() => {
Object.defineProperty(process, "platform", { value: originalPlatform })
for (const [key, value] of Object.entries(originalEnv)) {
if (value !== undefined) {
process.env[key] = value
} else {
delete process.env[key]
}
}
spawnSpy?.mockRestore?.()
})
it("json output and on-complete hook can both execute", async () => {
// given - json manager active + on-complete hook ready
Object.defineProperty(process, "platform", { value: "linux" })
process.env.SHELL = "/bin/bash"
delete process.env.PSModulePath
const result: RunResult = {
sessionId: "session-123",
success: true,

View File

@@ -4,9 +4,6 @@ import * as loggerModule from "../../shared/logger"
import { executeOnCompleteHook } from "./on-complete-hook"
describe("executeOnCompleteHook", () => {
let originalPlatform: NodeJS.Platform
let originalEnv: Record<string, string | undefined>
function createStream(text: string): ReadableStream<Uint8Array> | undefined {
if (text.length === 0) {
return undefined
@@ -34,32 +31,15 @@ describe("executeOnCompleteHook", () => {
let logSpy: ReturnType<typeof spyOn<typeof loggerModule, "log">>
beforeEach(() => {
originalPlatform = process.platform
originalEnv = {
SHELL: process.env.SHELL,
PSModulePath: process.env.PSModulePath,
ComSpec: process.env.ComSpec,
}
logSpy = spyOn(loggerModule, "log").mockImplementation(() => {})
})
afterEach(() => {
Object.defineProperty(process, "platform", { value: originalPlatform })
for (const [key, value] of Object.entries(originalEnv)) {
if (value !== undefined) {
process.env[key] = value
} else {
delete process.env[key]
}
}
logSpy.mockRestore()
})
it("uses sh on unix shells and passes correct env vars", async () => {
it("executes command with correct env vars", async () => {
// given
Object.defineProperty(process, "platform", { value: "linux" })
process.env.SHELL = "/bin/bash"
delete process.env.PSModulePath
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
try {
@@ -88,82 +68,6 @@ describe("executeOnCompleteHook", () => {
}
})
it("uses powershell when PowerShell is detected on Windows", async () => {
// given
Object.defineProperty(process, "platform", { value: "win32" })
process.env.PSModulePath = "C:\\Program Files\\PowerShell\\Modules"
delete process.env.SHELL
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
try {
// when
await executeOnCompleteHook({
command: "Write-Host done",
sessionId: "session-123",
exitCode: 0,
durationMs: 5000,
messageCount: 10,
})
// then
const [args] = spawnSpy.mock.calls[0] as Parameters<typeof spawnWithWindowsHideModule.spawnWithWindowsHide>
expect(args).toEqual(["powershell.exe", "-NoProfile", "-Command", "Write-Host done"])
} finally {
spawnSpy.mockRestore()
}
})
it("uses pwsh when PowerShell is detected on non-Windows platforms", async () => {
// given
Object.defineProperty(process, "platform", { value: "linux" })
process.env.PSModulePath = "/usr/local/share/powershell/Modules"
delete process.env.SHELL
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
try {
// when
await executeOnCompleteHook({
command: "Write-Host done",
sessionId: "session-123",
exitCode: 0,
durationMs: 5000,
messageCount: 10,
})
// then
const [args] = spawnSpy.mock.calls[0] as Parameters<typeof spawnWithWindowsHideModule.spawnWithWindowsHide>
expect(args).toEqual(["pwsh", "-NoProfile", "-Command", "Write-Host done"])
} finally {
spawnSpy.mockRestore()
}
})
it("falls back to cmd.exe on Windows when PowerShell is not detected", async () => {
// given
Object.defineProperty(process, "platform", { value: "win32" })
delete process.env.PSModulePath
delete process.env.SHELL
process.env.ComSpec = "C:\\Windows\\System32\\cmd.exe"
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
try {
// when
await executeOnCompleteHook({
command: "echo done",
sessionId: "session-123",
exitCode: 0,
durationMs: 5000,
messageCount: 10,
})
// then
const [args] = spawnSpy.mock.calls[0] as Parameters<typeof spawnWithWindowsHideModule.spawnWithWindowsHide>
expect(args).toEqual(["C:\\Windows\\System32\\cmd.exe", "/d", "/s", "/c", "echo done"])
} finally {
spawnSpy.mockRestore()
}
})
it("env var values are strings", async () => {
// given
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))

View File

@@ -1,5 +1,5 @@
import { spawnWithWindowsHide } from "../../shared/spawn-with-windows-hide"
import { detectShellType, log } from "../../shared"
import { log } from "../../shared"
async function readOutput(
stream: ReadableStream<Uint8Array> | undefined,
@@ -20,24 +20,6 @@ async function readOutput(
}
}
/**
 * Maps the detected interactive shell type to the argv array used to spawn
 * the on-complete hook command.
 *
 * @param command - The raw hook command string to be executed by the shell.
 * @returns An argv array (executable first) suitable for spawnWithWindowsHide.
 */
function resolveHookShellCommand(command: string): string[] {
  const shellType = detectShellType()
  if (shellType === "powershell") {
    // pwsh is the cross-platform PowerShell binary; powershell.exe exists only on Windows.
    const powershellExecutable = process.platform === "win32" ? "powershell.exe" : "pwsh"
    return [powershellExecutable, "-NoProfile", "-Command", command]
  }
  if (shellType === "cmd") {
    // ComSpec normally points at cmd.exe; fall back to the bare name if unset.
    return [process.env.ComSpec || "cmd.exe", "/d", "/s", "/c", command]
  }
  if (shellType === "csh") {
    return ["csh", "-c", command]
  }
  // "unix" and any unrecognized shell type fall back to POSIX sh.
  return ["sh", "-c", command]
}
export async function executeOnCompleteHook(options: {
command: string
sessionId: string
@@ -55,8 +37,7 @@ export async function executeOnCompleteHook(options: {
log("Running on-complete hook", { command: trimmedCommand })
try {
const shellCommand = resolveHookShellCommand(trimmedCommand)
const proc = spawnWithWindowsHide(shellCommand, {
const proc = spawnWithWindowsHide(["sh", "-c", trimmedCommand], {
env: {
...process.env,
SESSION_ID: sessionId,

View File

@@ -1,6 +1,5 @@
import * as p from "@clack/prompts"
import color from "picocolors"
import { PLUGIN_NAME } from "../shared"
import type { InstallArgs } from "./types"
import {
addPluginToOpenCodeConfig,
@@ -44,7 +43,7 @@ export async function runTuiInstaller(args: InstallArgs, version: string): Promi
const config = await promptInstallConfig(detected)
if (!config) return 1
spinner.start(`Adding ${PLUGIN_NAME} to OpenCode config`)
spinner.start("Adding oh-my-opencode to OpenCode config")
const pluginResult = await addPluginToOpenCodeConfig(version)
if (!pluginResult.success) {
spinner.stop(`Failed to add plugin: ${pluginResult.error}`)
@@ -53,7 +52,7 @@ export async function runTuiInstaller(args: InstallArgs, version: string): Promi
}
spinner.stop(`Plugin added to ${color.cyan(pluginResult.configPath)}`)
spinner.start(`Writing ${PLUGIN_NAME} configuration`)
spinner.start("Writing oh-my-opencode configuration")
const omoResult = writeOmoConfig(config)
if (!omoResult.success) {
spinner.stop(`Failed to write config: ${omoResult.error}`)

View File

@@ -969,45 +969,6 @@ describe("GitMasterConfigSchema", () => {
})
})
describe("OhMyOpenCodeConfigSchema - git_master defaults (#2040)", () => {
test("git_master defaults are applied when section is missing from config", () => {
//#given
const config = {}
//#when
const result = OhMyOpenCodeConfigSchema.safeParse(config)
//#then
expect(result.success).toBe(true)
if (result.success) {
expect(result.data.git_master).toBeDefined()
expect(result.data.git_master.commit_footer).toBe(true)
expect(result.data.git_master.include_co_authored_by).toBe(true)
expect(result.data.git_master.git_env_prefix).toBe("GIT_MASTER=1")
}
})
test("git_master respects explicit false values", () => {
//#given
const config = {
git_master: {
commit_footer: false,
include_co_authored_by: false,
},
}
//#when
const result = OhMyOpenCodeConfigSchema.safeParse(config)
//#then
expect(result.success).toBe(true)
if (result.success) {
expect(result.data.git_master.commit_footer).toBe(false)
expect(result.data.git_master.include_co_authored_by).toBe(false)
}
})
})
describe("skills schema", () => {
test("accepts skills.sources configuration", () => {
//#given

View File

@@ -16,10 +16,6 @@ export const BackgroundTaskConfigSchema = z.object({
staleTimeoutMs: z.number().min(60000).optional(),
/** Timeout for tasks that never received any progress update, falling back to startedAt (default: 1800000 = 30 minutes, minimum: 60000 = 1 minute) */
messageStalenessTimeoutMs: z.number().min(60000).optional(),
/** Absolute TTL for non-terminal tasks in milliseconds (default: 1800000 = 30 minutes, minimum: 300000 = 5 minutes). Tasks exceeding this age from their last activity (or startedAt if no progress) are pruned. */
taskTtlMs: z.number().min(300000).optional(),
/** Timeout for tasks whose session has completely disappeared from the status registry (default: 60000 = 1 minute, minimum: 10000 = 10 seconds). When a session is gone (likely crashed), this shorter timeout is used instead of the normal stale timeout. */
sessionGoneTimeoutMs: z.number().min(10000).optional(),
syncPollTimeoutMs: z.number().min(60000).optional(),
/** Maximum tool calls per subagent task before circuit breaker triggers (default: 200, minimum: 10). Prevents runaway loops from burning unlimited tokens. */
maxToolCalls: z.number().int().min(10).optional(),

View File

@@ -21,8 +21,6 @@ export const ExperimentalConfigSchema = z.object({
hashline_edit: z.boolean().optional(),
/** Append fallback model info to session title when a runtime fallback occurs (default: false) */
model_fallback_title: z.boolean().optional(),
/** Maximum number of tools to register. When set, lower-priority tools are excluded to stay within provider limits (e.g., OpenAI's 128-tool cap). Accounts for ~20 OpenCode built-in tools. */
max_tools: z.number().int().min(1).optional(),
})
export type ExperimentalConfig = z.infer<typeof ExperimentalConfigSchema>

View File

@@ -47,13 +47,11 @@ export const HookNameSchema = z.enum([
"tasks-todowrite-disabler",
"runtime-fallback",
"write-existing-file-guard",
"bash-file-read-guard",
"anthropic-effort",
"hashline-read-enhancer",
"read-image-resizer",
"todo-description-override",
"webfetch-redirect-guard",
"legacy-plugin-toast",
])
export type HookName = z.infer<typeof HookNameSchema>

View File

@@ -60,11 +60,7 @@ export const OhMyOpenCodeConfigSchema = z.object({
model_capabilities: ModelCapabilitiesConfigSchema.optional(),
openclaw: OpenClawConfigSchema.optional(),
babysitting: BabysittingConfigSchema.optional(),
git_master: GitMasterConfigSchema.default({
commit_footer: true,
include_co_authored_by: true,
git_env_prefix: "GIT_MASTER=1",
}),
git_master: GitMasterConfigSchema.optional(),
browser_automation_engine: BrowserAutomationConfigSchema.optional(),
websearch: WebsearchConfigSchema.optional(),
tmux: TmuxConfigSchema.optional(),

View File

@@ -7,10 +7,8 @@ import { BackgroundManager } from "./features/background-agent"
import { SkillMcpManager } from "./features/skill-mcp-manager"
import { initTaskToastManager } from "./features/task-toast-manager"
import { TmuxSessionManager } from "./features/tmux-subagent"
import { registerManagerForCleanup } from "./features/background-agent/process-cleanup"
import { createConfigHandler } from "./plugin-handlers"
import { log } from "./shared"
import { markServerRunningInProcess } from "./shared/tmux/tmux-utils/server-health"
export type Managers = {
tmuxSessionManager: TmuxSessionManager
@@ -28,17 +26,8 @@ export function createManagers(args: {
}): Managers {
const { ctx, pluginConfig, tmuxConfig, modelCacheState, backgroundNotificationHookEnabled } = args
markServerRunningInProcess()
const tmuxSessionManager = new TmuxSessionManager(ctx, tmuxConfig)
registerManagerForCleanup({
shutdown: async () => {
await tmuxSessionManager.cleanup().catch((error) => {
log("[create-managers] tmux cleanup error during process shutdown:", error)
})
},
})
const backgroundManager = new BackgroundManager(
ctx,
pluginConfig.background_task,

View File

@@ -1,6 +1,6 @@
import type { BackgroundTask } from "./types"
export type BackgroundTaskNotificationStatus = "COMPLETED" | "CANCELLED" | "INTERRUPTED" | "ERROR"
export type BackgroundTaskNotificationStatus = "COMPLETED" | "CANCELLED" | "INTERRUPTED"
export function buildBackgroundTaskNotificationText(input: {
task: BackgroundTask
@@ -15,43 +15,21 @@ export function buildBackgroundTaskNotificationText(input: {
const errorInfo = task.error ? `\n**Error:** ${task.error}` : ""
if (allComplete) {
const succeededTasks = completedTasks.filter((t) => t.status === "completed")
const failedTasks = completedTasks.filter((t) => t.status !== "completed")
const succeededText = succeededTasks.length > 0
? succeededTasks.map((t) => `- \`${t.id}\`: ${t.description}`).join("\n")
: ""
const failedText = failedTasks.length > 0
? failedTasks.map((t) => `- \`${t.id}\`: ${t.description} [${t.status.toUpperCase()}]${t.error ? ` - ${t.error}` : ""}`).join("\n")
: ""
const hasFailures = failedTasks.length > 0
const header = hasFailures
? `[ALL BACKGROUND TASKS FINISHED - ${failedTasks.length} FAILED]`
: "[ALL BACKGROUND TASKS COMPLETE]"
let body = ""
if (succeededText) {
body += `**Completed:**\n${succeededText}\n`
}
if (failedText) {
body += `\n**Failed:**\n${failedText}\n`
}
if (!body) {
body = `- \`${task.id}\`: ${task.description} [${task.status.toUpperCase()}]${task.error ? ` - ${task.error}` : ""}\n`
}
const completedTasksText = completedTasks
.map((t) => `- \`${t.id}\`: ${t.description}`)
.join("\n")
return `<system-reminder>
${header}
[ALL BACKGROUND TASKS COMPLETE]
${body.trim()}
**Completed:**
${completedTasksText || `- \`${task.id}\`: ${task.description}`}
Use \`background_output(task_id="<id>")\` to retrieve each result.${hasFailures ? `\n\n**ACTION REQUIRED:** ${failedTasks.length} task(s) failed. Check errors above and decide whether to retry or proceed.` : ""}
Use \`background_output(task_id="<id>")\` to retrieve each result.
</system-reminder>`
}
const agentInfo = task.category ? `${task.agent} (${task.category})` : task.agent
const isFailure = statusText !== "COMPLETED"
return `<system-reminder>
[BACKGROUND TASK ${statusText}]
@@ -61,7 +39,7 @@ Use \`background_output(task_id="<id>")\` to retrieve each result.${hasFailures
**Duration:** ${duration}${errorInfo}
**${remainingCount} task${remainingCount === 1 ? "" : "s"} still in progress.** You WILL be notified when ALL complete.
${isFailure ? "**ACTION REQUIRED:** This task failed. Check the error and decide whether to retry, cancel remaining tasks, or continue." : "Do NOT poll - continue productive work."}
Do NOT poll - continue productive work.
Use \`background_output(task_id="${task.id}")\` to retrieve this result when ready.
</system-reminder>`

View File

@@ -10,7 +10,6 @@ export const DEFAULT_MAX_TOOL_CALLS = 4000
export const DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD = 20
export const DEFAULT_CIRCUIT_BREAKER_ENABLED = true
export const MIN_RUNTIME_BEFORE_STALE_MS = 30_000
export const DEFAULT_SESSION_GONE_TIMEOUT_MS = 60_000
export const MIN_IDLE_TIME_MS = 5000
export const POLLING_INTERVAL_MS = 3000
export const TASK_CLEANUP_DELAY_MS = 10 * 60 * 1000

View File

@@ -3312,9 +3312,6 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
prompt: async () => ({}),
promptAsync: async () => ({}),
abort: async () => ({}),
get: async () => {
throw new Error("missing")
},
},
}
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { staleTimeoutMs: 180_000 })
@@ -3351,9 +3348,6 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
prompt: async () => ({}),
promptAsync: async () => ({}),
abort: async () => ({}),
get: async () => {
throw new Error("missing")
},
},
}
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { staleTimeoutMs: 180_000 })
@@ -3443,7 +3437,6 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
status: "running",
startedAt: new Date(Date.now() - 15 * 60 * 1000),
progress: undefined,
consecutiveMissedPolls: 2,
}
getTaskMap(manager).set(task.id, task)
@@ -3478,7 +3471,6 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
status: "running",
startedAt: new Date(Date.now() - 15 * 60 * 1000),
progress: undefined,
consecutiveMissedPolls: 2,
}
getTaskMap(manager).set(task.id, task)
@@ -3486,12 +3478,12 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
//#when — no progress update for 15 minutes
await manager["checkAndInterruptStaleTasks"]({})
//#then — killed because session gone from status registry
//#then — killed after messageStalenessTimeout
expect(task.status).toBe("cancelled")
expect(task.error).toContain("session gone from status registry")
expect(task.error).toContain("no activity")
})
test("should NOT interrupt task with no lastUpdate within session-gone timeout", async () => {
test("should NOT interrupt task with no lastUpdate within messageStalenessTimeout", async () => {
//#given
const client = {
session: {
@@ -3500,7 +3492,7 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
abort: async () => ({}),
},
}
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { messageStalenessTimeoutMs: 600_000, sessionGoneTimeoutMs: 600_000 })
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { messageStalenessTimeoutMs: 600_000 })
const task: BackgroundTask = {
id: "task-fresh-no-update",
@@ -3517,7 +3509,7 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
getTaskMap(manager).set(task.id, task)
//#when — only 5 min since start, within 10min session-gone timeout
//#when — only 5 min since start, within 10min timeout
await manager["checkAndInterruptStaleTasks"]({})
//#then — task survives
@@ -3736,9 +3728,6 @@ describe("BackgroundManager.handleEvent - session.deleted cascade", () => {
properties: { info: { id: parentSessionID } },
})
// Flush twice: cancelTask now awaits session.abort() before cleanupPendingByParent,
// so we need additional microtask ticks to let the cascade complete fully.
await flushBackgroundNotifications()
await flushBackgroundNotifications()
// then
@@ -4274,7 +4263,7 @@ describe("BackgroundManager.pruneStaleTasksAndNotifications - removes pruned tas
expect(retainedTask?.status).toBe("error")
expect(getTaskMap(manager).has(staleTask.id)).toBe(true)
expect(notifications).toHaveLength(1)
expect(notifications[0]).toContain("[ALL BACKGROUND TASKS FINISHED")
expect(notifications[0]).toContain("[ALL BACKGROUND TASKS COMPLETE]")
expect(notifications[0]).toContain(staleTask.description)
expect(getCompletionTimers(manager).has(staleTask.id)).toBe(true)
expect(removeTaskCalls).toContain(staleTask.id)

View File

@@ -147,7 +147,7 @@ export class BackgroundManager {
private queuesByKey: Map<string, QueueItem[]> = new Map()
private processingKeys: Set<string> = new Set()
private completionTimers: Map<string, ReturnType<typeof setTimeout>> = new Map()
private completedTaskSummaries: Map<string, Array<{id: string, description: string, status: string, error?: string}>> = new Map()
private completedTaskSummaries: Map<string, Array<{id: string, description: string}>> = new Map()
private idleDeferralTimers: Map<string, ReturnType<typeof setTimeout>> = new Map()
private notificationQueueByParent: Map<string, Promise<void>> = new Map()
private rootDescendantCounts: Map<string, number>
@@ -538,7 +538,7 @@ export class BackgroundManager {
})(),
parts: [createInternalAgentTextPart(input.prompt)],
},
}).catch(async (error) => {
}).catch((error) => {
log("[background-agent] promptAsync error:", error)
const existingTask = this.findBySession(sessionID)
if (existingTask) {
@@ -561,8 +561,7 @@ export class BackgroundManager {
removeTaskToastTracking(existingTask.id)
// Abort the session to prevent infinite polling hang
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
await this.client.session.abort({
this.client.session.abort({
path: { id: sessionID },
}).catch(() => {})
@@ -824,7 +823,7 @@ export class BackgroundManager {
})(),
parts: [createInternalAgentTextPart(input.prompt)],
},
}).catch(async (error) => {
}).catch((error) => {
log("[background-agent] resume prompt error:", error)
existingTask.status = "interrupt"
const errorMessage = error instanceof Error ? error.message : String(error)
@@ -843,9 +842,8 @@ export class BackgroundManager {
removeTaskToastTracking(existingTask.id)
// Abort the session to prevent infinite polling hang
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
if (existingTask.sessionID) {
await this.client.session.abort({
this.client.session.abort({
path: { id: existingTask.sessionID },
}).catch(() => {})
}
@@ -1394,8 +1392,7 @@ export class BackgroundManager {
}
if (abortSession && task.sessionID) {
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
await this.client.session.abort({
this.client.session.abort({
path: { id: task.sessionID },
}).catch(() => {})
@@ -1513,8 +1510,7 @@ export class BackgroundManager {
}
if (task.sessionID) {
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
await this.client.session.abort({
this.client.session.abort({
path: { id: task.sessionID },
}).catch(() => {})
@@ -1556,8 +1552,6 @@ export class BackgroundManager {
this.completedTaskSummaries.get(task.parentSessionID)!.push({
id: task.id,
description: task.description,
status: task.status,
error: task.error,
})
// Update pending tracking and check if all tasks complete
@@ -1579,7 +1573,7 @@ export class BackgroundManager {
}
const completedTasks = allComplete
? (this.completedTaskSummaries.get(task.parentSessionID) ?? [{ id: task.id, description: task.description, status: task.status, error: task.error }])
? (this.completedTaskSummaries.get(task.parentSessionID) ?? [{ id: task.id, description: task.description }])
: []
if (allComplete) {
@@ -1597,40 +1591,20 @@ export class BackgroundManager {
let notification: string
if (allComplete) {
const succeededTasks = completedTasks.filter(t => t.status === "completed")
const failedTasks = completedTasks.filter(t => t.status !== "completed")
const succeededText = succeededTasks.length > 0
? succeededTasks.map(t => `- \`${t.id}\`: ${t.description}`).join("\n")
: ""
const failedText = failedTasks.length > 0
? failedTasks.map(t => `- \`${t.id}\`: ${t.description} [${t.status.toUpperCase()}]${t.error ? ` - ${t.error}` : ""}`).join("\n")
: ""
const hasFailures = failedTasks.length > 0
const header = hasFailures
? `[ALL BACKGROUND TASKS FINISHED - ${failedTasks.length} FAILED]`
: "[ALL BACKGROUND TASKS COMPLETE]"
let body = ""
if (succeededText) {
body += `**Completed:**\n${succeededText}\n`
}
if (failedText) {
body += `\n**Failed:**\n${failedText}\n`
}
if (!body) {
body = `- \`${task.id}\`: ${task.description} [${task.status.toUpperCase()}]${task.error ? ` - ${task.error}` : ""}\n`
}
const completedTasksText = completedTasks
.map(t => `- \`${t.id}\`: ${t.description}`)
.join("\n")
notification = `<system-reminder>
${header}
[ALL BACKGROUND TASKS COMPLETE]
${body.trim()}
**Completed:**
${completedTasksText || `- \`${task.id}\`: ${task.description}`}
Use \`background_output(task_id="<id>")\` to retrieve each result.${hasFailures ? `\n\n**ACTION REQUIRED:** ${failedTasks.length} task(s) failed. Check errors above and decide whether to retry or proceed.` : ""}
Use \`background_output(task_id="<id>")\` to retrieve each result.
</system-reminder>`
} else {
// Individual completion - silent notification
notification = `<system-reminder>
[BACKGROUND TASK ${statusText}]
**ID:** \`${task.id}\`
@@ -1638,7 +1612,7 @@ Use \`background_output(task_id="<id>")\` to retrieve each result.${hasFailures
**Duration:** ${duration}${errorInfo}
**${remainingCount} task${remainingCount === 1 ? "" : "s"} still in progress.** You WILL be notified when ALL complete.
${statusText === "COMPLETED" ? "Do NOT poll - continue productive work." : "**ACTION REQUIRED:** This task failed. Check the error and decide whether to retry, cancel remaining tasks, or continue."}
Do NOT poll - continue productive work.
Use \`background_output(task_id="${task.id}")\` to retrieve this result when ready.
</system-reminder>`
@@ -1701,14 +1675,11 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
resolvedModel: model,
})
const isTaskFailure = task.status === "error" || task.status === "cancelled" || task.status === "interrupt"
const shouldReply = allComplete || isTaskFailure
try {
await this.client.session.promptAsync({
path: { id: task.parentSessionID },
body: {
noReply: !shouldReply,
noReply: !allComplete,
...(agent !== undefined ? { agent } : {}),
...(model !== undefined ? { model } : {}),
...(resolvedTools ? { tools: resolvedTools } : {}),
@@ -1718,8 +1689,7 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
log("[background-agent] Sent notification to parent session:", {
taskId: task.id,
allComplete,
isTaskFailure,
noReply: !shouldReply,
noReply: !allComplete,
})
} catch (error) {
if (isAbortedSessionError(error)) {
@@ -1755,7 +1725,6 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
pruneStaleTasksAndNotifications({
tasks: this.tasks,
notifications: this.notifications,
taskTtlMs: this.config?.taskTtlMs,
onTaskPruned: (taskId, task, errorMessage) => {
const wasPending = task.status === "pending"
log("[background-agent] Pruning stale task:", { taskId, status: task.status, age: Math.round(((wasPending ? task.queuedAt?.getTime() : task.startedAt?.getTime()) ? (Date.now() - (wasPending ? task.queuedAt!.getTime() : task.startedAt!.getTime())) : 0) / 1000) + "s" })
@@ -1818,53 +1787,6 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
})
}
/**
 * Checks whether a subagent session still exists server-side.
 * Any lookup failure is treated as "session gone" rather than propagated.
 *
 * @param sessionID - ID of the session to look up via the client API.
 * @returns true when the lookup returns data; false on missing data or error.
 */
private async verifySessionExists(sessionID: string): Promise<boolean> {
  try {
    const { data } = await this.client.session.get({ path: { id: sessionID } })
    return Boolean(data)
  } catch {
    return false
  }
}
/**
 * Marks a task as failed after its session was found to have crashed
 * (disappeared without producing output), then tears down all bookkeeping
 * for it and queues a notification to the parent session.
 *
 * Teardown order matters: status/history first, then resource release
 * (concurrency slot, timers), then per-parent/notification cleanup,
 * and finally the parent notification enqueue.
 *
 * @param task - The background task whose session crashed.
 * @param errorMessage - Human-readable reason recorded on the task.
 */
private async failCrashedTask(task: BackgroundTask, errorMessage: string): Promise<void> {
// Transition the task to a terminal error state with a completion timestamp.
task.status = "error"
task.error = errorMessage
task.completedAt = new Date()
// Decrement the descendant count tracked for the root session, if any.
if (task.rootSessionID) {
this.unregisterRootDescendant(task.rootSessionID)
}
// Record the terminal outcome in the per-parent task history.
this.taskHistory.record(task.parentSessionID, { id: task.id, sessionID: task.sessionID, agent: task.agent, description: task.description, status: "error", category: task.category, startedAt: task.startedAt, completedAt: task.completedAt })
// Release the concurrency slot so queued tasks with the same key can run.
if (task.concurrencyKey) {
this.concurrencyManager.release(task.concurrencyKey)
task.concurrencyKey = undefined
}
// Cancel and forget any pending completion timer for this task.
const completionTimer = this.completionTimers.get(task.id)
if (completionTimer) {
clearTimeout(completionTimer)
this.completionTimers.delete(task.id)
}
// Cancel and forget any pending idle-deferral timer for this task.
const idleTimer = this.idleDeferralTimers.get(task.id)
if (idleTimer) {
clearTimeout(idleTimer)
this.idleDeferralTimers.delete(task.id)
}
// Remove per-parent pending tracking and any queued notifications for the task.
this.cleanupPendingByParent(task)
this.clearNotificationsForTask(task.id)
removeTaskToastTracking(task.id)
// Schedule eventual removal of the task record itself.
this.scheduleTaskRemoval(task.id)
// Drop the session's category registration since the session is gone.
if (task.sessionID) {
SessionCategoryRegistry.remove(task.sessionID)
}
// Flag the task for notification, then enqueue the parent notification on the
// per-parent queue; errors are logged rather than propagated (best-effort).
this.markForNotification(task)
this.enqueueNotificationForParent(task.parentSessionID, () => this.notifyParentSession(task)).catch(err => {
log("[background-agent] Error in notifyParentSession for crashed task:", { taskId: task.id, error: err })
})
}
private async pollRunningTasks(): Promise<void> {
if (this.pollingInFlight) return
this.pollingInFlight = true
@@ -1926,20 +1848,11 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
}
// Session is idle or no longer in status response (completed/disappeared)
const sessionGoneFromStatus = !sessionStatus
const completionSource = sessionStatus?.type === "idle"
? "polling (idle status)"
: "polling (session gone from status)"
const hasValidOutput = await this.validateSessionHasOutput(sessionID)
if (!hasValidOutput) {
if (sessionGoneFromStatus) {
const sessionExists = await this.verifySessionExists(sessionID)
if (!sessionExists) {
log("[background-agent] Session no longer exists (crashed), marking task as error:", task.id)
await this.failCrashedTask(task, "Subagent session no longer exists (process likely crashed). The session disappeared without producing any output.")
continue
}
}
log("[background-agent] Polling idle/gone but no valid output yet, waiting:", task.id)
continue
}

View File

@@ -11,7 +11,7 @@ function registerProcessSignal(
handler()
if (exitAfter) {
process.exitCode = 0
setTimeout(() => process.exit(), 6000)
setTimeout(() => process.exit(), 6000).unref()
}
}
process.on(signal, listener)
@@ -32,26 +32,16 @@ export function registerManagerForCleanup(manager: CleanupTarget): void {
if (cleanupRegistered) return
cleanupRegistered = true
let cleanupPromise: Promise<void> | undefined
const cleanupAll = () => {
if (cleanupPromise) return
const promises: Promise<void>[] = []
for (const m of cleanupManagers) {
try {
promises.push(
Promise.resolve(m.shutdown()).catch((error) => {
log("[background-agent] Error during async shutdown cleanup:", error)
})
)
void Promise.resolve(m.shutdown()).catch((error) => {
log("[background-agent] Error during async shutdown cleanup:", error)
})
} catch (error) {
log("[background-agent] Error during shutdown cleanup:", error)
}
}
cleanupPromise = Promise.allSettled(promises).then(() => {})
cleanupPromise.then(() => {
log("[background-agent] All shutdown cleanup completed")
})
}
const registerSignal = (signal: ProcessCleanupEvent, exitAfter: boolean): void => {

View File

@@ -8,7 +8,6 @@ describe("checkAndInterruptStaleTasks", () => {
const mockClient = {
session: {
abort: mock(() => Promise.resolve()),
get: mock(() => Promise.resolve({ data: { id: "ses-1" } })),
},
}
const mockConcurrencyManager = {
@@ -36,11 +35,6 @@ describe("checkAndInterruptStaleTasks", () => {
beforeEach(() => {
fixedTime = Date.now()
spyOn(globalThis.Date, "now").mockReturnValue(fixedTime)
mockClient.session.abort.mockClear()
mockClient.session.get.mockReset()
mockClient.session.get.mockResolvedValue({ data: { id: "ses-1" } })
mockConcurrencyManager.release.mockClear()
mockNotify.mockClear()
})
afterEach(() => {
@@ -294,165 +288,29 @@ describe("checkAndInterruptStaleTasks", () => {
expect(task.status).toBe("running")
})
it("should NOT cancel healthy task on first missing status poll", async () => {
//#given — one missing poll should not be enough to declare the session gone
it("should use default stale timeout when session status is unknown/missing", async () => {
//#given — lastUpdate exceeds stale timeout, session not in status map
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 1,
lastUpdate: new Date(Date.now() - 120_000),
lastUpdate: new Date(Date.now() - 200_000),
},
})
//#when
//#when — empty sessionStatuses (session not found)
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
config: { staleTimeoutMs: 180_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: {},
})
//#then
expect(task.status).toBe("running")
expect(task.consecutiveMissedPolls).toBe(1)
expect(mockClient.session.get).not.toHaveBeenCalled()
})
// A missing entry in the bulk status map is not trusted on its own: a direct
// session.get lookup that succeeds must reset the missed-poll counter.
it("should NOT cancel task when session.get confirms the session still exists", async () => {
//#given — repeated missing polls but direct lookup still succeeds
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 1,
lastUpdate: new Date(Date.now() - 120_000),
},
consecutiveMissedPolls: 2,
})
//#when
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: {},
})
//#then — lookup succeeded, so the counter resets and the task survives
expect(task.status).toBe("running")
expect(task.consecutiveMissedPolls).toBe(0)
expect(mockClient.session.get).toHaveBeenCalledWith({ path: { id: "ses-1" } })
})
it("should use session-gone timeout when session is missing from status map (with progress)", async () => {
//#given — lastUpdate 2min ago, session completely gone from status
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 1,
lastUpdate: new Date(Date.now() - 120_000),
},
consecutiveMissedPolls: 2,
})
mockClient.session.get.mockRejectedValue(new Error("missing"))
//#when — empty sessionStatuses (session gone), sessionGoneTimeoutMs = 60s
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: {},
})
//#then — cancelled because session gone timeout (60s) < timeSinceLastUpdate (120s)
//#then — unknown session treated as potentially stale, apply default timeout
expect(task.status).toBe("cancelled")
expect(task.error).toContain("session gone from status registry")
})
// With no progress recorded, runtime since start is compared against the
// session-gone timeout once the session has vanished from the status map.
it("should use session-gone timeout when session is missing from status map (no progress)", async () => {
//#given — task started 2min ago, no progress, session completely gone
const task = createRunningTask({
startedAt: new Date(Date.now() - 120_000),
progress: undefined,
consecutiveMissedPolls: 2,
})
mockClient.session.get.mockRejectedValue(new Error("missing"))
//#when — session gone, sessionGoneTimeoutMs = 60s
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { messageStalenessTimeoutMs: 600_000, sessionGoneTimeoutMs: 60_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: {},
})
//#then — cancelled because session gone timeout (60s) < runtime (120s)
expect(task.status).toBe("cancelled")
expect(task.error).toContain("session gone from status registry")
})
// A session present in the status map (even as idle) is NOT "gone"; the
// normal stale timeout applies and the task must keep running.
it("should NOT use session-gone timeout when session is idle (present in status map)", async () => {
//#given — lastUpdate 2min ago, session is idle (present in status but not active)
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 1,
lastUpdate: new Date(Date.now() - 120_000),
},
consecutiveMissedPolls: 2,
})
mockClient.session.get.mockRejectedValue(new Error("missing"))
//#when — session is idle (present in map), staleTimeoutMs = 180s
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: { "ses-1": { type: "idle" } },
})
//#then — still running because normal staleTimeout (180s) > timeSinceLastUpdate (120s)
expect(task.status).toBe("running")
})
it("should use default session-gone timeout when not configured", async () => {
//#given — lastUpdate 2min ago, session gone, no sessionGoneTimeoutMs config
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 1,
lastUpdate: new Date(Date.now() - 120_000),
},
consecutiveMissedPolls: 2,
})
mockClient.session.get.mockRejectedValue(new Error("missing"))
//#when — no config (default sessionGoneTimeoutMs = 60_000)
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: undefined,
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: {},
})
//#then — cancelled because default session gone timeout (60s) < timeSinceLastUpdate (120s)
expect(task.status).toBe("cancelled")
expect(task.error).toContain("session gone from status registry")
expect(task.error).toContain("Stale timeout")
})
it("should NOT interrupt task when session is busy (OpenCode status), even if lastUpdate exceeds stale timeout", async () => {
@@ -668,132 +526,6 @@ describe("pruneStaleTasksAndNotifications", () => {
expect(pruned).toContain("old-task")
})
// Pruning keys off the last activity timestamp, not startedAt: an old task
// with fresh progress must survive the TTL check.
it("#given running task with recent progress #when startedAt exceeds TTL #then should NOT prune", () => {
//#given
const tasks = new Map<string, BackgroundTask>()
const activeTask: BackgroundTask = {
id: "active-task",
parentSessionID: "parent",
parentMessageID: "msg",
description: "active",
prompt: "active",
agent: "oracle",
status: "running",
startedAt: new Date(Date.now() - 45 * 60 * 1000),
progress: {
toolCalls: 10,
lastUpdate: new Date(Date.now() - 5 * 60 * 1000),
},
}
tasks.set("active-task", activeTask)
const pruned: string[] = []
const notifications = new Map<string, BackgroundTask[]>()
//#when
pruneStaleTasksAndNotifications({
tasks,
notifications,
onTaskPruned: (taskId) => pruned.push(taskId),
})
//#then — recent lastUpdate keeps the task alive despite its age
expect(pruned).toEqual([])
})
// Counterpart to the recent-progress case: once lastUpdate itself exceeds
// the TTL, the running task is pruned.
it("#given running task with stale progress #when lastUpdate exceeds TTL #then should prune", () => {
//#given
const tasks = new Map<string, BackgroundTask>()
const staleTask: BackgroundTask = {
id: "stale-task",
parentSessionID: "parent",
parentMessageID: "msg",
description: "stale",
prompt: "stale",
agent: "oracle",
status: "running",
startedAt: new Date(Date.now() - 60 * 60 * 1000),
progress: {
toolCalls: 10,
lastUpdate: new Date(Date.now() - 35 * 60 * 1000),
},
}
tasks.set("stale-task", staleTask)
const pruned: string[] = []
const notifications = new Map<string, BackgroundTask[]>()
//#when
pruneStaleTasksAndNotifications({
tasks,
notifications,
onTaskPruned: (taskId) => pruned.push(taskId),
})
//#then
expect(pruned).toContain("stale-task")
})
// A caller-supplied taskTtlMs overrides the default TTL: 61min old task vs a
// 60min custom TTL must be pruned.
it("#given custom taskTtlMs #when task exceeds custom TTL #then should prune", () => {
//#given
const tasks = new Map<string, BackgroundTask>()
const task: BackgroundTask = {
id: "custom-ttl-task",
parentSessionID: "parent",
parentMessageID: "msg",
description: "custom",
prompt: "custom",
agent: "explore",
status: "running",
startedAt: new Date(Date.now() - 61 * 60 * 1000),
}
tasks.set("custom-ttl-task", task)
const pruned: string[] = []
const notifications = new Map<string, BackgroundTask[]>()
//#when
pruneStaleTasksAndNotifications({
tasks,
notifications,
taskTtlMs: 60 * 60 * 1000,
onTaskPruned: (taskId) => pruned.push(taskId),
})
//#then
expect(pruned).toContain("custom-ttl-task")
})
// Boundary companion to the exceeds-TTL case: 45min old task vs a 60min
// custom TTL must NOT be pruned.
it("#given custom taskTtlMs #when task within custom TTL #then should NOT prune", () => {
//#given
const tasks = new Map<string, BackgroundTask>()
const task: BackgroundTask = {
id: "within-ttl-task",
parentSessionID: "parent",
parentMessageID: "msg",
description: "within",
prompt: "within",
agent: "explore",
status: "running",
startedAt: new Date(Date.now() - 45 * 60 * 1000),
}
tasks.set("within-ttl-task", task)
const pruned: string[] = []
const notifications = new Map<string, BackgroundTask[]>()
//#when
pruneStaleTasksAndNotifications({
tasks,
notifications,
taskTtlMs: 60 * 60 * 1000,
onTaskPruned: (taskId) => pruned.push(taskId),
})
//#then
expect(pruned).toEqual([])
})
it("should prune terminal tasks when completion time exceeds terminal TTL", () => {
//#given
const tasks = new Map<string, BackgroundTask>()

View File

@@ -7,7 +7,6 @@ import type { OpencodeClient } from "./opencode-client"
import {
DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS,
DEFAULT_SESSION_GONE_TIMEOUT_MS,
DEFAULT_STALE_TIMEOUT_MS,
MIN_RUNTIME_BEFORE_STALE_MS,
TERMINAL_TASK_TTL_MS,
@@ -16,8 +15,6 @@ import {
import { removeTaskToastTracking } from "./remove-task-toast-tracking"
import { isActiveSessionStatus } from "./session-status-classifier"
const MIN_SESSION_GONE_POLLS = 3
const TERMINAL_TASK_STATUSES = new Set<BackgroundTask["status"]>([
"completed",
"error",
@@ -29,10 +26,8 @@ export function pruneStaleTasksAndNotifications(args: {
tasks: Map<string, BackgroundTask>
notifications: Map<string, BackgroundTask[]>
onTaskPruned: (taskId: string, task: BackgroundTask, errorMessage: string) => void
taskTtlMs?: number
}): void {
const { tasks, notifications, onTaskPruned } = args
const effectiveTtl = args.taskTtlMs ?? TASK_TTL_MS
const now = Date.now()
const tasksWithPendingNotifications = new Set<string>()
@@ -57,22 +52,18 @@ export function pruneStaleTasksAndNotifications(args: {
continue
}
const lastActivity = task.status === "running" && task.progress?.lastUpdate
? task.progress.lastUpdate.getTime()
: undefined
const timestamp = task.status === "pending"
? task.queuedAt?.getTime()
: (lastActivity ?? task.startedAt?.getTime())
: task.startedAt?.getTime()
if (!timestamp) continue
const age = now - timestamp
if (age <= effectiveTtl) continue
if (age <= TASK_TTL_MS) continue
const ttlMinutes = Math.round(effectiveTtl / 60000)
const errorMessage = task.status === "pending"
? `Task timed out while queued (${ttlMinutes} minutes)`
: `Task timed out after ${ttlMinutes} minutes of inactivity`
? "Task timed out while queued (30 minutes)"
: "Task timed out after 30 minutes"
onTaskPruned(taskId, task, errorMessage)
}
@@ -86,7 +77,7 @@ export function pruneStaleTasksAndNotifications(args: {
const validNotifications = queued.filter((task) => {
if (!task.startedAt) return false
const age = now - task.startedAt.getTime()
return age <= effectiveTtl
return age <= TASK_TTL_MS
})
if (validNotifications.length === 0) {
@@ -99,15 +90,6 @@ export function pruneStaleTasksAndNotifications(args: {
export type SessionStatusMap = Record<string, { type: string }>
/**
 * Checks whether a session is still known to the server.
 * Resolves false when the lookup returns no data or throws.
 */
async function verifySessionExists(client: OpencodeClient, sessionID: string): Promise<boolean> {
  try {
    const response = await client.session.get({ path: { id: sessionID } })
    return response.data != null
  } catch {
    return false
  }
}
export async function checkAndInterruptStaleTasks(args: {
tasks: Iterable<BackgroundTask>
client: OpencodeClient
@@ -127,7 +109,6 @@ export async function checkAndInterruptStaleTasks(args: {
onTaskInterrupted = (task) => removeTaskToastTracking(task.id),
} = args
const staleTimeoutMs = config?.staleTimeoutMs ?? DEFAULT_STALE_TIMEOUT_MS
const sessionGoneTimeoutMs = config?.sessionGoneTimeoutMs ?? DEFAULT_SESSION_GONE_TIMEOUT_MS
const now = Date.now()
const messageStalenessMs = config?.messageStalenessTimeoutMs ?? DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS
@@ -141,32 +122,15 @@ export async function checkAndInterruptStaleTasks(args: {
const sessionStatus = sessionStatuses?.[sessionID]?.type
const sessionIsRunning = sessionStatus !== undefined && isActiveSessionStatus(sessionStatus)
const sessionMissing = sessionStatuses !== undefined && sessionStatus === undefined
const runtime = now - startedAt.getTime()
if (sessionMissing) {
task.consecutiveMissedPolls = (task.consecutiveMissedPolls ?? 0) + 1
} else if (sessionStatuses !== undefined) {
task.consecutiveMissedPolls = 0
}
const sessionGone = sessionMissing && (task.consecutiveMissedPolls ?? 0) >= MIN_SESSION_GONE_POLLS
if (!task.progress?.lastUpdate) {
if (sessionIsRunning) continue
if (sessionMissing && !sessionGone) continue
const effectiveTimeout = sessionGone ? sessionGoneTimeoutMs : messageStalenessMs
if (runtime <= effectiveTimeout) continue
if (sessionGone && await verifySessionExists(client, sessionID)) {
task.consecutiveMissedPolls = 0
continue
}
if (runtime <= messageStalenessMs) continue
const staleMinutes = Math.round(runtime / 60000)
const reason = sessionGone ? "session gone from status registry" : "no activity"
task.status = "cancelled"
task.error = `Stale timeout (${reason} for ${staleMinutes}min since start). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.${sessionGone ? "sessionGoneTimeoutMs" : "staleTimeoutMs"}' in .opencode/oh-my-opencode.json.`
task.error = `Stale timeout (no activity for ${staleMinutes}min since start). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.staleTimeoutMs' in .opencode/oh-my-opencode.json.`
task.completedAt = new Date()
if (task.concurrencyKey) {
@@ -192,20 +156,13 @@ export async function checkAndInterruptStaleTasks(args: {
if (runtime < MIN_RUNTIME_BEFORE_STALE_MS) continue
const timeSinceLastUpdate = now - task.progress.lastUpdate.getTime()
const effectiveStaleTimeout = sessionGone ? sessionGoneTimeoutMs : staleTimeoutMs
if (timeSinceLastUpdate <= effectiveStaleTimeout) continue
if (timeSinceLastUpdate <= staleTimeoutMs) continue
if (task.status !== "running") continue
if (sessionGone && await verifySessionExists(client, sessionID)) {
task.consecutiveMissedPolls = 0
continue
}
const staleMinutes = Math.round(timeSinceLastUpdate / 60000)
const reason = sessionGone ? "session gone from status registry" : "no activity"
task.status = "cancelled"
task.error = `Stale timeout (${reason} for ${staleMinutes}min). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.${sessionGone ? "sessionGoneTimeoutMs" : "staleTimeoutMs"}' in .opencode/oh-my-opencode.json.`
task.completedAt = new Date()
const staleMinutes = Math.round(timeSinceLastUpdate / 60000)
task.status = "cancelled"
task.error = `Stale timeout (no activity for ${staleMinutes}min). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.staleTimeoutMs' in .opencode/oh-my-opencode.json.`
task.completedAt = new Date()
if (task.concurrencyKey) {
concurrencyManager.release(task.concurrencyKey)

View File

@@ -66,8 +66,6 @@ export interface BackgroundTask {
lastMsgCount?: number
/** Number of consecutive polls with stable message count */
stablePolls?: number
/** Number of consecutive polls where session was missing from status map */
consecutiveMissedPolls?: number
}
export interface LaunchInput {

View File

@@ -2,4 +2,3 @@ export * from "./types"
export * from "./constants"
export * from "./storage"
export * from "./top-level-task"
export * from "./worktree-sync"

View File

@@ -1,88 +0,0 @@
import { describe, expect, test, beforeEach, afterEach } from "bun:test"
import { existsSync, mkdirSync, rmSync, writeFileSync, readFileSync } from "node:fs"
import { join } from "node:path"
import { tmpdir } from "node:os"
import { syncSisyphusStateFromWorktree } from "./worktree-sync"
// Exercises syncSisyphusStateFromWorktree against real temp directories:
// a fresh worktree/main-repo pair is created per test and removed after.
describe("syncSisyphusStateFromWorktree", () => {
const BASE = join(tmpdir(), "worktree-sync-test-" + Date.now())
const WORKTREE = join(BASE, "worktree")
const MAIN_REPO = join(BASE, "main")
beforeEach(() => {
mkdirSync(WORKTREE, { recursive: true })
mkdirSync(MAIN_REPO, { recursive: true })
})
afterEach(() => {
if (existsSync(BASE)) {
rmSync(BASE, { recursive: true, force: true })
}
})
// Absence of .sisyphus is a no-op success, not an error.
test("#given no .sisyphus in worktree #when syncing #then returns true without error", () => {
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
expect(result).toBe(true)
expect(existsSync(join(MAIN_REPO, ".sisyphus"))).toBe(false)
})
test("#given .sisyphus with boulder.json in worktree #when syncing #then copies to main repo", () => {
const worktreeSisyphus = join(WORKTREE, ".sisyphus")
mkdirSync(worktreeSisyphus, { recursive: true })
writeFileSync(join(worktreeSisyphus, "boulder.json"), '{"active_plan":"/plan.md","plan_name":"test"}')
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
expect(result).toBe(true)
const copied = readFileSync(join(MAIN_REPO, ".sisyphus", "boulder.json"), "utf-8")
expect(JSON.parse(copied).plan_name).toBe("test")
})
// The copy must be recursive, preserving nested plan/notepad directories.
test("#given nested .sisyphus dirs in worktree #when syncing #then copies full tree recursively", () => {
const worktreePlans = join(WORKTREE, ".sisyphus", "plans")
const worktreeNotepads = join(WORKTREE, ".sisyphus", "notepads", "my-plan")
mkdirSync(worktreePlans, { recursive: true })
mkdirSync(worktreeNotepads, { recursive: true })
writeFileSync(join(worktreePlans, "my-plan.md"), "- [x] Task 1\n- [ ] Task 2")
writeFileSync(join(worktreeNotepads, "learnings.md"), "learned something")
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
expect(result).toBe(true)
expect(readFileSync(join(MAIN_REPO, ".sisyphus", "plans", "my-plan.md"), "utf-8")).toContain("Task 1")
expect(readFileSync(join(MAIN_REPO, ".sisyphus", "notepads", "my-plan", "learnings.md"), "utf-8")).toBe("learned something")
})
// Worktree state wins on conflict: stale main-repo files are overwritten.
test("#given existing .sisyphus in main repo #when syncing #then worktree state overwrites stale state", () => {
const mainSisyphus = join(MAIN_REPO, ".sisyphus")
mkdirSync(mainSisyphus, { recursive: true })
writeFileSync(join(mainSisyphus, "boulder.json"), '{"plan_name":"old"}')
const worktreeSisyphus = join(WORKTREE, ".sisyphus")
mkdirSync(worktreeSisyphus, { recursive: true })
writeFileSync(join(worktreeSisyphus, "boulder.json"), '{"plan_name":"updated"}')
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
expect(result).toBe(true)
const content = readFileSync(join(mainSisyphus, "boulder.json"), "utf-8")
expect(JSON.parse(content).plan_name).toBe("updated")
})
// The sync merges rather than replaces: files only in the main repo survive.
test("#given pre-existing files in main .sisyphus #when syncing #then preserves files not in worktree", () => {
const mainSisyphus = join(MAIN_REPO, ".sisyphus", "rules")
mkdirSync(mainSisyphus, { recursive: true })
writeFileSync(join(mainSisyphus, "my-rule.md"), "existing rule")
const worktreeSisyphus = join(WORKTREE, ".sisyphus")
mkdirSync(worktreeSisyphus, { recursive: true })
writeFileSync(join(worktreeSisyphus, "boulder.json"), '{"plan_name":"new"}')
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
expect(result).toBe(true)
expect(readFileSync(join(MAIN_REPO, ".sisyphus", "rules", "my-rule.md"), "utf-8")).toBe("existing rule")
expect(existsSync(join(MAIN_REPO, ".sisyphus", "boulder.json"))).toBe(true)
})
})

View File

@@ -1,34 +0,0 @@
import { existsSync, cpSync, mkdirSync } from "node:fs"
import { join } from "node:path"
import { BOULDER_DIR } from "./constants"
import { log } from "../../shared/logger"
/**
 * Copies the `.sisyphus` state directory from a worktree back into the main
 * repository, merging over any files already present there.
 *
 * Returns true on success (including when the worktree has no state to
 * copy), false when the copy fails.
 */
export function syncSisyphusStateFromWorktree(worktreePath: string, mainRepoPath: string): boolean {
  const sourceDir = join(worktreePath, BOULDER_DIR)
  const targetDir = join(mainRepoPath, BOULDER_DIR)
  // Nothing to do when the worktree never wrote any state.
  if (!existsSync(sourceDir)) {
    log("[worktree-sync] No .sisyphus directory in worktree, nothing to sync", { worktreePath })
    return true
  }
  try {
    if (!existsSync(targetDir)) mkdirSync(targetDir, { recursive: true })
    // force: true lets worktree state overwrite stale main-repo copies.
    cpSync(sourceDir, targetDir, { recursive: true, force: true })
    log("[worktree-sync] Synced .sisyphus state from worktree to main repo", {
      worktreePath,
      mainRepoPath,
    })
    return true
  } catch (err) {
    log("[worktree-sync] Failed to sync .sisyphus state", {
      worktreePath,
      mainRepoPath,
      error: String(err),
    })
    return false
  }
}

View File

@@ -115,14 +115,9 @@ Register these as task/todo items so progress is tracked and visible throughout
When working in a worktree (\`worktree_path\` is set in boulder.json) and ALL plan tasks are complete:
1. Commit all remaining changes in the worktree
2. **Sync .sisyphus state back**: Copy \`.sisyphus/\` from the worktree to the main repo before removal.
This is CRITICAL when \`.sisyphus/\` is gitignored — state written during worktree execution would otherwise be lost.
\`\`\`bash
cp -r <worktree-path>/.sisyphus/* <main-repo>/.sisyphus/ 2>/dev/null || true
\`\`\`
3. Switch to the main working directory (the original repo, NOT the worktree)
4. Merge the worktree branch into the current branch: \`git merge <worktree-branch>\`
5. If merge succeeds, clean up: \`git worktree remove <worktree-path>\`
6. Remove the boulder.json state
2. Switch to the main working directory (the original repo, NOT the worktree)
3. Merge the worktree branch into the current branch: \`git merge <worktree-branch>\`
4. If merge succeeds, clean up: \`git worktree remove <worktree-path>\`
5. Remove the boulder.json state
This is the DEFAULT behavior when \`--worktree\` was used. Skip merge only if the user explicitly instructs otherwise (e.g., asks to create a PR instead).`

View File

@@ -1,125 +0,0 @@
import { execFileSync } from "node:child_process"
import { afterEach, beforeEach, describe, expect, it } from "bun:test"
import { mkdirSync, rmSync, writeFileSync } from "node:fs"
import { tmpdir } from "node:os"
import { join } from "node:path"
import { loadOpencodeGlobalCommands, loadOpencodeProjectCommands } from "./loader"
const TEST_DIR = join(tmpdir(), `claude-code-command-loader-${Date.now()}`)
/** Creates `<directory>/<name>.md` containing a frontmatter description and a one-line body. */
function writeCommand(directory: string, name: string, description: string): void {
  mkdirSync(directory, { recursive: true })
  const markdown = `---\ndescription: ${description}\n---\nRun ${name}.\n`
  writeFileSync(join(directory, `${name}.md`), markdown)
}
// Covers command discovery from .opencode/command(s) directories: ancestor
// walking, singular/plural alias dirs, nearest-wins deduplication, global
// profile precedence, and slash-named nested commands bounded by a git root.
// OPENCODE_CONFIG_DIR is saved/restored around each test.
describe("claude-code command loader", () => {
let originalOpencodeConfigDir: string | undefined
beforeEach(() => {
mkdirSync(TEST_DIR, { recursive: true })
originalOpencodeConfigDir = process.env.OPENCODE_CONFIG_DIR
})
afterEach(() => {
if (originalOpencodeConfigDir === undefined) {
delete process.env.OPENCODE_CONFIG_DIR
} else {
process.env.OPENCODE_CONFIG_DIR = originalOpencodeConfigDir
}
rmSync(TEST_DIR, { recursive: true, force: true })
})
it("#given a parent .opencode/commands directory #when loadOpencodeProjectCommands is called from child directory #then it loads the ancestor command", async () => {
// given
const projectDir = join(TEST_DIR, "project")
const childDir = join(projectDir, "apps", "desktop")
writeCommand(join(projectDir, ".opencode", "commands"), "ancestor", "Ancestor command")
// when
const commands = await loadOpencodeProjectCommands(childDir)
// then
expect(commands.ancestor?.description).toBe("(opencode-project) Ancestor command")
})
it("#given a .opencode/command directory #when loadOpencodeProjectCommands is called #then it loads the singular alias directory", async () => {
// given
writeCommand(join(TEST_DIR, ".opencode", "command"), "singular", "Singular command")
// when
const commands = await loadOpencodeProjectCommands(TEST_DIR)
// then
expect(commands.singular?.description).toBe("(opencode-project) Singular command")
})
// Dedup contract: the directory closest to the starting path wins.
it("#given duplicate project command names across ancestors #when loadOpencodeProjectCommands is called #then the nearest directory wins", async () => {
// given
const projectRoot = join(TEST_DIR, "project")
const childDir = join(projectRoot, "apps", "desktop")
const ancestorDir = join(TEST_DIR, ".opencode", "commands")
const projectDir = join(projectRoot, ".opencode", "commands")
writeCommand(ancestorDir, "duplicate", "Ancestor command")
writeCommand(projectDir, "duplicate", "Nearest command")
// when
const commands = await loadOpencodeProjectCommands(childDir)
// then
expect(commands.duplicate?.description).toBe("(opencode-project) Nearest command")
})
it("#given a global .opencode/commands directory #when loadOpencodeGlobalCommands is called #then it loads the plural alias directory", async () => {
// given
const opencodeConfigDir = join(TEST_DIR, "opencode-config")
process.env.OPENCODE_CONFIG_DIR = opencodeConfigDir
writeCommand(join(opencodeConfigDir, "commands"), "global-plural", "Global plural command")
// when
const commands = await loadOpencodeGlobalCommands()
// then
expect(commands["global-plural"]?.description).toBe("(opencode) Global plural command")
})
// Profile dirs (profiles/<name>) take precedence over their parent config dir.
it("#given duplicate global command names across profile and parent dirs #when loadOpencodeGlobalCommands is called #then the profile dir wins", async () => {
// given
const opencodeRootDir = join(TEST_DIR, "opencode-root")
const profileConfigDir = join(opencodeRootDir, "profiles", "codex")
process.env.OPENCODE_CONFIG_DIR = profileConfigDir
writeCommand(join(opencodeRootDir, "commands"), "duplicate-global", "Parent global command")
writeCommand(join(profileConfigDir, "commands"), "duplicate-global", "Profile global command")
// when
const commands = await loadOpencodeGlobalCommands()
// then
expect(commands["duplicate-global"]?.description).toBe("(opencode) Profile global command")
})
// Nested dirs yield slash-separated names ("deploy/staging", not colons),
// and ancestor walking stops at the git repository root.
it("#given nested project opencode commands in a worktree #when loadOpencodeProjectCommands is called #then it preserves slash names and stops at the worktree root", async () => {
// given
const repositoryDir = join(TEST_DIR, "repo")
const nestedDirectory = join(repositoryDir, "packages", "app", "src")
mkdirSync(nestedDirectory, { recursive: true })
execFileSync("git", ["init"], {
cwd: repositoryDir,
stdio: ["ignore", "ignore", "ignore"],
})
writeCommand(join(repositoryDir, ".opencode", "commands", "deploy"), "staging", "Deploy staging")
writeCommand(join(repositoryDir, ".opencode", "command"), "release", "Release command")
writeCommand(join(TEST_DIR, ".opencode", "commands"), "outside", "Outside command")
// when
const commands = await loadOpencodeProjectCommands(nestedDirectory)
// then
expect(commands["deploy/staging"]?.description).toBe("(opencode-project) Deploy staging")
expect(commands.release?.description).toBe("(opencode-project) Release command")
expect(commands.outside).toBeUndefined()
expect(commands["deploy:staging"]).toBeUndefined()
})
})

View File

@@ -3,11 +3,7 @@ import { join, basename } from "path"
import { parseFrontmatter } from "../../shared/frontmatter"
import { sanitizeModelField } from "../../shared/model-sanitizer"
import { isMarkdownFile } from "../../shared/file-utils"
import {
findProjectOpencodeCommandDirs,
getClaudeConfigDir,
getOpenCodeCommandDirs,
} from "../../shared"
import { getClaudeConfigDir, getOpenCodeConfigDir } from "../../shared"
import { log } from "../../shared/logger"
import type { CommandScope, CommandDefinition, CommandFrontmatter, LoadedCommand } from "./types"
@@ -50,7 +46,7 @@ async function loadCommandsFromDir(
if (entry.isDirectory()) {
if (entry.name.startsWith(".")) continue
const subDirPath = join(commandsDir, entry.name)
const subPrefix = prefix ? `${prefix}/${entry.name}` : entry.name
const subPrefix = prefix ? `${prefix}:${entry.name}` : entry.name
const subCommands = await loadCommandsFromDir(subDirPath, scope, visited, subPrefix)
commands.push(...subCommands)
continue
@@ -60,7 +56,7 @@ async function loadCommandsFromDir(
const commandPath = join(commandsDir, entry.name)
const baseCommandName = basename(entry.name, ".md")
const commandName = prefix ? `${prefix}/${baseCommandName}` : baseCommandName
const commandName = prefix ? `${prefix}:${baseCommandName}` : baseCommandName
try {
const content = await fs.readFile(commandPath, "utf-8")
@@ -103,25 +99,9 @@ $ARGUMENTS
return commands
}
function deduplicateLoadedCommandsByName(commands: LoadedCommand[]): LoadedCommand[] {
const seen = new Set<string>()
const deduplicatedCommands: LoadedCommand[] = []
for (const command of commands) {
if (seen.has(command.name)) {
continue
}
seen.add(command.name)
deduplicatedCommands.push(command)
}
return deduplicatedCommands
}
function commandsToRecord(commands: LoadedCommand[]): Record<string, CommandDefinition> {
const result: Record<string, CommandDefinition> = {}
for (const cmd of deduplicateLoadedCommandsByName(commands)) {
for (const cmd of commands) {
const { name: _name, argumentHint: _argumentHint, ...openCodeCompatible } = cmd.definition
result[cmd.name] = openCodeCompatible as CommandDefinition
}
@@ -141,21 +121,16 @@ export async function loadProjectCommands(directory?: string): Promise<Record<st
}
export async function loadOpencodeGlobalCommands(): Promise<Record<string, CommandDefinition>> {
const opencodeCommandDirs = getOpenCodeCommandDirs({ binary: "opencode" })
const allCommands = await Promise.all(
opencodeCommandDirs.map((commandsDir) => loadCommandsFromDir(commandsDir, "opencode")),
)
return commandsToRecord(allCommands.flat())
const configDir = getOpenCodeConfigDir({ binary: "opencode" })
const opencodeCommandsDir = join(configDir, "command")
const commands = await loadCommandsFromDir(opencodeCommandsDir, "opencode")
return commandsToRecord(commands)
}
export async function loadOpencodeProjectCommands(directory?: string): Promise<Record<string, CommandDefinition>> {
const opencodeProjectDirs = findProjectOpencodeCommandDirs(directory ?? process.cwd())
const allCommands = await Promise.all(
opencodeProjectDirs.map((commandsDir) =>
loadCommandsFromDir(commandsDir, "opencode-project"),
),
)
return commandsToRecord(allCommands.flat())
const opencodeProjectDir = join(directory ?? process.cwd(), ".opencode", "command")
const commands = await loadCommandsFromDir(opencodeProjectDir, "opencode-project")
return commandsToRecord(commands)
}
export async function loadAllCommands(directory?: string): Promise<Record<string, CommandDefinition>> {

View File

@@ -10,7 +10,6 @@ import type {
} from "./types"
import { transformMcpServer } from "./transformer"
import { log } from "../../shared/logger"
import { shouldLoadMcpServer } from "./scope-filter"
interface McpConfigPath {
path: string
@@ -76,7 +75,6 @@ export async function loadMcpConfigs(
const loadedServers: LoadedMcpServer[] = []
const paths = getMcpConfigPaths()
const disabledSet = new Set(disabledMcps)
const cwd = process.cwd()
for (const { path, scope } of paths) {
const config = await loadMcpConfigFile(path)
@@ -88,15 +86,6 @@ export async function loadMcpConfigs(
continue
}
if (!shouldLoadMcpServer(serverConfig, cwd)) {
log(`Skipping MCP server "${name}" because local scope does not match cwd`, {
path,
projectPath: serverConfig.projectPath,
cwd,
})
continue
}
if (serverConfig.disabled) {
log(`Disabling MCP server "${name}"`, { path })
delete servers[name]

View File

@@ -1,28 +0,0 @@
import { existsSync, realpathSync } from "fs"
import { resolve } from "path"
import type { ClaudeCodeMcpServer } from "./types"
/**
 * Resolves a path to absolute form; when it exists on disk the real
 * (symlink-free) path is returned, otherwise the resolved path as-is.
 */
function normalizePath(path: string): string {
  const absolutePath = resolve(path)
  return existsSync(absolutePath) ? realpathSync(absolutePath) : absolutePath
}
/**
 * Decides whether an MCP server entry applies to the current working
 * directory. Non-"local" entries always load; "local" entries load only
 * when their projectPath normalizes to the same location as cwd
 * (a local entry without a projectPath never loads).
 */
export function shouldLoadMcpServer(
  server: Pick<ClaudeCodeMcpServer, "scope" | "projectPath">,
  cwd = process.cwd()
): boolean {
  if (server.scope !== "local") {
    return true
  }
  const { projectPath } = server
  return projectPath ? normalizePath(projectPath) === normalizePath(cwd) : false
}

View File

@@ -1,82 +0,0 @@
import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"
import { mkdirSync, rmSync, writeFileSync } from "fs"
import { tmpdir } from "os"
import { join } from "path"
const TEST_DIR = join(tmpdir(), `mcp-scope-filtering-test-${Date.now()}`)
const TEST_HOME = join(TEST_DIR, "home")
describe("loadMcpConfigs", () => {
beforeEach(() => {
mkdirSync(TEST_DIR, { recursive: true })
mkdirSync(TEST_HOME, { recursive: true })
mock.module("os", () => ({
homedir: () => TEST_HOME,
tmpdir,
}))
mock.module("../../shared", () => ({
getClaudeConfigDir: () => join(TEST_HOME, ".claude"),
}))
mock.module("../../shared/logger", () => ({
log: () => {},
}))
})
afterEach(() => {
mock.restore()
rmSync(TEST_DIR, { recursive: true, force: true })
})
describe("#given user-scoped MCP entries with local scope metadata", () => {
it("#when loading configs #then only servers matching the current project path are loaded", async () => {
writeFileSync(
join(TEST_HOME, ".claude.json"),
JSON.stringify({
mcpServers: {
globalServer: {
command: "npx",
args: ["global-server"],
},
matchingLocal: {
command: "npx",
args: ["matching-local"],
scope: "local",
projectPath: TEST_DIR,
},
nonMatchingLocal: {
command: "npx",
args: ["non-matching-local"],
scope: "local",
projectPath: join(TEST_DIR, "other-project"),
},
missingProjectPath: {
command: "npx",
args: ["missing-project-path"],
scope: "local",
},
},
})
)
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const { loadMcpConfigs } = await import("./loader")
const result = await loadMcpConfigs()
expect(result.servers).toHaveProperty("globalServer")
expect(result.servers).toHaveProperty("matchingLocal")
expect(result.servers).not.toHaveProperty("nonMatchingLocal")
expect(result.servers).not.toHaveProperty("missingProjectPath")
expect(result.loadedServers.map((server) => server.name)).toEqual([
"globalServer",
"matchingLocal",
])
} finally {
process.chdir(originalCwd)
}
})
})
})

View File

@@ -1,29 +0,0 @@
import { describe, expect, it } from "bun:test"
import { transformMcpServer } from "./transformer"
describe("transformMcpServer", () => {
describe("#given a remote MCP server with oauth config", () => {
it("#when transforming the server #then preserves oauth on the remote config", () => {
const transformed = transformMcpServer("remote-oauth", {
type: "http",
url: "https://mcp.example.com",
headers: { Authorization: "Bearer test" },
oauth: {
clientId: "client-id",
scopes: ["read", "write"],
},
})
expect(transformed).toEqual({
type: "remote",
url: "https://mcp.example.com",
headers: { Authorization: "Bearer test" },
oauth: {
clientId: "client-id",
scopes: ["read", "write"],
},
enabled: true,
})
})
})
})

View File

@@ -30,10 +30,6 @@ export function transformMcpServer(
config.headers = expanded.headers
}
if (expanded.oauth && Object.keys(expanded.oauth).length > 0) {
config.oauth = expanded.oauth
}
return config
}

View File

@@ -1,10 +1,5 @@
export type McpScope = "user" | "project" | "local"
export interface McpOAuthConfig {
clientId?: string
scopes?: string[]
}
export interface ClaudeCodeMcpServer {
type?: "http" | "sse" | "stdio"
url?: string
@@ -12,9 +7,10 @@ export interface ClaudeCodeMcpServer {
args?: string[]
env?: Record<string, string>
headers?: Record<string, string>
oauth?: McpOAuthConfig
scope?: McpScope
projectPath?: string
oauth?: {
clientId?: string
scopes?: string[]
}
disabled?: boolean
}
@@ -33,7 +29,6 @@ export interface McpRemoteConfig {
type: "remote"
url: string
headers?: Record<string, string>
oauth?: McpOAuthConfig
enabled?: boolean
}

View File

@@ -101,39 +101,4 @@ describe("discoverInstalledPlugins", () => {
expect(discovered.plugins).toHaveLength(1)
expect(discovered.plugins[0]?.name).toBe("oh-my-opencode")
})
it("derives canonical package name from npm plugin keys", () => {
//#given
const pluginsHome = process.env.CLAUDE_PLUGINS_HOME as string
const installPath = join(createTemporaryDirectory("omo-plugin-install-"), "oh-my-openagent")
mkdirSync(installPath, { recursive: true })
const databasePath = join(pluginsHome, "installed_plugins.json")
writeFileSync(
databasePath,
JSON.stringify({
version: 2,
plugins: {
"oh-my-openagent@3.13.1": [
{
scope: "user",
installPath,
version: "3.13.1",
installedAt: "2026-03-26T00:00:00Z",
lastUpdated: "2026-03-26T00:00:00Z",
},
],
},
}),
"utf-8",
)
//#when
const discovered = discoverInstalledPlugins()
//#then
expect(discovered.errors).toHaveLength(0)
expect(discovered.plugins).toHaveLength(1)
expect(discovered.plugins[0]?.name).toBe("oh-my-openagent")
})
})

View File

@@ -1,76 +0,0 @@
import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"
import { mkdirSync, rmSync, writeFileSync } from "fs"
import { tmpdir } from "os"
import { join } from "path"
import type { LoadedPlugin } from "./types"
const TEST_DIR = join(tmpdir(), `plugin-mcp-loader-test-${Date.now()}`)
const PROJECT_DIR = join(TEST_DIR, "project")
const PLUGIN_DIR = join(TEST_DIR, "plugin")
const MCP_CONFIG_PATH = join(PLUGIN_DIR, "mcp.json")
describe("loadPluginMcpServers", () => {
beforeEach(() => {
mkdirSync(PROJECT_DIR, { recursive: true })
mkdirSync(PLUGIN_DIR, { recursive: true })
mock.module("../../shared/logger", () => ({
log: () => {},
}))
})
afterEach(() => {
mock.restore()
rmSync(TEST_DIR, { recursive: true, force: true })
})
describe("#given plugin MCP entries with local scope metadata", () => {
it("#when loading plugin MCP servers #then only entries matching the current cwd are included", async () => {
writeFileSync(
MCP_CONFIG_PATH,
JSON.stringify({
mcpServers: {
globalServer: {
command: "npx",
args: ["global-plugin-server"],
},
matchingLocal: {
command: "npx",
args: ["matching-plugin-local"],
scope: "local",
projectPath: PROJECT_DIR,
},
nonMatchingLocal: {
command: "npx",
args: ["non-matching-plugin-local"],
scope: "local",
projectPath: join(PROJECT_DIR, "other-project"),
},
},
})
)
const plugin: LoadedPlugin = {
name: "demo-plugin",
version: "1.0.0",
scope: "project",
installPath: PLUGIN_DIR,
pluginKey: "demo-plugin@test",
mcpPath: MCP_CONFIG_PATH,
}
const originalCwd = process.cwd()
process.chdir(PROJECT_DIR)
try {
const { loadPluginMcpServers } = await import("./mcp-server-loader")
const servers = await loadPluginMcpServers([plugin])
expect(servers).toHaveProperty("demo-plugin:globalServer")
expect(servers).toHaveProperty("demo-plugin:matchingLocal")
expect(servers).not.toHaveProperty("demo-plugin:nonMatchingLocal")
} finally {
process.chdir(originalCwd)
}
})
})
})

View File

@@ -1,7 +1,6 @@
import { existsSync } from "fs"
import type { McpServerConfig } from "../claude-code-mcp-loader/types"
import { expandEnvVarsInObject } from "../claude-code-mcp-loader/env-expander"
import { shouldLoadMcpServer } from "../claude-code-mcp-loader/scope-filter"
import { transformMcpServer } from "../claude-code-mcp-loader/transformer"
import type { ClaudeCodeMcpConfig } from "../claude-code-mcp-loader/types"
import { log } from "../../shared/logger"
@@ -12,7 +11,6 @@ export async function loadPluginMcpServers(
plugins: LoadedPlugin[],
): Promise<Record<string, McpServerConfig>> {
const servers: Record<string, McpServerConfig> = {}
const cwd = process.cwd()
for (const plugin of plugins) {
if (!plugin.mcpPath || !existsSync(plugin.mcpPath)) continue
@@ -27,15 +25,6 @@ export async function loadPluginMcpServers(
if (!config.mcpServers) continue
for (const [name, serverConfig] of Object.entries(config.mcpServers)) {
if (!shouldLoadMcpServer(serverConfig, cwd)) {
log(`Skipping local plugin MCP server "${name}" outside current cwd`, {
path: plugin.mcpPath,
projectPath: serverConfig.projectPath,
cwd,
})
continue
}
if (serverConfig.disabled) {
log(`Skipping disabled MCP server "${name}" from plugin ${plugin.name}`)
continue

View File

@@ -11,23 +11,12 @@ export function getMainSessionID(): string | undefined {
return _mainSessionID
}
// Lowercased names of every agent registered so far.
const registeredAgentNames = new Set<string>()

// Case-insensitive lookups share a single normalization rule.
const normalizeAgentName = (name: string): string => name.toLowerCase()

/** Records an agent name as registered (case-insensitively). */
export function registerAgentName(name: string): void {
  registeredAgentNames.add(normalizeAgentName(name))
}

/** Reports whether an agent name was previously registered, ignoring case. */
export function isAgentRegistered(name: string): boolean {
  return registeredAgentNames.has(normalizeAgentName(name))
}
/**
 * @internal For testing only — resets every piece of module-level session
 * state (main session id, subagent session sets, session→agent map, and the
 * registered agent-name set) so each test starts from a clean slate.
 */
export function _resetForTesting(): void {
_mainSessionID = undefined
subagentSessions.clear()
syncSubagentSessions.clear()
sessionAgentMap.clear()
registeredAgentNames.clear()
}
const sessionAgentMap = new Map<string, string>()

View File

@@ -1,7 +1,7 @@
import { afterEach, beforeEach, describe, expect, it } from "bun:test"
import { mkdirSync, rmSync, writeFileSync } from "fs"
import { join } from "path"
import { homedir, tmpdir } from "os"
import { tmpdir } from "os"
import { SkillsConfigSchema } from "../../config/schema/skills"
import { discoverConfigSourceSkills, normalizePathForGlob } from "./config-source-discovery"
@@ -69,28 +69,6 @@ describe("config source discovery", () => {
expect(names).not.toContain("skip/skipped-skill")
})
it("loads skills from ~/ sources path", async () => {
// given
const homeSkillsDir = join(homedir(), `.omo-config-source-${Date.now()}`)
writeSkill(join(homeSkillsDir, "tilde-skill"), "tilde-skill", "Loaded from tilde path")
const config = SkillsConfigSchema.parse({
sources: [{ path: `~/${homeSkillsDir.split(homedir())[1]?.replace(/^\//, "")}`, recursive: true }],
})
try {
// when
const skills = await discoverConfigSourceSkills({
config,
configDir: join(TEST_DIR, "config"),
})
// then
expect(skills.some((skill) => skill.name === "tilde-skill")).toBe(true)
} finally {
rmSync(homeSkillsDir, { recursive: true, force: true })
}
})
it("normalizes windows separators before glob matching", () => {
// given
const windowsPath = "keep\\nested\\SKILL.md"

View File

@@ -1,5 +1,4 @@
import { promises as fs } from "fs"
import { homedir } from "os"
import { dirname, extname, isAbsolute, join, relative } from "path"
import picomatch from "picomatch"
import type { SkillsConfig } from "../../config/schema"
@@ -16,14 +15,6 @@ function isHttpUrl(path: string): boolean {
}
function toAbsolutePath(path: string, configDir: string): string {
if (path === "~") {
return homedir()
}
if (path.startsWith("~/")) {
return join(homedir(), path.slice(2))
}
if (isAbsolute(path)) {
return path
}

View File

@@ -3,13 +3,9 @@ import { parseFrontmatter } from "../../shared/frontmatter"
import type { LoadedSkill } from "./types"
export function extractSkillTemplate(skill: LoadedSkill): string {
if (skill.scope === "config" && skill.definition.template) {
return skill.definition.template
}
if (skill.path) {
const content = readFileSync(skill.path, "utf-8")
const { body } = parseFrontmatter(content)
if (skill.path) {
const content = readFileSync(skill.path, "utf-8")
const { body } = parseFrontmatter(content)
return body.trim()
}
return skill.definition.template || ""

View File

@@ -615,92 +615,5 @@ Skill body.
expect(skill).toBeDefined()
expect(skill?.scope).toBe("project")
})
it("#given a skill in ancestor .agents/skills/ #when discoverProjectAgentsSkills is called from child directory #then it discovers the ancestor skill", async () => {
// given
const skillContent = `---
name: ancestor-agent-skill
description: A skill from ancestor .agents/skills directory
---
Skill body.
`
const projectDir = join(TEST_DIR, "project")
const childDir = join(projectDir, "apps", "worker")
const agentsProjectSkillsDir = join(projectDir, ".agents", "skills")
const skillDir = join(agentsProjectSkillsDir, "ancestor-agent-skill")
mkdirSync(childDir, { recursive: true })
mkdirSync(skillDir, { recursive: true })
writeFileSync(join(skillDir, "SKILL.md"), skillContent)
// when
const { discoverProjectAgentsSkills } = await import("./loader")
const skills = await discoverProjectAgentsSkills(childDir)
const skill = skills.find((candidate) => candidate.name === "ancestor-agent-skill")
// then
expect(skill).toBeDefined()
expect(skill?.scope).toBe("project")
})
})
describe("opencode project skill discovery", () => {
it("#given a skill in ancestor .opencode/skills/ #when discoverOpencodeProjectSkills is called from child directory #then it discovers the ancestor skill", async () => {
// given
const skillContent = `---
name: ancestor-opencode-skill
description: A skill from ancestor .opencode/skills directory
---
Skill body.
`
const projectDir = join(TEST_DIR, "project")
const childDir = join(projectDir, "packages", "cli")
const skillsDir = join(projectDir, ".opencode", "skills", "ancestor-opencode-skill")
mkdirSync(childDir, { recursive: true })
mkdirSync(skillsDir, { recursive: true })
writeFileSync(join(skillsDir, "SKILL.md"), skillContent)
// when
const { discoverOpencodeProjectSkills } = await import("./loader")
const skills = await discoverOpencodeProjectSkills(childDir)
const skill = skills.find((candidate) => candidate.name === "ancestor-opencode-skill")
// then
expect(skill).toBeDefined()
expect(skill?.scope).toBe("opencode-project")
})
it("#given a skill in .opencode/skill/ #when discoverOpencodeProjectSkills is called #then it discovers the singular alias directory", async () => {
// given
const skillContent = `---
name: singular-opencode-skill
description: A skill from .opencode/skill directory
---
Skill body.
`
const singularSkillDir = join(
TEST_DIR,
".opencode",
"skill",
"singular-opencode-skill",
)
mkdirSync(singularSkillDir, { recursive: true })
writeFileSync(join(singularSkillDir, "SKILL.md"), skillContent)
// when
const { discoverOpencodeProjectSkills } = await import("./loader")
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const skills = await discoverOpencodeProjectSkills()
const skill = skills.find((candidate) => candidate.name === "singular-opencode-skill")
// then
expect(skill).toBeDefined()
expect(skill?.scope).toBe("opencode-project")
} finally {
process.chdir(originalCwd)
}
})
})
})

View File

@@ -3,11 +3,6 @@ import { homedir } from "os"
import { getClaudeConfigDir } from "../../shared/claude-config-dir"
import { getOpenCodeConfigDir } from "../../shared/opencode-config-dir"
import { getOpenCodeSkillDirs } from "../../shared/opencode-command-dirs"
import {
findProjectAgentsSkillDirs,
findProjectClaudeSkillDirs,
findProjectOpencodeSkillDirs,
} from "../../shared/project-discovery-dirs"
import type { CommandDefinition } from "../claude-code-command-loader/types"
import type { LoadedSkill } from "./types"
import { skillsToCommandDefinitionRecord } from "./skill-definition-record"
@@ -21,11 +16,9 @@ export async function loadUserSkills(): Promise<Record<string, CommandDefinition
}
export async function loadProjectSkills(directory?: string): Promise<Record<string, CommandDefinition>> {
const projectSkillDirs = findProjectClaudeSkillDirs(directory ?? process.cwd())
const allSkills = await Promise.all(
projectSkillDirs.map((skillsDir) => loadSkillsFromDir({ skillsDir, scope: "project" })),
)
return skillsToCommandDefinitionRecord(deduplicateSkillsByName(allSkills.flat()))
const projectSkillsDir = join(directory ?? process.cwd(), ".claude", "skills")
const skills = await loadSkillsFromDir({ skillsDir: projectSkillsDir, scope: "project" })
return skillsToCommandDefinitionRecord(skills)
}
export async function loadOpencodeGlobalSkills(): Promise<Record<string, CommandDefinition>> {
@@ -37,28 +30,8 @@ export async function loadOpencodeGlobalSkills(): Promise<Record<string, Command
}
export async function loadOpencodeProjectSkills(directory?: string): Promise<Record<string, CommandDefinition>> {
const opencodeProjectSkillDirs = findProjectOpencodeSkillDirs(
directory ?? process.cwd(),
)
const allSkills = await Promise.all(
opencodeProjectSkillDirs.map((skillsDir) =>
loadSkillsFromDir({ skillsDir, scope: "opencode-project" }),
),
)
return skillsToCommandDefinitionRecord(deduplicateSkillsByName(allSkills.flat()))
}
export async function loadProjectAgentsSkills(directory?: string): Promise<Record<string, CommandDefinition>> {
const agentsProjectSkillDirs = findProjectAgentsSkillDirs(directory ?? process.cwd())
const allSkills = await Promise.all(
agentsProjectSkillDirs.map((skillsDir) => loadSkillsFromDir({ skillsDir, scope: "project" })),
)
return skillsToCommandDefinitionRecord(deduplicateSkillsByName(allSkills.flat()))
}
export async function loadGlobalAgentsSkills(): Promise<Record<string, CommandDefinition>> {
const agentsGlobalDir = join(homedir(), ".agents", "skills")
const skills = await loadSkillsFromDir({ skillsDir: agentsGlobalDir, scope: "user" })
const opencodeProjectDir = join(directory ?? process.cwd(), ".opencode", "skills")
const skills = await loadSkillsFromDir({ skillsDir: opencodeProjectDir, scope: "opencode-project" })
return skillsToCommandDefinitionRecord(skills)
}
@@ -131,11 +104,8 @@ export async function discoverUserClaudeSkills(): Promise<LoadedSkill[]> {
}
export async function discoverProjectClaudeSkills(directory?: string): Promise<LoadedSkill[]> {
const projectSkillDirs = findProjectClaudeSkillDirs(directory ?? process.cwd())
const allSkills = await Promise.all(
projectSkillDirs.map((skillsDir) => loadSkillsFromDir({ skillsDir, scope: "project" })),
)
return deduplicateSkillsByName(allSkills.flat())
const projectSkillsDir = join(directory ?? process.cwd(), ".claude", "skills")
return loadSkillsFromDir({ skillsDir: projectSkillsDir, scope: "project" })
}
export async function discoverOpencodeGlobalSkills(): Promise<LoadedSkill[]> {
@@ -147,23 +117,13 @@ export async function discoverOpencodeGlobalSkills(): Promise<LoadedSkill[]> {
}
export async function discoverOpencodeProjectSkills(directory?: string): Promise<LoadedSkill[]> {
const opencodeProjectSkillDirs = findProjectOpencodeSkillDirs(
directory ?? process.cwd(),
)
const allSkills = await Promise.all(
opencodeProjectSkillDirs.map((skillsDir) =>
loadSkillsFromDir({ skillsDir, scope: "opencode-project" }),
),
)
return deduplicateSkillsByName(allSkills.flat())
const opencodeProjectDir = join(directory ?? process.cwd(), ".opencode", "skills")
return loadSkillsFromDir({ skillsDir: opencodeProjectDir, scope: "opencode-project" })
}
export async function discoverProjectAgentsSkills(directory?: string): Promise<LoadedSkill[]> {
const agentsProjectSkillDirs = findProjectAgentsSkillDirs(directory ?? process.cwd())
const allSkills = await Promise.all(
agentsProjectSkillDirs.map((skillsDir) => loadSkillsFromDir({ skillsDir, scope: "project" })),
)
return deduplicateSkillsByName(allSkills.flat())
const agentsProjectDir = join(directory ?? process.cwd(), ".agents", "skills")
return loadSkillsFromDir({ skillsDir: agentsProjectDir, scope: "project" })
}
export async function discoverGlobalAgentsSkills(): Promise<LoadedSkill[]> {

View File

@@ -1,86 +0,0 @@
import { execFileSync } from "node:child_process"
import { afterEach, beforeEach, describe, expect, it } from "bun:test"
import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from "node:fs"
import { tmpdir } from "node:os"
import { join } from "node:path"
import {
discoverOpencodeProjectSkills,
discoverProjectAgentsSkills,
discoverProjectClaudeSkills,
} from "./loader"
/**
 * Creates `directory` (recursively) and writes a minimal SKILL.md fixture
 * with the given frontmatter name/description and a stub body.
 */
function writeSkill(directory: string, name: string, description: string): void {
  const frontmatter = `---\nname: ${name}\ndescription: ${description}\n---\nBody\n`
  mkdirSync(directory, { recursive: true })
  const skillFilePath = join(directory, "SKILL.md")
  writeFileSync(skillFilePath, frontmatter)
}
describe("project skill discovery", () => {
let tempDir = ""
beforeEach(() => {
tempDir = mkdtempSync(join(tmpdir(), "omo-project-skill-discovery-"))
})
afterEach(() => {
rmSync(tempDir, { recursive: true, force: true })
})
it("discovers ancestor project skill directories up to the worktree root", async () => {
// given
const repositoryDir = join(tempDir, "repo")
const nestedDirectory = join(repositoryDir, "packages", "app", "src")
mkdirSync(nestedDirectory, { recursive: true })
execFileSync("git", ["init"], {
cwd: repositoryDir,
stdio: ["ignore", "ignore", "ignore"],
})
writeSkill(
join(repositoryDir, ".claude", "skills", "repo-claude"),
"repo-claude",
"Discovered from the repository root",
)
writeSkill(
join(repositoryDir, ".agents", "skills", "repo-agents"),
"repo-agents",
"Discovered from the repository root",
)
writeSkill(
join(repositoryDir, ".opencode", "skill", "repo-opencode"),
"repo-opencode",
"Discovered from the repository root",
)
writeSkill(
join(tempDir, ".claude", "skills", "outside-claude"),
"outside-claude",
"Should stay outside the worktree",
)
writeSkill(
join(tempDir, ".agents", "skills", "outside-agents"),
"outside-agents",
"Should stay outside the worktree",
)
writeSkill(
join(tempDir, ".opencode", "skills", "outside-opencode"),
"outside-opencode",
"Should stay outside the worktree",
)
// when
const [claudeSkills, agentSkills, opencodeSkills] = await Promise.all([
discoverProjectClaudeSkills(nestedDirectory),
discoverProjectAgentsSkills(nestedDirectory),
discoverOpencodeProjectSkills(nestedDirectory),
])
// then
expect(claudeSkills.map(skill => skill.name)).toEqual(["repo-claude"])
expect(agentSkills.map(skill => skill.name)).toEqual(["repo-agents"])
expect(opencodeSkills.map(skill => skill.name)).toEqual(["repo-opencode"])
})
})

Some files were not shown because too many files have changed in this diff. Show More