commit
78dead335d
34
CHANGELOG.md
34
CHANGELOG.md
@ -6,6 +6,40 @@ All notable changes to this project will be documented in this file.
|
|||||||
|
|
||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
|
||||||
|
|
||||||
|
## [0.20.0] - 2026-02-19
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- **Faceted Prompting module** (`src/faceted-prompting/`): Standalone library for facet composition, resolution, template rendering, and truncation — zero dependencies on TAKT internals. Includes `DataEngine` interface with `FileDataEngine` and `CompositeDataEngine` implementations for pluggable facet storage
|
||||||
|
- **Analytics module** (`src/features/analytics/`): Local-only review quality metrics collection — event types (review findings, fix actions, movement results), JSONL writer with date-based rotation, report parser, and metrics computation
|
||||||
|
- **`takt metrics review` command**: Display review quality metrics (re-report counts, round-trip ratio, resolution iterations, REJECT counts by rule, rebuttal resolution ratio) with configurable time window (`--since`)
|
||||||
|
- **`takt purge` command**: Purge old analytics event files with configurable retention period (`--retention-days`)
|
||||||
|
- **`takt reset config` command**: Reset global config to builtin template with automatic backup of the existing config
|
||||||
|
- **PR duplicate prevention**: When a PR already exists for the current branch, push and comment on the existing PR instead of creating a duplicate (#304)
|
||||||
|
- Retry mode now positions the cursor on the failed movement when selecting which movement to retry
|
||||||
|
- E2E tests for run-recovery and config-priority scenarios
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- **README overhaul**: Compressed from ~950 lines to ~270 lines — details split into dedicated docs (`docs/configuration.md`, `docs/cli-reference.md`, `docs/task-management.md`, `docs/ci-cd.md`, `docs/builtin-catalog.md`) with Japanese equivalents. Redefined product concept around 4 value axes: batteries included, practical, reproducible, multi-agent
|
||||||
|
- **Config system refactored**: Unified configuration resolution to `resolveConfigValue()` and `loadConfig()`, eliminating scattered config access patterns across the codebase
|
||||||
|
- **`takt config` command removed**: Replaced by `takt reset config` for resetting to defaults
|
||||||
|
- Builtin config templates refreshed with updated comments and structure
|
||||||
|
- `@anthropic-ai/claude-agent-sdk` updated to v0.2.47
|
||||||
|
- Instruct mode prompt improvements for task re-instruction
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fixed issue where builtin piece file references used absolute path instead of relative (#304)
|
||||||
|
- Removed unused imports and variables across multiple files
|
||||||
|
|
||||||
|
### Internal
|
||||||
|
|
||||||
|
- Unified `loadConfig`, `resolveConfigValue`, piece config resolution, and config priority paths
|
||||||
|
- Added E2E tests for config priority and run recovery scenarios
|
||||||
|
- Added `postExecution.test.ts` for PR creation flow testing
|
||||||
|
- Cleaned up unused imports and variables
|
||||||
|
|
||||||
## [0.19.0] - 2026-02-18
|
## [0.19.0] - 2026-02-18
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|||||||
@ -1,105 +1,91 @@
|
|||||||
# TAKT global configuration sample
|
# TAKT global configuration sample
|
||||||
# Location: ~/.takt/config.yaml
|
# Location: ~/.takt/config.yaml
|
||||||
|
|
||||||
# ---- Core ----
|
# =====================================
|
||||||
language: en
|
# General settings (piece-independent)
|
||||||
default_piece: default
|
# =====================================
|
||||||
log_level: info
|
language: en # UI language: en | ja
|
||||||
|
log_level: info # Log level: debug | info | warn | error
|
||||||
|
provider: claude # Default provider: claude | codex | opencode | mock
|
||||||
|
# model: sonnet # Optional model name passed to provider
|
||||||
|
|
||||||
# ---- Provider ----
|
# Execution control
|
||||||
# provider: claude | codex | opencode | mock
|
# worktree_dir: ~/takt-worktrees # Base directory for shared clone execution
|
||||||
provider: claude
|
# auto_pr: false # Auto-create PR after worktree execution
|
||||||
|
branch_name_strategy: ai # Branch strategy: romaji | ai
|
||||||
|
concurrency: 2 # Concurrent task execution for takt run (1-10)
|
||||||
|
# task_poll_interval_ms: 500 # Polling interval in ms during takt run (100-5000)
|
||||||
|
# prevent_sleep: false # Prevent macOS idle sleep while running
|
||||||
|
|
||||||
# Model (optional)
|
# Output / notifications
|
||||||
# Claude examples: opus, sonnet, haiku
|
# minimal_output: false # Minimized output for CI logs
|
||||||
# Codex examples: gpt-5.2-codex, gpt-5.1-codex
|
# verbose: false # Verbose output mode
|
||||||
# OpenCode format: provider/model
|
# notification_sound: true # Master switch for sounds
|
||||||
# model: sonnet
|
# notification_sound_events: # Per-event sound toggle (unset means true)
|
||||||
|
|
||||||
# Per-persona provider override
|
|
||||||
# persona_providers:
|
|
||||||
# coder: codex
|
|
||||||
# reviewer: claude
|
|
||||||
|
|
||||||
# Provider-specific movement permission policy
|
|
||||||
# Priority:
|
|
||||||
# 1) project provider_profiles override
|
|
||||||
# 2) global provider_profiles override
|
|
||||||
# 3) project provider_profiles default
|
|
||||||
# 4) global provider_profiles default
|
|
||||||
# 5) movement.required_permission_mode (minimum floor)
|
|
||||||
# provider_profiles:
|
|
||||||
# codex:
|
|
||||||
# default_permission_mode: full
|
|
||||||
# movement_permission_overrides:
|
|
||||||
# ai_review: readonly
|
|
||||||
# claude:
|
|
||||||
# default_permission_mode: edit
|
|
||||||
|
|
||||||
# Provider-specific runtime options
|
|
||||||
# provider_options:
|
|
||||||
# codex:
|
|
||||||
# network_access: true
|
|
||||||
# claude:
|
|
||||||
# sandbox:
|
|
||||||
# allow_unsandboxed_commands: true
|
|
||||||
|
|
||||||
# ---- API Keys ----
|
|
||||||
# Environment variables take priority:
|
|
||||||
# TAKT_ANTHROPIC_API_KEY / TAKT_OPENAI_API_KEY / TAKT_OPENCODE_API_KEY
|
|
||||||
# anthropic_api_key: ""
|
|
||||||
# openai_api_key: ""
|
|
||||||
# opencode_api_key: ""
|
|
||||||
|
|
||||||
# ---- Runtime ----
|
|
||||||
# Global runtime preparation (piece_config.runtime overrides this)
|
|
||||||
# runtime:
|
|
||||||
# prepare:
|
|
||||||
# - gradle
|
|
||||||
# - node
|
|
||||||
|
|
||||||
# ---- Execution ----
|
|
||||||
# worktree_dir: ~/takt-worktrees
|
|
||||||
# auto_pr: false
|
|
||||||
# prevent_sleep: false
|
|
||||||
|
|
||||||
# ---- Run Loop ----
|
|
||||||
# concurrency: 1
|
|
||||||
# task_poll_interval_ms: 500
|
|
||||||
# interactive_preview_movements: 3
|
|
||||||
# branch_name_strategy: romaji
|
|
||||||
|
|
||||||
# ---- Output ----
|
|
||||||
# minimal_output: false
|
|
||||||
# notification_sound: true
|
|
||||||
# notification_sound_events:
|
|
||||||
# iteration_limit: true
|
# iteration_limit: true
|
||||||
# piece_complete: true
|
# piece_complete: true
|
||||||
# piece_abort: true
|
# piece_abort: true
|
||||||
# run_complete: true
|
# run_complete: true
|
||||||
# run_abort: true
|
# run_abort: true
|
||||||
# observability:
|
# observability:
|
||||||
# provider_events: true
|
# provider_events: false # Persist provider stream events
|
||||||
|
|
||||||
# ---- Builtins ----
|
# Credentials (environment variables take priority)
|
||||||
# enable_builtin_pieces: true
|
# anthropic_api_key: "sk-ant-..." # Claude API key
|
||||||
# disabled_builtins:
|
# openai_api_key: "sk-..." # Codex/OpenAI API key
|
||||||
# - magi
|
# opencode_api_key: "..." # OpenCode API key
|
||||||
|
# codex_cli_path: "/absolute/path/to/codex" # Absolute path to Codex CLI
|
||||||
|
|
||||||
# ---- Pipeline ----
|
# Pipeline
|
||||||
# pipeline:
|
# pipeline:
|
||||||
# default_branch_prefix: "takt/"
|
# default_branch_prefix: "takt/" # Prefix for pipeline-created branches
|
||||||
# commit_message_template: "feat: {title} (#{issue})"
|
# commit_message_template: "feat: {title} (#{issue})" # Commit template
|
||||||
# pr_body_template: |
|
# pr_body_template: | # PR body template
|
||||||
# ## Summary
|
# ## Summary
|
||||||
# {issue_body}
|
# {issue_body}
|
||||||
# Closes #{issue}
|
# Closes #{issue}
|
||||||
|
|
||||||
# ---- Preferences ----
|
# Misc
|
||||||
# bookmarks_file: ~/.takt/preferences/bookmarks.yaml
|
# bookmarks_file: ~/.takt/preferences/bookmarks.yaml # Bookmark file location
|
||||||
# piece_categories_file: ~/.takt/preferences/piece-categories.yaml
|
|
||||||
|
|
||||||
# ---- Debug ----
|
# =====================================
|
||||||
# debug:
|
# Piece-related settings (global defaults)
|
||||||
# enabled: false
|
# =====================================
|
||||||
# log_file: ~/.takt/logs/debug.log
|
# 1) Route provider per persona
|
||||||
|
# persona_providers:
|
||||||
|
# coder: codex # Run coder persona on codex
|
||||||
|
# reviewer: claude # Run reviewer persona on claude
|
||||||
|
|
||||||
|
# 2) Provider options (global < project < piece)
|
||||||
|
# provider_options:
|
||||||
|
# codex:
|
||||||
|
# network_access: true # Allow network access for Codex
|
||||||
|
# opencode:
|
||||||
|
# network_access: true # Allow network access for OpenCode
|
||||||
|
# claude:
|
||||||
|
# sandbox:
|
||||||
|
# allow_unsandboxed_commands: false # true allows unsandboxed execution for listed commands
|
||||||
|
# excluded_commands:
|
||||||
|
# - "npm publish" # Commands excluded from sandbox
|
||||||
|
|
||||||
|
# 3) Movement permission policy
|
||||||
|
# provider_profiles:
|
||||||
|
# codex:
|
||||||
|
# default_permission_mode: full # Base permission: readonly | edit | full
|
||||||
|
# movement_permission_overrides:
|
||||||
|
# ai_review: readonly # Per-movement override
|
||||||
|
# claude:
|
||||||
|
# default_permission_mode: edit
|
||||||
|
|
||||||
|
# 4) Runtime preparation before execution (recommended: enabled)
|
||||||
|
runtime:
|
||||||
|
prepare:
|
||||||
|
- gradle # Prepare Gradle cache/env under .runtime
|
||||||
|
- node # Prepare npm cache/env under .runtime
|
||||||
|
|
||||||
|
# 5) Piece list / categories
|
||||||
|
# enable_builtin_pieces: true # Enable built-in pieces from builtins/{lang}/pieces
|
||||||
|
# disabled_builtins:
|
||||||
|
# - magi # Built-in piece names to disable
|
||||||
|
# piece_categories_file: ~/.takt/preferences/piece-categories.yaml # Category definition file
|
||||||
|
# interactive_preview_movements: 3 # Preview movement count in interactive mode (0-10)
|
||||||
|
|||||||
@ -1,105 +1,91 @@
|
|||||||
# TAKT グローバル設定サンプル
|
# TAKT グローバル設定サンプル
|
||||||
# 配置場所: ~/.takt/config.yaml
|
# 配置場所: ~/.takt/config.yaml
|
||||||
|
|
||||||
# ---- 基本 ----
|
# =====================================
|
||||||
language: ja
|
# 通常設定(ピース非依存)
|
||||||
default_piece: default
|
# =====================================
|
||||||
log_level: info
|
language: ja # 表示言語: ja | en
|
||||||
|
log_level: info # ログレベル: debug | info | warn | error
|
||||||
|
provider: claude # デフォルト実行プロバイダー: claude | codex | opencode | mock
|
||||||
|
# model: sonnet # 省略可。providerに渡すモデル名
|
||||||
|
|
||||||
# ---- プロバイダー ----
|
# 実行制御
|
||||||
# provider: claude | codex | opencode | mock
|
# worktree_dir: ~/takt-worktrees # 共有clone作成先ディレクトリ
|
||||||
provider: claude
|
# auto_pr: false # worktree実行後に自動PR作成するか
|
||||||
|
branch_name_strategy: ai # ブランチ名生成: romaji | ai
|
||||||
|
concurrency: 2 # takt run の同時実行数(1-10)
|
||||||
|
# task_poll_interval_ms: 500 # takt run のタスク監視間隔ms(100-5000)
|
||||||
|
# prevent_sleep: false # macOS実行中のスリープ防止(caffeinate)
|
||||||
|
|
||||||
# モデル(任意)
|
# 出力・通知
|
||||||
# Claude 例: opus, sonnet, haiku
|
# minimal_output: false # 出力を最小化(CI向け)
|
||||||
# Codex 例: gpt-5.2-codex, gpt-5.1-codex
|
# verbose: false # 詳細ログを有効化
|
||||||
# OpenCode 形式: provider/model
|
# notification_sound: true # 通知音全体のON/OFF
|
||||||
# model: sonnet
|
# notification_sound_events: # イベント別通知音(未指定はtrue扱い)
|
||||||
|
|
||||||
# ペルソナ別プロバイダー上書き
|
|
||||||
# persona_providers:
|
|
||||||
# coder: codex
|
|
||||||
# reviewer: claude
|
|
||||||
|
|
||||||
# プロバイダー別 movement 権限ポリシー
|
|
||||||
# 優先順:
|
|
||||||
# 1) project provider_profiles override
|
|
||||||
# 2) global provider_profiles override
|
|
||||||
# 3) project provider_profiles default
|
|
||||||
# 4) global provider_profiles default
|
|
||||||
# 5) movement.required_permission_mode(下限補正)
|
|
||||||
# provider_profiles:
|
|
||||||
# codex:
|
|
||||||
# default_permission_mode: full
|
|
||||||
# movement_permission_overrides:
|
|
||||||
# ai_review: readonly
|
|
||||||
# claude:
|
|
||||||
# default_permission_mode: edit
|
|
||||||
|
|
||||||
# プロバイダー別ランタイムオプション
|
|
||||||
# provider_options:
|
|
||||||
# codex:
|
|
||||||
# network_access: true
|
|
||||||
# claude:
|
|
||||||
# sandbox:
|
|
||||||
# allow_unsandboxed_commands: true
|
|
||||||
|
|
||||||
# ---- API キー ----
|
|
||||||
# 環境変数が優先:
|
|
||||||
# TAKT_ANTHROPIC_API_KEY / TAKT_OPENAI_API_KEY / TAKT_OPENCODE_API_KEY
|
|
||||||
# anthropic_api_key: ""
|
|
||||||
# openai_api_key: ""
|
|
||||||
# opencode_api_key: ""
|
|
||||||
|
|
||||||
# ---- ランタイム ----
|
|
||||||
# グローバルなランタイム準備(piece_config.runtime があればそちらを優先)
|
|
||||||
# runtime:
|
|
||||||
# prepare:
|
|
||||||
# - gradle
|
|
||||||
# - node
|
|
||||||
|
|
||||||
# ---- 実行 ----
|
|
||||||
# worktree_dir: ~/takt-worktrees
|
|
||||||
# auto_pr: false
|
|
||||||
# prevent_sleep: false
|
|
||||||
|
|
||||||
# ---- Run Loop ----
|
|
||||||
# concurrency: 1
|
|
||||||
# task_poll_interval_ms: 500
|
|
||||||
# interactive_preview_movements: 3
|
|
||||||
# branch_name_strategy: romaji
|
|
||||||
|
|
||||||
# ---- 出力 ----
|
|
||||||
# minimal_output: false
|
|
||||||
# notification_sound: true
|
|
||||||
# notification_sound_events:
|
|
||||||
# iteration_limit: true
|
# iteration_limit: true
|
||||||
# piece_complete: true
|
# piece_complete: true
|
||||||
# piece_abort: true
|
# piece_abort: true
|
||||||
# run_complete: true
|
# run_complete: true
|
||||||
# run_abort: true
|
# run_abort: true
|
||||||
# observability:
|
# observability:
|
||||||
# provider_events: true
|
# provider_events: false # providerイベントログを記録
|
||||||
|
|
||||||
# ---- Builtins ----
|
# 認証情報(環境変数優先)
|
||||||
# enable_builtin_pieces: true
|
# anthropic_api_key: "sk-ant-..." # Claude APIキー
|
||||||
# disabled_builtins:
|
# openai_api_key: "sk-..." # Codex APIキー
|
||||||
# - magi
|
# opencode_api_key: "..." # OpenCode APIキー
|
||||||
|
# codex_cli_path: "/absolute/path/to/codex" # Codex CLI絶対パス
|
||||||
|
|
||||||
# ---- Pipeline ----
|
# パイプライン
|
||||||
# pipeline:
|
# pipeline:
|
||||||
# default_branch_prefix: "takt/"
|
# default_branch_prefix: "takt/" # pipeline作成ブランチの接頭辞
|
||||||
# commit_message_template: "feat: {title} (#{issue})"
|
# commit_message_template: "feat: {title} (#{issue})" # コミット文テンプレート
|
||||||
# pr_body_template: |
|
# pr_body_template: | # PR本文テンプレート
|
||||||
# ## Summary
|
# ## Summary
|
||||||
# {issue_body}
|
# {issue_body}
|
||||||
# Closes #{issue}
|
# Closes #{issue}
|
||||||
|
|
||||||
# ---- Preferences ----
|
# その他
|
||||||
# bookmarks_file: ~/.takt/preferences/bookmarks.yaml
|
# bookmarks_file: ~/.takt/preferences/bookmarks.yaml # ブックマーク保存先
|
||||||
# piece_categories_file: ~/.takt/preferences/piece-categories.yaml
|
|
||||||
|
|
||||||
# ---- Debug ----
|
# =====================================
|
||||||
# debug:
|
# ピースにも関わる設定(global defaults)
|
||||||
# enabled: false
|
# =====================================
|
||||||
# log_file: ~/.takt/logs/debug.log
|
# 1) ペルソナ単位でプロバイダーを切り替える
|
||||||
|
# persona_providers:
|
||||||
|
# coder: codex # coderペルソナはcodexで実行
|
||||||
|
# reviewer: claude # reviewerペルソナはclaudeで実行
|
||||||
|
|
||||||
|
# 2) provider 固有オプション(global < project < piece)
|
||||||
|
# provider_options:
|
||||||
|
# codex:
|
||||||
|
# network_access: true # Codex実行時のネットワークアクセス許可
|
||||||
|
# opencode:
|
||||||
|
# network_access: true # OpenCode実行時のネットワークアクセス許可
|
||||||
|
# claude:
|
||||||
|
# sandbox:
|
||||||
|
# allow_unsandboxed_commands: false # trueで対象コマンドを非サンドボックス実行
|
||||||
|
# excluded_commands:
|
||||||
|
# - "npm publish" # 非サンドボックス対象コマンド
|
||||||
|
|
||||||
|
# 3) movement の権限ポリシー
|
||||||
|
# provider_profiles:
|
||||||
|
# codex:
|
||||||
|
# default_permission_mode: full # 既定権限: readonly | edit | full
|
||||||
|
# movement_permission_overrides:
|
||||||
|
# ai_review: readonly # movement単位の上書き
|
||||||
|
# claude:
|
||||||
|
# default_permission_mode: edit
|
||||||
|
|
||||||
|
# 4) 実行前のランタイム準備(推奨: 有効化)
|
||||||
|
runtime:
|
||||||
|
prepare:
|
||||||
|
- gradle # Gradleキャッシュ/環境を .runtime 配下に準備
|
||||||
|
- node # npmキャッシュ/環境を .runtime 配下に準備
|
||||||
|
|
||||||
|
# 5) ピース一覧/カテゴリ
|
||||||
|
# enable_builtin_pieces: true # builtins/{lang}/pieces を有効化
|
||||||
|
# disabled_builtins:
|
||||||
|
# - magi # 無効化するビルトインピース名
|
||||||
|
# piece_categories_file: ~/.takt/preferences/piece-categories.yaml # カテゴリ定義ファイル
|
||||||
|
# interactive_preview_movements: 3 # 対話モードのプレビュー件数(0-10)
|
||||||
|
|||||||
@ -6,6 +6,40 @@
|
|||||||
|
|
||||||
フォーマットは [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) に基づいています。
|
フォーマットは [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) に基づいています。
|
||||||
|
|
||||||
|
## [0.20.0] - 2026-02-19
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- **Faceted Prompting モジュール** (`src/faceted-prompting/`): ファセット合成・解決・テンプレートレンダリング・トランケーションのスタンドアロンライブラリ — TAKT 内部への依存ゼロ。プラガブルなファセットストレージのための `DataEngine` インターフェースと `FileDataEngine`、`CompositeDataEngine` 実装を含む
|
||||||
|
- **Analytics モジュール** (`src/features/analytics/`): ローカル専用のレビュー品質メトリクス収集 — イベント型(レビュー指摘、修正アクション、ムーブメント結果)、日付ローテーション付き JSONL ライター、レポートパーサー、メトリクス計算
|
||||||
|
- **`takt metrics review` コマンド**: レビュー品質メトリクスを表示(再報告カウント、ラウンドトリップ率、解決イテレーション数、ルール別 REJECT カウント、反論解決率)。`--since` で時間枠を設定可能
|
||||||
|
- **`takt purge` コマンド**: 古いアナリティクスイベントファイルを削除。`--retention-days` で保持期間を設定可能
|
||||||
|
- **`takt reset config` コマンド**: グローバル設定をビルトインテンプレートにリセット(既存設定の自動バックアップ付き)
|
||||||
|
- **PR 重複防止**: 現在のブランチに既に PR が存在する場合、新規作成ではなく既存 PR へのプッシュとコメント追加で対応 (#304)
|
||||||
|
- リトライ時のムーブメント選択で失敗箇所にカーソルを初期配置
|
||||||
|
- run-recovery と config-priority シナリオの E2E テストを追加
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- **README を大幅改訂**: 約950行から約270行に圧縮 — 詳細情報を専用ドキュメント(`docs/configuration.md`、`docs/cli-reference.md`、`docs/task-management.md`、`docs/ci-cd.md`、`docs/builtin-catalog.md`)に分離し、日本語版も作成。プロダクトコンセプトを4軸(すぐ始められる、実用的、再現可能、マルチエージェント)で再定義
|
||||||
|
- **設定システムのリファクタリング**: 設定解決を `resolveConfigValue()` と `loadConfig()` に統一し、コードベース全体に散在していた設定アクセスパターンを解消
|
||||||
|
- **`takt config` コマンド削除**: デフォルトへのリセットを行う `takt reset config` に置き換え
|
||||||
|
- ビルトイン設定テンプレートのコメントと構造を刷新
|
||||||
|
- `@anthropic-ai/claude-agent-sdk` を v0.2.47 に更新
|
||||||
|
- タスク再指示のインストラクトモードプロンプトを改善
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- ビルトインピースのファイル参照が相対パスではなく絶対パスを使用していた問題を修正 (#304)
|
||||||
|
- 複数ファイルにまたがる未使用 import・変数を削除
|
||||||
|
|
||||||
|
### Internal
|
||||||
|
|
||||||
|
- `loadConfig`、`resolveConfigValue`、ピース設定解決、設定優先順位パスの統一
|
||||||
|
- config-priority と run-recovery シナリオの E2E テストを追加
|
||||||
|
- PR 作成フローテスト用の `postExecution.test.ts` を追加
|
||||||
|
- 未使用 import・変数のクリーンアップ
|
||||||
|
|
||||||
## [0.19.0] - 2026-02-18
|
## [0.19.0] - 2026-02-18
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
109
docs/builtin-catalog.ja.md
Normal file
109
docs/builtin-catalog.ja.md
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
# ビルトインカタログ
|
||||||
|
|
||||||
|
[English](./builtin-catalog.md)
|
||||||
|
|
||||||
|
TAKT に同梱されているすべてのビルトイン piece と persona の総合カタログです。
|
||||||
|
|
||||||
|
## おすすめ Piece
|
||||||
|
|
||||||
|
| Piece | 推奨用途 |
|
||||||
|
|----------|-----------------|
|
||||||
|
| `default-mini` | ちょっとした修正向けです。計画 → 実装 → 並列レビュー → 修正の軽量構成です。 |
|
||||||
|
| `frontend-mini` | フロントエンド向けの mini 構成です。 |
|
||||||
|
| `backend-mini` | バックエンド向けの mini 構成です。 |
|
||||||
|
| `expert-mini` | エキスパート向けの mini 構成です。 |
|
||||||
|
| `default` | 本格的な開発向けです。並列レビュアーによる多段階レビューが付いています。TAKT 自身の開発にも使用しています。 |
|
||||||
|
|
||||||
|
## 全ビルトイン Piece 一覧
|
||||||
|
|
||||||
|
カテゴリ順に並べています。
|
||||||
|
|
||||||
|
| カテゴリ | Piece | 説明 |
|
||||||
|
|---------|----------|-------------|
|
||||||
|
| 🚀 クイックスタート | `default-mini` | ミニ開発 piece: plan -> implement -> 並列レビュー (AI antipattern + supervisor) -> 必要に応じて修正。レビュー付き軽量版。 |
|
||||||
|
| | `frontend-mini` | ミニフロントエンド piece: plan -> implement -> 並列レビュー (AI antipattern + supervisor)。フロントエンドナレッジ注入付き。 |
|
||||||
|
| | `backend-mini` | ミニバックエンド piece: plan -> implement -> 並列レビュー (AI antipattern + supervisor)。バックエンドナレッジ注入付き。 |
|
||||||
|
| | `default` | フル開発 piece: plan -> implement -> AI review -> 並列レビュー (architect + QA) -> supervisor 承認。各レビュー段階に修正ループあり。 |
|
||||||
|
| | `compound-eye` | マルチモデルレビュー: 同じ指示を Claude と Codex に同時送信し、両方のレスポンスを統合。 |
|
||||||
|
| ⚡ Mini | `backend-cqrs-mini` | ミニ CQRS+ES piece: plan -> implement -> 並列レビュー (AI antipattern + supervisor)。CQRS+ES ナレッジ注入付き。 |
|
||||||
|
| | `expert-mini` | ミニエキスパート piece: plan -> implement -> 並列レビュー (AI antipattern + expert supervisor)。フルスタックナレッジ注入付き。 |
|
||||||
|
| | `expert-cqrs-mini` | ミニ CQRS+ES エキスパート piece: plan -> implement -> 並列レビュー (AI antipattern + expert supervisor)。CQRS+ES ナレッジ注入付き。 |
|
||||||
|
| 🎨 フロントエンド | `frontend` | フロントエンド特化開発 piece。React/Next.js に焦点を当てたレビューとナレッジ注入付き。 |
|
||||||
|
| ⚙️ バックエンド | `backend` | バックエンド特化開発 piece。バックエンド、セキュリティ、QA エキスパートレビュー付き。 |
|
||||||
|
| | `backend-cqrs` | CQRS+ES 特化バックエンド開発 piece。CQRS+ES、セキュリティ、QA エキスパートレビュー付き。 |
|
||||||
|
| 🔧 エキスパート | `expert` | フルスタック開発 piece: architecture、frontend、security、QA レビューと修正ループ付き。 |
|
||||||
|
| | `expert-cqrs` | フルスタック開発 piece (CQRS+ES 特化): CQRS+ES、frontend、security、QA レビューと修正ループ付き。 |
|
||||||
|
| 🛠️ リファクタリング | `structural-reform` | プロジェクト全体のレビューと構造改革: 段階的なファイル分割による反復的なコードベース再構築。 |
|
||||||
|
| 🔍 レビュー | `review-fix-minimal` | レビュー特化 piece: review -> fix -> supervisor。レビューフィードバックに基づく反復改善向け。 |
|
||||||
|
| | `review-only` | 変更を加えない読み取り専用のコードレビュー piece。 |
|
||||||
|
| 🧪 テスト | `unit-test` | ユニットテスト特化 piece: テスト分析 -> テスト実装 -> レビュー -> 修正。 |
|
||||||
|
| | `e2e-test` | E2E テスト特化 piece: E2E 分析 -> E2E 実装 -> レビュー -> 修正 (Vitest ベースの E2E フロー)。 |
|
||||||
|
| その他 | `research` | リサーチ piece: planner -> digger -> supervisor。質問せずに自律的にリサーチを実行。 |
|
||||||
|
| | `deep-research` | ディープリサーチ piece: plan -> dig -> analyze -> supervise。発見駆動型の調査で、浮上した疑問を多角的に分析。 |
|
||||||
|
| | `magi` | エヴァンゲリオンにインスパイアされた合議システム。3つの AI persona (MELCHIOR, BALTHASAR, CASPER) が分析・投票。 |
|
||||||
|
| | `passthrough` | 最薄ラッパー。タスクを coder にそのまま渡す。レビューなし。 |
|
||||||
|
|
||||||
|
`takt switch` で piece をインタラクティブに切り替えできます。
|
||||||
|
|
||||||
|
## ビルトイン Persona 一覧
|
||||||
|
|
||||||
|
| Persona | 説明 |
|
||||||
|
|---------|-------------|
|
||||||
|
| **planner** | タスク分析、仕様調査、実装計画 |
|
||||||
|
| **architect-planner** | タスク分析と設計計画: コード調査、不明点の解消、実装計画の作成 |
|
||||||
|
| **coder** | 機能実装、バグ修正 |
|
||||||
|
| **ai-antipattern-reviewer** | AI 固有のアンチパターンレビュー(存在しない API、誤った前提、スコープクリープ) |
|
||||||
|
| **architecture-reviewer** | アーキテクチャとコード品質のレビュー、仕様準拠の検証 |
|
||||||
|
| **frontend-reviewer** | フロントエンド (React/Next.js) のコード品質とベストプラクティスのレビュー |
|
||||||
|
| **cqrs-es-reviewer** | CQRS+Event Sourcing のアーキテクチャと実装のレビュー |
|
||||||
|
| **qa-reviewer** | テストカバレッジと品質保証のレビュー |
|
||||||
|
| **security-reviewer** | セキュリティ脆弱性の評価 |
|
||||||
|
| **conductor** | Phase 3 判定スペシャリスト: レポート/レスポンスを読み取りステータスタグを出力 |
|
||||||
|
| **supervisor** | 最終検証、承認 |
|
||||||
|
| **expert-supervisor** | エキスパートレベルの最終検証と包括的なレビュー統合 |
|
||||||
|
| **research-planner** | リサーチタスクの計画とスコープ定義 |
|
||||||
|
| **research-analyzer** | リサーチ結果の解釈と追加調査計画 |
|
||||||
|
| **research-digger** | 深掘り調査と情報収集 |
|
||||||
|
| **research-supervisor** | リサーチ品質の検証と完全性の評価 |
|
||||||
|
| **test-planner** | テスト戦略の分析と包括的なテスト計画 |
|
||||||
|
| **pr-commenter** | レビュー結果を GitHub PR コメントとして投稿 |
|
||||||
|
|
||||||
|
## カスタム Persona
|
||||||
|
|
||||||
|
`~/.takt/personas/` に Markdown ファイルとして persona プロンプトを作成できます。
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# ~/.takt/personas/my-reviewer.md
|
||||||
|
|
||||||
|
You are a code reviewer specialized in security.
|
||||||
|
|
||||||
|
## Role
|
||||||
|
- Check for security vulnerabilities
|
||||||
|
- Verify input validation
|
||||||
|
- Review authentication logic
|
||||||
|
```
|
||||||
|
|
||||||
|
piece YAML の `personas` セクションマップからカスタム persona を参照します。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
personas:
|
||||||
|
my-reviewer: ~/.takt/personas/my-reviewer.md
|
||||||
|
|
||||||
|
movements:
|
||||||
|
- name: review
|
||||||
|
persona: my-reviewer
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Persona 別 Provider オーバーライド
|
||||||
|
|
||||||
|
`~/.takt/config.yaml` の `persona_providers` を使用して、piece を複製せずに特定の persona を異なる provider にルーティングできます。これにより、例えばコーディングは Codex で実行し、レビューアーは Claude に維持するといった構成が可能になります。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
persona_providers:
|
||||||
|
coder: codex # coder を Codex で実行
|
||||||
|
ai-antipattern-reviewer: claude # レビューアーは Claude を維持
|
||||||
|
```
|
||||||
|
|
||||||
|
この設定はすべての piece にグローバルに適用されます。指定された persona を使用する movement は、実行中の piece に関係なく、対応する provider にルーティングされます。
|
||||||
109
docs/builtin-catalog.md
Normal file
109
docs/builtin-catalog.md
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
# Builtin Catalog
|
||||||
|
|
||||||
|
[日本語](./builtin-catalog.ja.md)
|
||||||
|
|
||||||
|
A comprehensive catalog of all builtin pieces and personas included with TAKT.
|
||||||
|
|
||||||
|
## Recommended Pieces
|
||||||
|
|
||||||
|
| Piece | Recommended Use |
|
||||||
|
|----------|-----------------|
|
||||||
|
| `default-mini` | Quick fixes. Lightweight plan → implement → parallel review → fix loop. |
|
||||||
|
| `frontend-mini` | Frontend-focused mini configuration. |
|
||||||
|
| `backend-mini` | Backend-focused mini configuration. |
|
||||||
|
| `expert-mini` | Expert-level mini configuration. |
|
||||||
|
| `default` | Serious development. Multi-stage review with parallel reviewers. Used for TAKT's own development. |
|
||||||
|
|
||||||
|
## All Builtin Pieces
|
||||||
|
|
||||||
|
Organized by category.
|
||||||
|
|
||||||
|
| Category | Piece | Description |
|
||||||
|
|----------|----------|-------------|
|
||||||
|
| 🚀 Quick Start | `default-mini` | Mini development piece: plan -> implement -> parallel review (AI antipattern + supervisor) -> fix if needed. Lightweight with review. |
|
||||||
|
| | `frontend-mini` | Mini frontend piece: plan -> implement -> parallel review (AI antipattern + supervisor) with frontend knowledge injection. |
|
||||||
|
| | `backend-mini` | Mini backend piece: plan -> implement -> parallel review (AI antipattern + supervisor) with backend knowledge injection. |
|
||||||
|
| | `default` | Full development piece: plan -> implement -> AI review -> parallel review (architect + QA) -> supervisor approval. Includes fix loops at each review stage. |
|
||||||
|
| | `compound-eye` | Multi-model review: sends the same instruction to Claude and Codex simultaneously, then synthesizes both responses. |
|
||||||
|
| ⚡ Mini | `backend-cqrs-mini` | Mini CQRS+ES piece: plan -> implement -> parallel review (AI antipattern + supervisor) with CQRS+ES knowledge injection. |
|
||||||
|
| | `expert-mini` | Mini expert piece: plan -> implement -> parallel review (AI antipattern + expert supervisor) with full-stack knowledge injection. |
|
||||||
|
| | `expert-cqrs-mini` | Mini CQRS+ES expert piece: plan -> implement -> parallel review (AI antipattern + expert supervisor) with CQRS+ES knowledge injection. |
|
||||||
|
| 🎨 Frontend | `frontend` | Frontend-specialized development piece with React/Next.js focused reviews and knowledge injection. |
|
||||||
|
| ⚙️ Backend | `backend` | Backend-specialized development piece with backend, security, and QA expert reviews. |
|
||||||
|
| | `backend-cqrs` | CQRS+ES-specialized backend development piece with CQRS+ES, security, and QA expert reviews. |
|
||||||
|
| 🔧 Expert | `expert` | Full-stack development piece: architecture, frontend, security, QA reviews with fix loops. |
|
||||||
|
| | `expert-cqrs` | Full-stack development piece (CQRS+ES specialized): CQRS+ES, frontend, security, QA reviews with fix loops. |
|
||||||
|
| 🛠️ Refactoring | `structural-reform` | Full project review and structural reform: iterative codebase restructuring with staged file splits. |
|
||||||
|
| 🔍 Review | `review-fix-minimal` | Review-focused piece: review -> fix -> supervisor. For iterative improvement based on review feedback. |
|
||||||
|
| | `review-only` | Read-only code review piece that makes no changes. |
|
||||||
|
| 🧪 Testing | `unit-test` | Unit test focused piece: test analysis -> test implementation -> review -> fix. |
|
||||||
|
| | `e2e-test` | E2E test focused piece: E2E analysis -> E2E implementation -> review -> fix (Vitest-based E2E flow). |
|
||||||
|
| Others | `research` | Research piece: planner -> digger -> supervisor. Autonomously executes research without asking questions. |
|
||||||
|
| | `deep-research` | Deep research piece: plan -> dig -> analyze -> supervise. Discovery-driven investigation that follows emerging questions with multi-perspective analysis. |
|
||||||
|
| | `magi` | Deliberation system inspired by Evangelion. Three AI personas (MELCHIOR, BALTHASAR, CASPER) analyze and vote. |
|
||||||
|
| | `passthrough` | Thinnest wrapper. Pass task directly to coder as-is. No review. |
|
||||||
|
|
||||||
|
Use `takt switch` to switch pieces interactively.
|
||||||
|
|
||||||
|
## Builtin Personas
|
||||||
|
|
||||||
|
| Persona | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| **planner** | Task analysis, spec investigation, implementation planning |
|
||||||
|
| **architect-planner** | Task analysis and design planning: investigates code, resolves unknowns, creates implementation plans |
|
||||||
|
| **coder** | Feature implementation, bug fixing |
|
||||||
|
| **ai-antipattern-reviewer** | AI-specific antipattern review (non-existent APIs, incorrect assumptions, scope creep) |
|
||||||
|
| **architecture-reviewer** | Architecture and code quality review, spec compliance verification |
|
||||||
|
| **frontend-reviewer** | Frontend (React/Next.js) code quality and best practices review |
|
||||||
|
| **cqrs-es-reviewer** | CQRS+Event Sourcing architecture and implementation review |
|
||||||
|
| **qa-reviewer** | Test coverage and quality assurance review |
|
||||||
|
| **security-reviewer** | Security vulnerability assessment |
|
||||||
|
| **conductor** | Phase 3 judgment specialist: reads reports/responses and outputs status tags |
|
||||||
|
| **supervisor** | Final validation, approval |
|
||||||
|
| **expert-supervisor** | Expert-level final validation with comprehensive review integration |
|
||||||
|
| **research-planner** | Research task planning and scope definition |
|
||||||
|
| **research-analyzer** | Research result interpretation and additional investigation planning |
|
||||||
|
| **research-digger** | Deep investigation and information gathering |
|
||||||
|
| **research-supervisor** | Research quality validation and completeness assessment |
|
||||||
|
| **test-planner** | Test strategy analysis and comprehensive test planning |
|
||||||
|
| **pr-commenter** | Posts review findings as GitHub PR comments |
|
||||||
|
|
||||||
|
## Custom Personas
|
||||||
|
|
||||||
|
Create persona prompts as Markdown files in `~/.takt/personas/`:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# ~/.takt/personas/my-reviewer.md
|
||||||
|
|
||||||
|
You are a code reviewer specialized in security.
|
||||||
|
|
||||||
|
## Role
|
||||||
|
- Check for security vulnerabilities
|
||||||
|
- Verify input validation
|
||||||
|
- Review authentication logic
|
||||||
|
```
|
||||||
|
|
||||||
|
Reference custom personas from piece YAML via the `personas` section map:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
personas:
|
||||||
|
my-reviewer: ~/.takt/personas/my-reviewer.md
|
||||||
|
|
||||||
|
movements:
|
||||||
|
- name: review
|
||||||
|
persona: my-reviewer
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Per-persona Provider Overrides
|
||||||
|
|
||||||
|
Use `persona_providers` in `~/.takt/config.yaml` to route specific personas to different providers without duplicating pieces. This allows you to run, for example, coding on Codex while keeping reviewers on Claude.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
persona_providers:
|
||||||
|
coder: codex # Run coder on Codex
|
||||||
|
ai-antipattern-reviewer: claude # Keep reviewers on Claude
|
||||||
|
```
|
||||||
|
|
||||||
|
This configuration applies globally to all pieces. Any movement using the specified persona will be routed to the corresponding provider, regardless of which piece is being executed.
|
||||||
178
docs/ci-cd.ja.md
Normal file
178
docs/ci-cd.ja.md
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
[English](./ci-cd.md)
|
||||||
|
|
||||||
|
# CI/CD 連携
|
||||||
|
|
||||||
|
TAKT は CI/CD パイプラインに統合して、タスク実行、PR レビュー、コード生成を自動化できます。このガイドでは GitHub Actions のセットアップ、pipeline モードのオプション、その他の CI システムでの設定について説明します。
|
||||||
|
|
||||||
|
## GitHub Actions
|
||||||
|
|
||||||
|
TAKT は GitHub Actions 連携用の公式アクション [takt-action](https://github.com/nrslib/takt-action) を提供しています。
|
||||||
|
|
||||||
|
### 完全なワークフロー例
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: TAKT
|
||||||
|
|
||||||
|
on:
|
||||||
|
issue_comment:
|
||||||
|
types: [created]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
takt:
|
||||||
|
if: contains(github.event.comment.body, '@takt')
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Run TAKT
|
||||||
|
uses: nrslib/takt-action@main
|
||||||
|
with:
|
||||||
|
anthropic_api_key: ${{ secrets.TAKT_ANTHROPIC_API_KEY }}
|
||||||
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
```
|
||||||
|
|
||||||
|
### パーミッション
|
||||||
|
|
||||||
|
`takt-action` が正しく機能するには次のパーミッションが必要です。
|
||||||
|
|
||||||
|
| パーミッション | 用途 |
|
||||||
|
|-------------|------|
|
||||||
|
| `contents: write` | ブランチの作成、コミット、コードのプッシュ |
|
||||||
|
| `issues: write` | Issue の読み取りとコメント |
|
||||||
|
| `pull-requests: write` | PR の作成と更新 |
|
||||||
|
|
||||||
|
## Pipeline モード
|
||||||
|
|
||||||
|
`--pipeline` を指定すると、非インタラクティブな pipeline モードが有効になります。ブランチの作成、piece の実行、コミット、プッシュを自動的に行います。このモードは人的操作が不可能な CI/CD 自動化向けに設計されています。
|
||||||
|
|
||||||
|
Pipeline モードでは、`--auto-pr` を明示的に指定しない限り PR は作成**されません**。
|
||||||
|
|
||||||
|
### Pipeline の全オプション
|
||||||
|
|
||||||
|
| オプション | 説明 |
|
||||||
|
|-----------|------|
|
||||||
|
| `--pipeline` | **pipeline(非インタラクティブ)モードを有効化** -- CI/自動化に必要 |
|
||||||
|
| `-t, --task <text>` | タスク内容(GitHub Issue の代替) |
|
||||||
|
| `-i, --issue <N>` | GitHub Issue 番号(インタラクティブモードでの `#N` と同等) |
|
||||||
|
| `-w, --piece <name or path>` | Piece 名または piece YAML ファイルのパス |
|
||||||
|
| `-b, --branch <name>` | ブランチ名を指定(省略時は自動生成) |
|
||||||
|
| `--auto-pr` | PR を作成(インタラクティブ: 確認スキップ、pipeline: PR 有効化) |
|
||||||
|
| `--skip-git` | ブランチ作成、コミット、プッシュをスキップ(pipeline モード、piece のみ実行) |
|
||||||
|
| `--repo <owner/repo>` | リポジトリを指定(PR 作成用) |
|
||||||
|
| `-q, --quiet` | 最小出力モード: AI 出力を抑制(CI 向け) |
|
||||||
|
| `--provider <name>` | エージェント provider を上書き(claude\|codex\|opencode\|mock) |
|
||||||
|
| `--model <name>` | エージェントモデルを上書き |
|
||||||
|
|
||||||
|
### コマンド例
|
||||||
|
|
||||||
|
**基本的な pipeline 実行**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug"
|
||||||
|
```
|
||||||
|
|
||||||
|
**PR 自動作成付きの pipeline 実行**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**GitHub Issue をリンクして PR を作成**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --issue 99 --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**Piece とブランチ名を指定**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" -w magi -b feat/fix-bug
|
||||||
|
```
|
||||||
|
|
||||||
|
**PR 作成用にリポジトリを指定**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr --repo owner/repo
|
||||||
|
```
|
||||||
|
|
||||||
|
**Piece のみ実行(ブランチ作成、コミット、プッシュをスキップ)**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --skip-git
|
||||||
|
```
|
||||||
|
|
||||||
|
**最小出力モード(CI ログ向けに AI 出力を抑制)**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --quiet
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pipeline テンプレート変数
|
||||||
|
|
||||||
|
`~/.takt/config.yaml` の pipeline 設定では、コミットメッセージと PR 本文をカスタマイズするためのテンプレート変数をサポートしています。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
pipeline:
|
||||||
|
default_branch_prefix: "takt/"
|
||||||
|
commit_message_template: "feat: {title} (#{issue})"
|
||||||
|
pr_body_template: |
|
||||||
|
## Summary
|
||||||
|
{issue_body}
|
||||||
|
Closes #{issue}
|
||||||
|
```
|
||||||
|
|
||||||
|
| 変数 | 使用可能な場所 | 説明 |
|
||||||
|
|------|--------------|------|
|
||||||
|
| `{title}` | コミットメッセージ | Issue タイトル |
|
||||||
|
| `{issue}` | コミットメッセージ、PR 本文 | Issue 番号 |
|
||||||
|
| `{issue_body}` | PR 本文 | Issue 本文 |
|
||||||
|
| `{report}` | PR 本文 | Piece 実行レポート |
|
||||||
|
|
||||||
|
## その他の CI システム
|
||||||
|
|
||||||
|
GitHub Actions 以外の CI システムでは、TAKT をグローバルにインストールして pipeline モードを直接使用します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# takt のインストール
|
||||||
|
npm install -g takt
|
||||||
|
|
||||||
|
# pipeline モードで実行
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr --repo owner/repo
|
||||||
|
```
|
||||||
|
|
||||||
|
このアプローチは Node.js をサポートする任意の CI システムで動作します。GitLab CI、CircleCI、Jenkins、Azure DevOps などが含まれます。
|
||||||
|
|
||||||
|
## 環境変数
|
||||||
|
|
||||||
|
CI 環境での認証には、適切な API キー環境変数を設定してください。これらは他のツールとの衝突を避けるため TAKT 固有のプレフィックスを使用しています。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Claude(Anthropic)用
|
||||||
|
export TAKT_ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
|
||||||
|
# Codex(OpenAI)用
|
||||||
|
export TAKT_OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# OpenCode 用
|
||||||
|
export TAKT_OPENCODE_API_KEY=...
|
||||||
|
```
|
||||||
|
|
||||||
|
優先順位: 環境変数は `config.yaml` の設定よりも優先されます。
|
||||||
|
|
||||||
|
> **注意**: 環境変数で API キーを設定すれば、Claude Code、Codex、OpenCode CLI のインストールは不要です。TAKT が対応する API を直接呼び出します。
|
||||||
|
|
||||||
|
## コストに関する注意
|
||||||
|
|
||||||
|
TAKT は AI API(Claude または OpenAI)を使用するため、特に CI/CD 環境でタスクが自動実行される場合、大きなコストが発生する可能性があります。次の点に注意してください。
|
||||||
|
|
||||||
|
- **API 使用量の監視**: 予期しない請求を避けるため、AI provider で課金アラートを設定してください。
|
||||||
|
- **`--quiet` モードの使用**: 出力量は削減されますが、API 呼び出し回数は減りません。
|
||||||
|
- **適切な piece の選択**: シンプルな piece(例: `default-mini`)はマルチステージの piece(例: 並列レビュー付きの `default`)よりも API 呼び出しが少なくなります。
|
||||||
|
- **CI トリガーの制限**: 意図しない実行を防ぐため、条件付きトリガー(例: `if: contains(github.event.comment.body, '@takt')`)を使用してください。
|
||||||
|
- **`--provider mock` でのテスト**: CI パイプラインの開発中は mock provider を使用して、実際の API コストを回避してください。
|
||||||
178
docs/ci-cd.md
Normal file
178
docs/ci-cd.md
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
[日本語](./ci-cd.ja.md)
|
||||||
|
|
||||||
|
# CI/CD Integration
|
||||||
|
|
||||||
|
TAKT can be integrated into CI/CD pipelines to automate task execution, PR reviews, and code generation. This guide covers GitHub Actions setup, pipeline mode options, and configuration for other CI systems.
|
||||||
|
|
||||||
|
## GitHub Actions
|
||||||
|
|
||||||
|
TAKT provides the official [takt-action](https://github.com/nrslib/takt-action) for GitHub Actions integration.
|
||||||
|
|
||||||
|
### Complete Workflow Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: TAKT
|
||||||
|
|
||||||
|
on:
|
||||||
|
issue_comment:
|
||||||
|
types: [created]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
takt:
|
||||||
|
if: contains(github.event.comment.body, '@takt')
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Run TAKT
|
||||||
|
uses: nrslib/takt-action@main
|
||||||
|
with:
|
||||||
|
anthropic_api_key: ${{ secrets.TAKT_ANTHROPIC_API_KEY }}
|
||||||
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Permissions
|
||||||
|
|
||||||
|
The following permissions are required for `takt-action` to function correctly:
|
||||||
|
|
||||||
|
| Permission | Required For |
|
||||||
|
|------------|-------------|
|
||||||
|
| `contents: write` | Creating branches, committing, and pushing code |
|
||||||
|
| `issues: write` | Reading and commenting on issues |
|
||||||
|
| `pull-requests: write` | Creating and updating pull requests |
|
||||||
|
|
||||||
|
## Pipeline Mode
|
||||||
|
|
||||||
|
Specifying `--pipeline` enables non-interactive pipeline mode. It automatically creates a branch, runs the piece, commits, and pushes. This mode is designed for CI/CD automation where no human interaction is available.
|
||||||
|
|
||||||
|
In pipeline mode, PRs are **not** created unless `--auto-pr` is explicitly specified.
|
||||||
|
|
||||||
|
### All Pipeline Options
|
||||||
|
|
||||||
|
| Option | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `--pipeline` | **Enable pipeline (non-interactive) mode** -- Required for CI/automation |
|
||||||
|
| `-t, --task <text>` | Task content (alternative to GitHub Issue) |
|
||||||
|
| `-i, --issue <N>` | GitHub issue number (same as `#N` in interactive mode) |
|
||||||
|
| `-w, --piece <name or path>` | Piece name or path to piece YAML file |
|
||||||
|
| `-b, --branch <name>` | Specify branch name (auto-generated if omitted) |
|
||||||
|
| `--auto-pr` | Create PR (interactive: skip confirmation, pipeline: enable PR) |
|
||||||
|
| `--skip-git` | Skip branch creation, commit, and push (pipeline mode, piece-only) |
|
||||||
|
| `--repo <owner/repo>` | Specify repository (for PR creation) |
|
||||||
|
| `-q, --quiet` | Minimal output mode: suppress AI output (for CI) |
|
||||||
|
| `--provider <name>` | Override agent provider (claude\|codex\|opencode\|mock) |
|
||||||
|
| `--model <name>` | Override agent model |
|
||||||
|
|
||||||
|
### Command Examples
|
||||||
|
|
||||||
|
**Basic pipeline execution:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Pipeline execution with automatic PR creation:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**Link a GitHub issue and create a PR:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --issue 99 --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**Specify piece and branch name:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" -w magi -b feat/fix-bug
|
||||||
|
```
|
||||||
|
|
||||||
|
**Specify repository for PR creation:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr --repo owner/repo
|
||||||
|
```
|
||||||
|
|
||||||
|
**Piece execution only (skip branch creation, commit, push):**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --skip-git
|
||||||
|
```
|
||||||
|
|
||||||
|
**Minimal output mode (suppress AI output for CI logs):**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt --pipeline --task "Fix bug" --quiet
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pipeline Template Variables
|
||||||
|
|
||||||
|
Pipeline configuration in `~/.takt/config.yaml` supports template variables for customizing commit messages and PR bodies:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
pipeline:
|
||||||
|
default_branch_prefix: "takt/"
|
||||||
|
commit_message_template: "feat: {title} (#{issue})"
|
||||||
|
pr_body_template: |
|
||||||
|
## Summary
|
||||||
|
{issue_body}
|
||||||
|
Closes #{issue}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Variable | Available In | Description |
|
||||||
|
|----------|-------------|-------------|
|
||||||
|
| `{title}` | Commit message | Issue title |
|
||||||
|
| `{issue}` | Commit message, PR body | Issue number |
|
||||||
|
| `{issue_body}` | PR body | Issue body |
|
||||||
|
| `{report}` | PR body | Piece execution report |
|
||||||
|
|
||||||
|
## Other CI Systems
|
||||||
|
|
||||||
|
For CI systems other than GitHub Actions, install TAKT globally and use pipeline mode directly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install takt
|
||||||
|
npm install -g takt
|
||||||
|
|
||||||
|
# Run in pipeline mode
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr --repo owner/repo
|
||||||
|
```
|
||||||
|
|
||||||
|
This approach works with any CI system that supports Node.js, including GitLab CI, CircleCI, Jenkins, Azure DevOps, and others.
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
For authentication in CI environments, set the appropriate API key environment variable. These use TAKT-specific prefixes to avoid conflicts with other tools.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For Claude (Anthropic)
|
||||||
|
export TAKT_ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
|
||||||
|
# For Codex (OpenAI)
|
||||||
|
export TAKT_OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# For OpenCode
|
||||||
|
export TAKT_OPENCODE_API_KEY=...
|
||||||
|
```
|
||||||
|
|
||||||
|
Priority: Environment variables take precedence over `config.yaml` settings.
|
||||||
|
|
||||||
|
> **Note**: If you set an API key via environment variable, installing Claude Code, Codex, or OpenCode CLI is not necessary. TAKT directly calls the respective API.
|
||||||
|
|
||||||
|
## Cost Considerations
|
||||||
|
|
||||||
|
TAKT uses AI APIs (Claude or OpenAI), which can incur significant costs, especially when tasks are auto-executed in CI/CD environments. Take the following precautions:
|
||||||
|
|
||||||
|
- **Monitor API usage**: Set up billing alerts with your AI provider to avoid unexpected charges.
|
||||||
|
- **Use `--quiet` mode**: Reduces output volume but does not reduce API calls.
|
||||||
|
- **Choose appropriate pieces**: Simpler pieces (e.g., `default-mini`) use fewer API calls than multi-stage pieces (e.g., `default` with parallel reviews).
|
||||||
|
- **Limit CI triggers**: Use conditional triggers (e.g., `if: contains(github.event.comment.body, '@takt')`) to prevent unintended executions.
|
||||||
|
- **Test with `--provider mock`**: Use mock provider during CI pipeline development to avoid real API costs.
|
||||||
310
docs/cli-reference.ja.md
Normal file
310
docs/cli-reference.ja.md
Normal file
@ -0,0 +1,310 @@
|
|||||||
|
# CLI リファレンス
|
||||||
|
|
||||||
|
[English](./cli-reference.md)
|
||||||
|
|
||||||
|
このドキュメントは TAKT CLI の全コマンドとオプションの完全なリファレンスです。
|
||||||
|
|
||||||
|
## グローバルオプション
|
||||||
|
|
||||||
|
| オプション | 説明 |
|
||||||
|
|-----------|------|
|
||||||
|
| `--pipeline` | pipeline(非インタラクティブ)モードを有効化 -- CI/自動化に必要 |
|
||||||
|
| `-t, --task <text>` | タスク内容(GitHub Issue の代替) |
|
||||||
|
| `-i, --issue <N>` | GitHub Issue 番号(インタラクティブモードでの `#N` と同等) |
|
||||||
|
| `-w, --piece <name or path>` | Piece 名または piece YAML ファイルのパス |
|
||||||
|
| `-b, --branch <name>` | ブランチ名を指定(省略時は自動生成) |
|
||||||
|
| `--auto-pr` | PR を作成(インタラクティブ: 確認スキップ、pipeline: PR 有効化) |
|
||||||
|
| `--skip-git` | ブランチ作成、コミット、プッシュをスキップ(pipeline モード、piece のみ実行) |
|
||||||
|
| `--repo <owner/repo>` | リポジトリを指定(PR 作成用) |
|
||||||
|
| `--create-worktree <yes\|no>` | worktree 確認プロンプトをスキップ |
|
||||||
|
| `-q, --quiet` | 最小出力モード: AI 出力を抑制(CI 向け) |
|
||||||
|
| `--provider <name>` | エージェント provider を上書き(claude\|codex\|opencode\|mock) |
|
||||||
|
| `--model <name>` | エージェントモデルを上書き |
|
||||||
|
| `--config <path>` | グローバル設定ファイルのパス(デフォルト: `~/.takt/config.yaml`) |
|
||||||
|
|
||||||
|
## インタラクティブモード
|
||||||
|
|
||||||
|
AI との会話を通じてタスク内容を精緻化してから実行するモードです。タスクの要件が曖昧な場合や、AI と相談しながら内容を詰めたい場合に便利です。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# インタラクティブモードを開始(引数なし)
|
||||||
|
takt
|
||||||
|
|
||||||
|
# 初期メッセージを指定(短い単語のみ)
|
||||||
|
takt hello
|
||||||
|
```
|
||||||
|
|
||||||
|
**注意:** `--task` オプションを指定するとインタラクティブモードをスキップして直接実行します。Issue 参照(`#6`、`--issue`)はインタラクティブモードの初期入力として使用されます。
|
||||||
|
|
||||||
|
### フロー
|
||||||
|
|
||||||
|
1. Piece を選択
|
||||||
|
2. インタラクティブモードを選択(assistant / persona / quiet / passthrough)
|
||||||
|
3. AI との会話でタスク内容を精緻化
|
||||||
|
4. `/go` でタスク指示を確定(`/go 追加の指示` のように追記も可能)、または `/play <task>` でタスクを即座に実行
|
||||||
|
5. 実行(worktree 作成、piece 実行、PR 作成)
|
||||||
|
|
||||||
|
### インタラクティブモードの種類
|
||||||
|
|
||||||
|
| モード | 説明 |
|
||||||
|
|--------|------|
|
||||||
|
| `assistant` | デフォルト。AI がタスク指示を生成する前に明確化のための質問を行う。 |
|
||||||
|
| `persona` | 最初の movement の persona と会話(そのシステムプロンプトとツールを使用)。 |
|
||||||
|
| `quiet` | 質問なしでタスク指示を生成(ベストエフォート)。 |
|
||||||
|
| `passthrough` | AI 処理なしでユーザー入力をそのままタスクテキストとして使用。 |
|
||||||
|
|
||||||
|
Piece は YAML の `interactive_mode` フィールドでデフォルトモードを設定できます。
|
||||||
|
|
||||||
|
### 実行例
|
||||||
|
|
||||||
|
```
|
||||||
|
$ takt
|
||||||
|
|
||||||
|
Select piece:
|
||||||
|
> default (current)
|
||||||
|
Development/
|
||||||
|
Research/
|
||||||
|
Cancel
|
||||||
|
|
||||||
|
Interactive mode - Enter task content. Commands: /go (execute), /cancel (exit)
|
||||||
|
|
||||||
|
> I want to add user authentication feature
|
||||||
|
|
||||||
|
[AI が要件を確認・整理]
|
||||||
|
|
||||||
|
> /go
|
||||||
|
|
||||||
|
Proposed task instructions:
|
||||||
|
---
|
||||||
|
Implement user authentication feature.
|
||||||
|
|
||||||
|
Requirements:
|
||||||
|
- Login with email address and password
|
||||||
|
- JWT token-based authentication
|
||||||
|
- Password hashing (bcrypt)
|
||||||
|
- Login/logout API endpoints
|
||||||
|
---
|
||||||
|
|
||||||
|
Proceed with these task instructions? (Y/n) y
|
||||||
|
|
||||||
|
? Create worktree? (Y/n) y
|
||||||
|
|
||||||
|
[Piece の実行を開始...]
|
||||||
|
```
|
||||||
|
|
||||||
|
## 直接タスク実行
|
||||||
|
|
||||||
|
`--task` オプションを使用して、インタラクティブモードをスキップして直接実行できます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# --task オプションでタスク内容を指定
|
||||||
|
takt --task "Fix bug"
|
||||||
|
|
||||||
|
# piece を指定
|
||||||
|
takt --task "Add authentication" --piece expert
|
||||||
|
|
||||||
|
# PR を自動作成
|
||||||
|
takt --task "Fix bug" --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**注意:** 引数として文字列を渡す場合(例: `takt "Add login feature"`)は、初期メッセージとしてインタラクティブモードに入ります。
|
||||||
|
|
||||||
|
## GitHub Issue タスク
|
||||||
|
|
||||||
|
GitHub Issue を直接タスクとして実行できます。Issue のタイトル、本文、ラベル、コメントがタスク内容として自動的に取り込まれます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Issue 番号を指定して実行
|
||||||
|
takt #6
|
||||||
|
takt --issue 6
|
||||||
|
|
||||||
|
# Issue + piece 指定
|
||||||
|
takt #6 --piece expert
|
||||||
|
|
||||||
|
# Issue + PR 自動作成
|
||||||
|
takt #6 --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**要件:** [GitHub CLI](https://cli.github.com/)(`gh`)がインストールされ、認証済みである必要があります。
|
||||||
|
|
||||||
|
## タスク管理コマンド
|
||||||
|
|
||||||
|
`.takt/tasks.yaml` と `.takt/tasks/{slug}/` 配下のタスクディレクトリを使ったバッチ処理です。複数のタスクを蓄積し、後でまとめて実行するのに便利です。
|
||||||
|
|
||||||
|
### takt add
|
||||||
|
|
||||||
|
AI との会話でタスク要件を精緻化し、`.takt/tasks.yaml` にタスクを追加します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# AI との会話でタスク要件を精緻化し、タスクを追加
|
||||||
|
takt add
|
||||||
|
|
||||||
|
# GitHub Issue からタスクを追加(Issue 番号がブランチ名に反映される)
|
||||||
|
takt add #28
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt run
|
||||||
|
|
||||||
|
`.takt/tasks.yaml` のすべての pending タスクを実行します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# .takt/tasks.yaml の pending タスクをすべて実行
|
||||||
|
takt run
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt watch
|
||||||
|
|
||||||
|
`.takt/tasks.yaml` を監視し、タスクが追加されると自動実行する常駐プロセスです。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# .takt/tasks.yaml を監視してタスクを自動実行(常駐プロセス)
|
||||||
|
takt watch
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt list
|
||||||
|
|
||||||
|
タスクブランチの一覧表示と操作(マージ、削除など)を行います。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# タスクブランチの一覧表示(マージ/削除)
|
||||||
|
takt list
|
||||||
|
|
||||||
|
# 非インタラクティブモード(CI/スクリプト向け)
|
||||||
|
takt list --non-interactive
|
||||||
|
takt list --non-interactive --action diff --branch takt/my-branch
|
||||||
|
takt list --non-interactive --action delete --branch takt/my-branch --yes
|
||||||
|
takt list --non-interactive --format json
|
||||||
|
```
|
||||||
|
|
||||||
|
### タスクディレクトリワークフロー(作成 / 実行 / 確認)
|
||||||
|
|
||||||
|
1. `takt add` を実行し、`.takt/tasks.yaml` に pending レコードが作成されたことを確認。
|
||||||
|
2. 生成された `.takt/tasks/{slug}/order.md` を開き、必要に応じて詳細な仕様や参考資料を追記。
|
||||||
|
3. `takt run`(または `takt watch`)を実行して `tasks.yaml` の pending タスクを実行。
|
||||||
|
4. `task_dir` と同じ slug の `.takt/runs/{slug}/reports/` で出力を確認。
|
||||||
|
|
||||||
|
## Pipeline モード
|
||||||
|
|
||||||
|
`--pipeline` を指定すると、非インタラクティブな pipeline モードが有効になります。ブランチの作成、piece の実行、コミットとプッシュを自動的に行います。CI/CD 自動化に適しています。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# pipeline モードでタスクを実行
|
||||||
|
takt --pipeline --task "Fix bug"
|
||||||
|
|
||||||
|
# pipeline 実行 + PR 自動作成
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr
|
||||||
|
|
||||||
|
# Issue 情報をリンク
|
||||||
|
takt --pipeline --issue 99 --auto-pr
|
||||||
|
|
||||||
|
# piece とブランチを指定
|
||||||
|
takt --pipeline --task "Fix bug" -w magi -b feat/fix-bug
|
||||||
|
|
||||||
|
# リポジトリを指定(PR 作成用)
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr --repo owner/repo
|
||||||
|
|
||||||
|
# piece のみ実行(ブランチ作成、コミット、プッシュをスキップ)
|
||||||
|
takt --pipeline --task "Fix bug" --skip-git
|
||||||
|
|
||||||
|
# 最小出力モード(CI 向け)
|
||||||
|
takt --pipeline --task "Fix bug" --quiet
|
||||||
|
```
|
||||||
|
|
||||||
|
Pipeline モードでは、`--auto-pr` を指定しない限り PR は作成されません。
|
||||||
|
|
||||||
|
**GitHub 連携:** GitHub Actions で TAKT を使用する場合は [takt-action](https://github.com/nrslib/takt-action) を参照してください。PR レビューやタスク実行を自動化できます。
|
||||||
|
|
||||||
|
## ユーティリティコマンド
|
||||||
|
|
||||||
|
### takt switch
|
||||||
|
|
||||||
|
アクティブな piece をインタラクティブに切り替えます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt switch
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt eject
|
||||||
|
|
||||||
|
ビルトインの piece/persona をローカルディレクトリにコピーしてカスタマイズします。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# ビルトインの piece/persona をプロジェクト .takt/ にコピー
|
||||||
|
takt eject
|
||||||
|
|
||||||
|
# ~/.takt/(グローバル)にコピー
|
||||||
|
takt eject --global
|
||||||
|
|
||||||
|
# 特定のファセットをカスタマイズ用にエジェクト
|
||||||
|
takt eject persona coder
|
||||||
|
takt eject instruction plan --global
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt clear
|
||||||
|
|
||||||
|
エージェントの会話セッションをクリア(状態のリセット)します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt clear
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt export-cc
|
||||||
|
|
||||||
|
ビルトインの piece/persona を Claude Code Skill としてデプロイします。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt export-cc
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt catalog
|
||||||
|
|
||||||
|
レイヤー間で利用可能なファセットの一覧を表示します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt catalog
|
||||||
|
takt catalog personas
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt prompt
|
||||||
|
|
||||||
|
各 movement とフェーズの組み立て済みプロンプトをプレビューします。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt prompt [piece]
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt reset
|
||||||
|
|
||||||
|
設定をデフォルトにリセットします。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# グローバル設定をビルトインテンプレートにリセット(バックアップ付き)
|
||||||
|
takt reset config
|
||||||
|
|
||||||
|
# Piece カテゴリをビルトインのデフォルトにリセット
|
||||||
|
takt reset categories
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt metrics
|
||||||
|
|
||||||
|
アナリティクスメトリクスを表示します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# レビュー品質メトリクスを表示(デフォルト: 直近30日)
|
||||||
|
takt metrics review
|
||||||
|
|
||||||
|
# 時間枠を指定
|
||||||
|
takt metrics review --since 7d
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt purge
|
||||||
|
|
||||||
|
古いアナリティクスイベントファイルを削除します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 30日以上前のファイルを削除(デフォルト)
|
||||||
|
takt purge
|
||||||
|
|
||||||
|
# 保持期間を指定
|
||||||
|
takt purge --retention-days 14
|
||||||
|
```
|
||||||
310
docs/cli-reference.md
Normal file
310
docs/cli-reference.md
Normal file
@ -0,0 +1,310 @@
|
|||||||
|
# CLI Reference
|
||||||
|
|
||||||
|
[日本語](./cli-reference.ja.md)
|
||||||
|
|
||||||
|
This document provides a complete reference for all TAKT CLI commands and options.
|
||||||
|
|
||||||
|
## Global Options
|
||||||
|
|
||||||
|
| Option | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `--pipeline` | Enable pipeline (non-interactive) mode -- required for CI/automation |
|
||||||
|
| `-t, --task <text>` | Task content (alternative to GitHub Issue) |
|
||||||
|
| `-i, --issue <N>` | GitHub issue number (same as `#N` in interactive mode) |
|
||||||
|
| `-w, --piece <name or path>` | Piece name or path to piece YAML file |
|
||||||
|
| `-b, --branch <name>` | Specify branch name (auto-generated if omitted) |
|
||||||
|
| `--auto-pr` | Create PR (interactive: skip confirmation, pipeline: enable PR) |
|
||||||
|
| `--skip-git` | Skip branch creation, commit, and push (pipeline mode, piece-only) |
|
||||||
|
| `--repo <owner/repo>` | Specify repository (for PR creation) |
|
||||||
|
| `--create-worktree <yes\|no>` | Skip worktree confirmation prompt |
|
||||||
|
| `-q, --quiet` | Minimal output mode: suppress AI output (for CI) |
|
||||||
|
| `--provider <name>` | Override agent provider (claude\|codex\|opencode\|mock) |
|
||||||
|
| `--model <name>` | Override agent model |
|
||||||
|
| `--config <path>` | Path to global config file (default: `~/.takt/config.yaml`) |
|
||||||
|
|
||||||
|
## Interactive Mode
|
||||||
|
|
||||||
|
A mode where you refine task content through conversation with AI before execution. Useful when task requirements are ambiguous or when you want to clarify content while consulting with AI.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start interactive mode (no arguments)
|
||||||
|
takt
|
||||||
|
|
||||||
|
# Specify initial message (short word only)
|
||||||
|
takt hello
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** The `--task` option skips interactive mode and executes the task directly. Issue references (`#6`, `--issue`) are used as initial input in interactive mode.
|
||||||
|
|
||||||
|
### Flow
|
||||||
|
|
||||||
|
1. Select piece
|
||||||
|
2. Select interactive mode (assistant / persona / quiet / passthrough)
|
||||||
|
3. Refine task content through conversation with AI
|
||||||
|
4. Finalize task instructions with `/go` (you can also add additional instructions like `/go additional instructions`), or use `/play <task>` to execute a task immediately
|
||||||
|
5. Execute (create worktree, run piece, create PR)
|
||||||
|
|
||||||
|
### Interactive Mode Variants
|
||||||
|
|
||||||
|
| Mode | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `assistant` | Default. AI asks clarifying questions before generating task instructions. |
|
||||||
|
| `persona` | Conversation with the first movement's persona (uses its system prompt and tools). |
|
||||||
|
| `quiet` | Generates task instructions without asking questions (best-effort). |
|
||||||
|
| `passthrough` | Passes user input directly as task text without AI processing. |
|
||||||
|
|
||||||
|
Pieces can set a default mode via the `interactive_mode` field in YAML.
|
||||||
|
|
||||||
|
### Execution Example
|
||||||
|
|
||||||
|
```
|
||||||
|
$ takt
|
||||||
|
|
||||||
|
Select piece:
|
||||||
|
> default (current)
|
||||||
|
Development/
|
||||||
|
Research/
|
||||||
|
Cancel
|
||||||
|
|
||||||
|
Interactive mode - Enter task content. Commands: /go (execute), /cancel (exit)
|
||||||
|
|
||||||
|
> I want to add user authentication feature
|
||||||
|
|
||||||
|
[AI confirms and organizes requirements]
|
||||||
|
|
||||||
|
> /go
|
||||||
|
|
||||||
|
Proposed task instructions:
|
||||||
|
---
|
||||||
|
Implement user authentication feature.
|
||||||
|
|
||||||
|
Requirements:
|
||||||
|
- Login with email address and password
|
||||||
|
- JWT token-based authentication
|
||||||
|
- Password hashing (bcrypt)
|
||||||
|
- Login/logout API endpoints
|
||||||
|
---
|
||||||
|
|
||||||
|
Proceed with these task instructions? (Y/n) y
|
||||||
|
|
||||||
|
? Create worktree? (Y/n) y
|
||||||
|
|
||||||
|
[Piece execution starts...]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Direct Task Execution
|
||||||
|
|
||||||
|
Use the `--task` option to skip interactive mode and execute the task directly.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Specify task content with --task option
|
||||||
|
takt --task "Fix bug"
|
||||||
|
|
||||||
|
# Specify piece
|
||||||
|
takt --task "Add authentication" --piece expert
|
||||||
|
|
||||||
|
# Auto-create PR
|
||||||
|
takt --task "Fix bug" --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Passing a string as an argument (e.g., `takt "Add login feature"`) enters interactive mode with that string as the initial message.
|
||||||
|
|
||||||
|
## GitHub Issue Tasks
|
||||||
|
|
||||||
|
You can execute GitHub Issues directly as tasks. Issue title, body, labels, and comments are automatically incorporated as task content.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Execute by specifying issue number
|
||||||
|
takt #6
|
||||||
|
takt --issue 6
|
||||||
|
|
||||||
|
# Issue + piece specification
|
||||||
|
takt #6 --piece expert
|
||||||
|
|
||||||
|
# Issue + auto-create PR
|
||||||
|
takt #6 --auto-pr
|
||||||
|
```
|
||||||
|
|
||||||
|
**Requirements:** [GitHub CLI](https://cli.github.com/) (`gh`) must be installed and authenticated.
|
||||||
|
|
||||||
|
## Task Management Commands
|
||||||
|
|
||||||
|
Batch processing using `.takt/tasks.yaml` with task directories under `.takt/tasks/{slug}/`. Useful for accumulating multiple tasks and executing them together later.
|
||||||
|
|
||||||
|
### takt add
|
||||||
|
|
||||||
|
Refine task requirements through AI conversation, then add a task to `.takt/tasks.yaml`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Refine task requirements through AI conversation, then add task
|
||||||
|
takt add
|
||||||
|
|
||||||
|
# Add task from GitHub Issue (issue number reflected in branch name)
|
||||||
|
takt add #28
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt run
|
||||||
|
|
||||||
|
Execute all pending tasks from `.takt/tasks.yaml`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Execute all pending tasks in .takt/tasks.yaml
|
||||||
|
takt run
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt watch
|
||||||
|
|
||||||
|
Monitor `.takt/tasks.yaml` and auto-execute tasks as a resident process.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Monitor .takt/tasks.yaml and auto-execute tasks (resident process)
|
||||||
|
takt watch
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt list
|
||||||
|
|
||||||
|
List task branches and perform actions (merge, delete, etc.).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List task branches (merge/delete)
|
||||||
|
takt list
|
||||||
|
|
||||||
|
# Non-interactive mode (for CI/scripts)
|
||||||
|
takt list --non-interactive
|
||||||
|
takt list --non-interactive --action diff --branch takt/my-branch
|
||||||
|
takt list --non-interactive --action delete --branch takt/my-branch --yes
|
||||||
|
takt list --non-interactive --format json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Task Directory Workflow (Create / Run / Verify)
|
||||||
|
|
||||||
|
1. Run `takt add` and confirm a pending record is created in `.takt/tasks.yaml`.
|
||||||
|
2. Open the generated `.takt/tasks/{slug}/order.md` and add detailed specifications/references as needed.
|
||||||
|
3. Run `takt run` (or `takt watch`) to execute pending tasks from `tasks.yaml`.
|
||||||
|
4. Verify outputs in `.takt/runs/{slug}/reports/` using the same slug as `task_dir`.
|
||||||
|
|
||||||
|
## Pipeline Mode
|
||||||
|
|
||||||
|
Specifying `--pipeline` enables non-interactive pipeline mode. It automatically creates a branch, runs the piece, commits, and pushes. Suitable for CI/CD automation.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Execute task in pipeline mode
|
||||||
|
takt --pipeline --task "Fix bug"
|
||||||
|
|
||||||
|
# Pipeline execution + auto-create PR
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr
|
||||||
|
|
||||||
|
# Link issue information
|
||||||
|
takt --pipeline --issue 99 --auto-pr
|
||||||
|
|
||||||
|
# Specify piece and branch
|
||||||
|
takt --pipeline --task "Fix bug" -w magi -b feat/fix-bug
|
||||||
|
|
||||||
|
# Specify repository (for PR creation)
|
||||||
|
takt --pipeline --task "Fix bug" --auto-pr --repo owner/repo
|
||||||
|
|
||||||
|
# Piece execution only (skip branch creation, commit, push)
|
||||||
|
takt --pipeline --task "Fix bug" --skip-git
|
||||||
|
|
||||||
|
# Minimal output mode (for CI)
|
||||||
|
takt --pipeline --task "Fix bug" --quiet
|
||||||
|
```
|
||||||
|
|
||||||
|
In pipeline mode, PRs are not created unless `--auto-pr` is specified.
|
||||||
|
|
||||||
|
**GitHub Integration:** When using TAKT in GitHub Actions, see [takt-action](https://github.com/nrslib/takt-action). You can automate PR reviews and task execution.
|
||||||
|
|
||||||
|
## Utility Commands
|
||||||
|
|
||||||
|
### takt switch
|
||||||
|
|
||||||
|
Interactively switch the active piece.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt switch
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt eject
|
||||||
|
|
||||||
|
Copy builtin pieces/personas to your local directory for customization.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy builtin pieces/personas to project .takt/ for customization
|
||||||
|
takt eject
|
||||||
|
|
||||||
|
# Copy to ~/.takt/ (global) instead
|
||||||
|
takt eject --global
|
||||||
|
|
||||||
|
# Eject a specific facet for customization
|
||||||
|
takt eject persona coder
|
||||||
|
takt eject instruction plan --global
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt clear
|
||||||
|
|
||||||
|
Clear agent conversation sessions (reset state).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt clear
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt export-cc
|
||||||
|
|
||||||
|
Deploy builtin pieces/personas as a Claude Code Skill.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt export-cc
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt catalog
|
||||||
|
|
||||||
|
List available facets across layers.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt catalog
|
||||||
|
takt catalog personas
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt prompt
|
||||||
|
|
||||||
|
Preview assembled prompts for each movement and phase.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt prompt [piece]
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt reset
|
||||||
|
|
||||||
|
Reset settings to defaults.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Reset global config to builtin template (with backup)
|
||||||
|
takt reset config
|
||||||
|
|
||||||
|
# Reset piece categories to builtin defaults
|
||||||
|
takt reset categories
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt metrics
|
||||||
|
|
||||||
|
Show analytics metrics.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Show review quality metrics (default: last 30 days)
|
||||||
|
takt metrics review
|
||||||
|
|
||||||
|
# Specify time window
|
||||||
|
takt metrics review --since 7d
|
||||||
|
```
|
||||||
|
|
||||||
|
### takt purge
|
||||||
|
|
||||||
|
Purge old analytics event files.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Purge files older than 30 days (default)
|
||||||
|
takt purge
|
||||||
|
|
||||||
|
# Specify retention period
|
||||||
|
takt purge --retention-days 14
|
||||||
|
```
|
||||||
400
docs/configuration.ja.md
Normal file
400
docs/configuration.ja.md
Normal file
@ -0,0 +1,400 @@
|
|||||||
|
# 設定
|
||||||
|
|
||||||
|
[English](./configuration.md)
|
||||||
|
|
||||||
|
このドキュメントは TAKT の全設定オプションのリファレンスです。クイックスタートについては [README](../README.md) を参照してください。
|
||||||
|
|
||||||
|
## グローバル設定
|
||||||
|
|
||||||
|
`~/.takt/config.yaml` で TAKT のデフォルト設定を行います。このファイルは初回実行時に自動作成されます。すべてのフィールドは省略可能です。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
language: en # UI 言語: 'en' または 'ja'
|
||||||
|
default_piece: default # 新規プロジェクトのデフォルト piece
|
||||||
|
log_level: info # ログレベル: debug, info, warn, error
|
||||||
|
provider: claude # デフォルト provider: claude, codex, または opencode
|
||||||
|
model: sonnet # デフォルトモデル(省略可、provider にそのまま渡される)
|
||||||
|
branch_name_strategy: romaji # ブランチ名生成方式: 'romaji'(高速)または 'ai'(低速)
|
||||||
|
prevent_sleep: false # 実行中に macOS のアイドルスリープを防止(caffeinate)
|
||||||
|
notification_sound: true # 通知音の有効/無効
|
||||||
|
notification_sound_events: # イベントごとの通知音切り替え(省略可)
|
||||||
|
iteration_limit: false
|
||||||
|
piece_complete: true
|
||||||
|
piece_abort: true
|
||||||
|
run_complete: true # デフォルト有効。false で無効化
|
||||||
|
run_abort: true # デフォルト有効。false で無効化
|
||||||
|
concurrency: 1 # takt run の並列タスク数(1-10、デフォルト: 1 = 逐次実行)
|
||||||
|
task_poll_interval_ms: 500 # takt run での新規タスクポーリング間隔(100-5000、デフォルト: 500)
|
||||||
|
interactive_preview_movements: 3 # インタラクティブモードでの movement プレビュー数(0-10、デフォルト: 3)
|
||||||
|
|
||||||
|
# ランタイム環境デフォルト(piece_config.runtime で上書きしない限りすべての piece に適用)
|
||||||
|
# runtime:
|
||||||
|
# prepare:
|
||||||
|
# - gradle # .runtime/ に Gradle キャッシュ/設定を準備
|
||||||
|
# - node # .runtime/ に npm キャッシュを準備
|
||||||
|
|
||||||
|
# persona ごとの provider 上書き(省略可)
|
||||||
|
# piece を複製せずに特定の persona を別の provider にルーティング
|
||||||
|
# persona_providers:
|
||||||
|
# coder: codex # coder を Codex で実行
|
||||||
|
# ai-antipattern-reviewer: claude # レビュアーは Claude のまま
|
||||||
|
|
||||||
|
# provider 固有のパーミッションプロファイル(省略可)
|
||||||
|
# 優先順位: プロジェクト上書き > グローバル上書き > プロジェクトデフォルト > グローバルデフォルト > required_permission_mode(下限)
|
||||||
|
# provider_profiles:
|
||||||
|
# codex:
|
||||||
|
# default_permission_mode: full
|
||||||
|
# movement_permission_overrides:
|
||||||
|
# ai_review: readonly
|
||||||
|
# claude:
|
||||||
|
# default_permission_mode: edit
|
||||||
|
|
||||||
|
# API キー設定(省略可)
|
||||||
|
# 環境変数 TAKT_ANTHROPIC_API_KEY / TAKT_OPENAI_API_KEY / TAKT_OPENCODE_API_KEY で上書き可能
|
||||||
|
# anthropic_api_key: sk-ant-... # Claude(Anthropic)用
|
||||||
|
# openai_api_key: sk-... # Codex(OpenAI)用
|
||||||
|
# opencode_api_key: ... # OpenCode 用
|
||||||
|
|
||||||
|
# Codex CLI パス上書き(省略可)
|
||||||
|
# Codex SDK が使用する Codex CLI バイナリを上書き(実行可能ファイルの絶対パスが必要)
|
||||||
|
# 環境変数 TAKT_CODEX_CLI_PATH で上書き可能
|
||||||
|
# codex_cli_path: /usr/local/bin/codex
|
||||||
|
|
||||||
|
# ビルトイン piece フィルタリング(省略可)
|
||||||
|
# enable_builtin_pieces: true # false ですべてのビルトインを無効化
|
||||||
|
# disabled_builtins: [magi, passthrough] # 特定のビルトイン piece を無効化
|
||||||
|
|
||||||
|
# pipeline 実行設定(省略可)
|
||||||
|
# ブランチ名、コミットメッセージ、PR 本文をカスタマイズ
|
||||||
|
# pipeline:
|
||||||
|
# default_branch_prefix: "takt/"
|
||||||
|
# commit_message_template: "feat: {title} (#{issue})"
|
||||||
|
# pr_body_template: |
|
||||||
|
# ## Summary
|
||||||
|
# {issue_body}
|
||||||
|
# Closes #{issue}
|
||||||
|
```
|
||||||
|
|
||||||
|
### グローバル設定フィールドリファレンス
|
||||||
|
|
||||||
|
| フィールド | 型 | デフォルト | 説明 |
|
||||||
|
|-----------|------|---------|------|
|
||||||
|
| `language` | `"en"` \| `"ja"` | `"en"` | UI 言語 |
|
||||||
|
| `default_piece` | string | `"default"` | 新規プロジェクトのデフォルト piece |
|
||||||
|
| `log_level` | `"debug"` \| `"info"` \| `"warn"` \| `"error"` | `"info"` | ログレベル |
|
||||||
|
| `provider` | `"claude"` \| `"codex"` \| `"opencode"` | `"claude"` | デフォルト AI provider |
|
||||||
|
| `model` | string | - | デフォルトモデル名(provider にそのまま渡される) |
|
||||||
|
| `branch_name_strategy` | `"romaji"` \| `"ai"` | `"romaji"` | ブランチ名生成方式 |
|
||||||
|
| `prevent_sleep` | boolean | `false` | macOS アイドルスリープ防止(caffeinate) |
|
||||||
|
| `notification_sound` | boolean | `true` | 通知音の有効化 |
|
||||||
|
| `notification_sound_events` | object | - | イベントごとの通知音切り替え |
|
||||||
|
| `concurrency` | number (1-10) | `1` | `takt run` の並列タスク数 |
|
||||||
|
| `task_poll_interval_ms` | number (100-5000) | `500` | 新規タスクのポーリング間隔 |
|
||||||
|
| `interactive_preview_movements` | number (0-10) | `3` | インタラクティブモードでの movement プレビュー数 |
|
||||||
|
| `worktree_dir` | string | - | 共有クローンのディレクトリ(デフォルトは `../{clone-name}`) |
|
||||||
|
| `auto_pr` | boolean | - | worktree 実行後に PR を自動作成 |
|
||||||
|
| `verbose` | boolean | - | 詳細出力モード |
|
||||||
|
| `minimal_output` | boolean | `false` | AI 出力を抑制(CI 向け) |
|
||||||
|
| `runtime` | object | - | ランタイム環境デフォルト(例: `prepare: [gradle, node]`) |
|
||||||
|
| `persona_providers` | object | - | persona ごとの provider 上書き(例: `coder: codex`) |
|
||||||
|
| `provider_options` | object | - | グローバルな provider 固有オプション |
|
||||||
|
| `provider_profiles` | object | - | provider 固有のパーミッションプロファイル |
|
||||||
|
| `anthropic_api_key` | string | - | Claude 用 Anthropic API キー |
|
||||||
|
| `openai_api_key` | string | - | Codex 用 OpenAI API キー |
|
||||||
|
| `opencode_api_key` | string | - | OpenCode API キー |
|
||||||
|
| `codex_cli_path` | string | - | Codex CLI バイナリパス上書き(絶対パス) |
|
||||||
|
| `enable_builtin_pieces` | boolean | `true` | ビルトイン piece の有効化 |
|
||||||
|
| `disabled_builtins` | string[] | `[]` | 無効化する特定のビルトイン piece |
|
||||||
|
| `pipeline` | object | - | pipeline テンプレート設定 |
|
||||||
|
| `bookmarks_file` | string | - | ブックマークファイルのパス |
|
||||||
|
| `piece_categories_file` | string | - | piece カテゴリファイルのパス |
|
||||||
|
|
||||||
|
## プロジェクト設定
|
||||||
|
|
||||||
|
`.takt/config.yaml` でプロジェクト固有の設定を行います。このファイルはプロジェクトディレクトリで初めて TAKT を使用した際に作成されます。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# .takt/config.yaml
|
||||||
|
piece: default # このプロジェクトの現在の piece
|
||||||
|
provider: claude # このプロジェクトの provider 上書き
|
||||||
|
auto_pr: true # worktree 実行後に PR を自動作成
|
||||||
|
verbose: false # 詳細出力モード
|
||||||
|
|
||||||
|
# provider 固有オプション(グローバルを上書き、piece/movement で上書き可能)
|
||||||
|
# provider_options:
|
||||||
|
# codex:
|
||||||
|
# network_access: true
|
||||||
|
|
||||||
|
# provider 固有パーミッションプロファイル(プロジェクトレベルの上書き)
|
||||||
|
# provider_profiles:
|
||||||
|
# codex:
|
||||||
|
# default_permission_mode: full
|
||||||
|
# movement_permission_overrides:
|
||||||
|
# ai_review: readonly
|
||||||
|
```
|
||||||
|
|
||||||
|
### プロジェクト設定フィールドリファレンス
|
||||||
|
|
||||||
|
| フィールド | 型 | デフォルト | 説明 |
|
||||||
|
|-----------|------|---------|------|
|
||||||
|
| `piece` | string | `"default"` | このプロジェクトの現在の piece 名 |
|
||||||
|
| `provider` | `"claude"` \| `"codex"` \| `"opencode"` \| `"mock"` | - | provider 上書き |
|
||||||
|
| `auto_pr` | boolean | - | worktree 実行後に PR を自動作成 |
|
||||||
|
| `verbose` | boolean | - | 詳細出力モード |
|
||||||
|
| `provider_options` | object | - | provider 固有オプション |
|
||||||
|
| `provider_profiles` | object | - | provider 固有のパーミッションプロファイル |
|
||||||
|
|
||||||
|
プロジェクト設定の値は、両方が設定されている場合にグローバル設定を上書きします。
|
||||||
|
|
||||||
|
## API キー設定
|
||||||
|
|
||||||
|
TAKT は3つの provider をサポートしており、それぞれに API キーが必要です。API キーは環境変数または `~/.takt/config.yaml` で設定できます。
|
||||||
|
|
||||||
|
### 環境変数(推奨)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Claude(Anthropic)用
|
||||||
|
export TAKT_ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
|
||||||
|
# Codex(OpenAI)用
|
||||||
|
export TAKT_OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# OpenCode 用
|
||||||
|
export TAKT_OPENCODE_API_KEY=...
|
||||||
|
```
|
||||||
|
|
||||||
|
### 設定ファイル
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
anthropic_api_key: sk-ant-... # Claude 用
|
||||||
|
openai_api_key: sk-... # Codex 用
|
||||||
|
opencode_api_key: ... # OpenCode 用
|
||||||
|
```
|
||||||
|
|
||||||
|
### 優先順位
|
||||||
|
|
||||||
|
環境変数は `config.yaml` の設定よりも優先されます。
|
||||||
|
|
||||||
|
| Provider | 環境変数 | 設定キー |
|
||||||
|
|----------|---------|---------|
|
||||||
|
| Claude (Anthropic) | `TAKT_ANTHROPIC_API_KEY` | `anthropic_api_key` |
|
||||||
|
| Codex (OpenAI) | `TAKT_OPENAI_API_KEY` | `openai_api_key` |
|
||||||
|
| OpenCode | `TAKT_OPENCODE_API_KEY` | `opencode_api_key` |
|
||||||
|
|
||||||
|
### セキュリティ
|
||||||
|
|
||||||
|
- `config.yaml` に API キーを記載する場合、このファイルを Git にコミットしないよう注意してください。
|
||||||
|
- 環境変数の使用を検討してください。
|
||||||
|
- 必要に応じて `~/.takt/config.yaml` をグローバル `.gitignore` に追加してください。
|
||||||
|
- API キーを設定すれば、対応する CLI ツール(Claude Code、Codex、OpenCode)のインストールは不要です。TAKT が対応する API を直接呼び出します。
|
||||||
|
|
||||||
|
### Codex CLI パス上書き
|
||||||
|
|
||||||
|
Codex CLI バイナリパスは環境変数または設定ファイルで上書きできます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TAKT_CODEX_CLI_PATH=/usr/local/bin/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
codex_cli_path: /usr/local/bin/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
パスは実行可能ファイルの絶対パスである必要があります。`TAKT_CODEX_CLI_PATH` は設定ファイルの値よりも優先されます。
|
||||||
|
|
||||||
|
## モデル解決
|
||||||
|
|
||||||
|
各 movement で使用されるモデルは、次の優先順位(高い順)で解決されます。
|
||||||
|
|
||||||
|
1. **Piece movement の `model`** - piece YAML の movement 定義で指定
|
||||||
|
2. **カスタムエージェントの `model`** - `.takt/agents.yaml` のエージェントレベルのモデル
|
||||||
|
3. **グローバル設定の `model`** - `~/.takt/config.yaml` のデフォルトモデル
|
||||||
|
4. **Provider デフォルト** - provider のビルトインデフォルトにフォールバック(Claude: `sonnet`、Codex: `codex`、OpenCode: provider デフォルト)
|
||||||
|
|
||||||
|
### Provider 固有のモデルに関する注意
|
||||||
|
|
||||||
|
**Claude Code** はエイリアス(`opus`、`sonnet`、`haiku`、`opusplan`、`default`)と完全なモデル名(例: `claude-sonnet-4-5-20250929`)をサポートしています。`model` フィールドは provider CLI にそのまま渡されます。利用可能なモデルについては [Claude Code ドキュメント](https://docs.anthropic.com/en/docs/claude-code) を参照してください。
|
||||||
|
|
||||||
|
**Codex** は Codex SDK を通じてモデル文字列をそのまま使用します。未指定の場合、デフォルトは `codex` です。利用可能なモデルについては Codex のドキュメントを参照してください。
|
||||||
|
|
||||||
|
**OpenCode** は `provider/model` 形式のモデル(例: `opencode/big-pickle`)が必要です。OpenCode provider でモデルを省略すると設定エラーになります。
|
||||||
|
|
||||||
|
### 設定例
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
provider: claude
|
||||||
|
model: opus # すべての movement のデフォルトモデル(上書きされない限り)
|
||||||
|
```
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# piece.yaml - movement レベルの上書きが最高優先
|
||||||
|
movements:
|
||||||
|
- name: plan
|
||||||
|
model: opus # この movement はグローバル設定に関係なく opus を使用
|
||||||
|
...
|
||||||
|
- name: implement
|
||||||
|
# model 未指定 - グローバル設定(opus)にフォールバック
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Provider プロファイル
|
||||||
|
|
||||||
|
Provider プロファイルを使用すると、各 provider にデフォルトのパーミッションモードと movement ごとのパーミッション上書きを設定できます。異なる provider を異なるセキュリティポリシーで運用する場合に便利です。
|
||||||
|
|
||||||
|
### パーミッションモード
|
||||||
|
|
||||||
|
TAKT は provider 非依存の3つのパーミッションモードを使用します。
|
||||||
|
|
||||||
|
| モード | 説明 | Claude | Codex | OpenCode |
|
||||||
|
|--------|------|--------|-------|----------|
|
||||||
|
| `readonly` | 読み取り専用、ファイル変更不可 | `default` | `read-only` | `read-only` |
|
||||||
|
| `edit` | 確認付きでファイル編集を許可 | `acceptEdits` | `workspace-write` | `workspace-write` |
|
||||||
|
| `full` | すべてのパーミッションチェックをバイパス | `bypassPermissions` | `danger-full-access` | `danger-full-access` |
|
||||||
|
|
||||||
|
### 設定方法
|
||||||
|
|
||||||
|
Provider プロファイルはグローバルレベルとプロジェクトレベルの両方で設定できます。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml(グローバル)または .takt/config.yaml(プロジェクト)
|
||||||
|
provider_profiles:
|
||||||
|
codex:
|
||||||
|
default_permission_mode: full
|
||||||
|
movement_permission_overrides:
|
||||||
|
ai_review: readonly
|
||||||
|
claude:
|
||||||
|
default_permission_mode: edit
|
||||||
|
movement_permission_overrides:
|
||||||
|
implement: full
|
||||||
|
```
|
||||||
|
|
||||||
|
### パーミッション解決の優先順位
|
||||||
|
|
||||||
|
パーミッションモードは次の順序で解決されます(最初にマッチしたものが適用)。
|
||||||
|
|
||||||
|
1. **プロジェクト** `provider_profiles.<provider>.movement_permission_overrides.<movement>`
|
||||||
|
2. **グローバル** `provider_profiles.<provider>.movement_permission_overrides.<movement>`
|
||||||
|
3. **プロジェクト** `provider_profiles.<provider>.default_permission_mode`
|
||||||
|
4. **グローバル** `provider_profiles.<provider>.default_permission_mode`
|
||||||
|
5. **Movement** `required_permission_mode`(最低限の下限として機能)
|
||||||
|
|
||||||
|
movement の `required_permission_mode` は最低限の下限を設定します。provider プロファイルから解決されたモードが要求モードよりも低い場合、要求モードが使用されます。たとえば、movement が `edit` を要求しているがプロファイルが `readonly` に解決される場合、実効モードは `edit` になります。
|
||||||
|
|
||||||
|
### Persona Provider
|
||||||
|
|
||||||
|
piece を複製せずに、特定の persona を別の provider にルーティングできます。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
persona_providers:
|
||||||
|
coder: codex # coder persona を Codex で実行
|
||||||
|
ai-antipattern-reviewer: claude # レビュアーは Claude のまま
|
||||||
|
```
|
||||||
|
|
||||||
|
これにより、単一の piece 内で provider を混在させることができます。persona 名は movement 定義の `persona` キーに対してマッチされます。
|
||||||
|
|
||||||
|
## Piece カテゴリ
|
||||||
|
|
||||||
|
`takt switch` や piece 選択プロンプトでの UI 表示を改善するために、piece をカテゴリに整理できます。
|
||||||
|
|
||||||
|
### 設定方法
|
||||||
|
|
||||||
|
カテゴリは次の場所で設定できます。
|
||||||
|
- `builtins/{lang}/piece-categories.yaml` - デフォルトのビルトインカテゴリ
|
||||||
|
- `~/.takt/config.yaml` または `piece_categories_file` で指定した別のカテゴリファイル
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml または専用カテゴリファイル
|
||||||
|
piece_categories:
|
||||||
|
Development:
|
||||||
|
pieces: [default, simple]
|
||||||
|
children:
|
||||||
|
Backend:
|
||||||
|
pieces: [expert-cqrs]
|
||||||
|
Frontend:
|
||||||
|
pieces: [expert]
|
||||||
|
Research:
|
||||||
|
pieces: [research, magi]
|
||||||
|
|
||||||
|
show_others_category: true # 未分類の piece を表示(デフォルト: true)
|
||||||
|
others_category_name: "Other Pieces" # 未分類カテゴリの名前
|
||||||
|
```
|
||||||
|
|
||||||
|
### カテゴリ機能
|
||||||
|
|
||||||
|
- **ネストされたカテゴリ** - 階層的な整理のための無制限の深さ
|
||||||
|
- **カテゴリごとの piece リスト** - 特定のカテゴリに piece を割り当て
|
||||||
|
- **その他カテゴリ** - 未分類の piece を自動収集(`show_others_category: false` で無効化可能)
|
||||||
|
- **ビルトイン piece フィルタリング** - `enable_builtin_pieces: false` ですべてのビルトインを無効化、または `disabled_builtins: [name1, name2]` で選択的に無効化
|
||||||
|
|
||||||
|
### カテゴリのリセット
|
||||||
|
|
||||||
|
piece カテゴリをビルトインのデフォルトにリセットできます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt reset categories
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pipeline テンプレート
|
||||||
|
|
||||||
|
Pipeline モード(`--pipeline`)では、ブランチ名、コミットメッセージ、PR 本文をカスタマイズするテンプレートをサポートしています。
|
||||||
|
|
||||||
|
### 設定方法
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
pipeline:
|
||||||
|
default_branch_prefix: "takt/"
|
||||||
|
commit_message_template: "feat: {title} (#{issue})"
|
||||||
|
pr_body_template: |
|
||||||
|
## Summary
|
||||||
|
{issue_body}
|
||||||
|
Closes #{issue}
|
||||||
|
```
|
||||||
|
|
||||||
|
### テンプレート変数
|
||||||
|
|
||||||
|
| 変数 | 使用可能な場所 | 説明 |
|
||||||
|
|------|--------------|------|
|
||||||
|
| `{title}` | コミットメッセージ | Issue タイトル |
|
||||||
|
| `{issue}` | コミットメッセージ、PR 本文 | Issue 番号 |
|
||||||
|
| `{issue_body}` | PR 本文 | Issue 本文 |
|
||||||
|
| `{report}` | PR 本文 | Piece 実行レポート |
|
||||||
|
|
||||||
|
### Pipeline CLI オプション
|
||||||
|
|
||||||
|
| オプション | 説明 |
|
||||||
|
|-----------|------|
|
||||||
|
| `--pipeline` | pipeline(非インタラクティブ)モードを有効化 |
|
||||||
|
| `--auto-pr` | 実行後に PR を作成 |
|
||||||
|
| `--skip-git` | ブランチ作成、コミット、プッシュをスキップ(piece のみ実行) |
|
||||||
|
| `--repo <owner/repo>` | PR 作成用のリポジトリを指定 |
|
||||||
|
| `-q, --quiet` | 最小出力モード(AI 出力を抑制) |
|
||||||
|
|
||||||
|
## デバッグ
|
||||||
|
|
||||||
|
### デバッグログ
|
||||||
|
|
||||||
|
`~/.takt/config.yaml` で `debug_enabled: true` を設定するか、`.takt/debug.yaml` ファイルを作成してデバッグログを有効化できます。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# .takt/debug.yaml
|
||||||
|
enabled: true
|
||||||
|
```
|
||||||
|
|
||||||
|
デバッグログは `.takt/logs/debug.log` に NDJSON 形式で出力されます。
|
||||||
|
|
||||||
|
### 詳細モード
|
||||||
|
|
||||||
|
空の `.takt/verbose` ファイルを作成すると、詳細なコンソール出力が有効になります。これにより、デバッグログも自動的に有効化されます。
|
||||||
|
|
||||||
|
または、設定ファイルで `verbose: true` を設定することもできます。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml または .takt/config.yaml
|
||||||
|
verbose: true
|
||||||
|
```
|
||||||
400
docs/configuration.md
Normal file
400
docs/configuration.md
Normal file
@ -0,0 +1,400 @@
|
|||||||
|
# Configuration
|
||||||
|
|
||||||
|
[日本語](./configuration.ja.md)
|
||||||
|
|
||||||
|
This document is a reference for all TAKT configuration options. For a quick start, see the main [README](../README.md).
|
||||||
|
|
||||||
|
## Global Configuration
|
||||||
|
|
||||||
|
Configure TAKT defaults in `~/.takt/config.yaml`. This file is created automatically on first run. All fields are optional.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
language: en # UI language: 'en' or 'ja'
|
||||||
|
default_piece: default # Default piece for new projects
|
||||||
|
log_level: info # Log level: debug, info, warn, error
|
||||||
|
provider: claude # Default provider: claude, codex, or opencode
|
||||||
|
model: sonnet # Default model (optional, passed to provider as-is)
|
||||||
|
branch_name_strategy: romaji # Branch name generation: 'romaji' (fast) or 'ai' (slow)
|
||||||
|
prevent_sleep: false # Prevent macOS idle sleep during execution (caffeinate)
|
||||||
|
notification_sound: true # Enable/disable notification sounds
|
||||||
|
notification_sound_events: # Optional per-event toggles
|
||||||
|
iteration_limit: false
|
||||||
|
piece_complete: true
|
||||||
|
piece_abort: true
|
||||||
|
run_complete: true # Enabled by default; set false to disable
|
||||||
|
run_abort: true # Enabled by default; set false to disable
|
||||||
|
concurrency: 1 # Parallel task count for takt run (1-10, default: 1 = sequential)
|
||||||
|
task_poll_interval_ms: 500 # Polling interval for new tasks during takt run (100-5000, default: 500)
|
||||||
|
interactive_preview_movements: 3 # Movement previews in interactive mode (0-10, default: 3)
|
||||||
|
|
||||||
|
# Runtime environment defaults (applies to all pieces unless piece_config.runtime overrides)
|
||||||
|
# runtime:
|
||||||
|
# prepare:
|
||||||
|
# - gradle # Prepare Gradle cache/config in .runtime/
|
||||||
|
# - node # Prepare npm cache in .runtime/
|
||||||
|
|
||||||
|
# Per-persona provider overrides (optional)
|
||||||
|
# Route specific personas to different providers without duplicating pieces
|
||||||
|
# persona_providers:
|
||||||
|
# coder: codex # Run coder on Codex
|
||||||
|
# ai-antipattern-reviewer: claude # Keep reviewers on Claude
|
||||||
|
|
||||||
|
# Provider-specific permission profiles (optional)
|
||||||
|
# Priority: project override > global override > project default > global default > required_permission_mode (floor)
|
||||||
|
# provider_profiles:
|
||||||
|
# codex:
|
||||||
|
# default_permission_mode: full
|
||||||
|
# movement_permission_overrides:
|
||||||
|
# ai_review: readonly
|
||||||
|
# claude:
|
||||||
|
# default_permission_mode: edit
|
||||||
|
|
||||||
|
# API Key configuration (optional)
|
||||||
|
# Can be overridden by environment variables TAKT_ANTHROPIC_API_KEY / TAKT_OPENAI_API_KEY / TAKT_OPENCODE_API_KEY
|
||||||
|
# anthropic_api_key: sk-ant-... # For Claude (Anthropic)
|
||||||
|
# openai_api_key: sk-... # For Codex (OpenAI)
|
||||||
|
# opencode_api_key: ... # For OpenCode
|
||||||
|
|
||||||
|
# Codex CLI path override (optional)
|
||||||
|
# Override the Codex CLI binary used by the Codex SDK (must be an absolute path to an executable file)
|
||||||
|
# Can be overridden by TAKT_CODEX_CLI_PATH environment variable
|
||||||
|
# codex_cli_path: /usr/local/bin/codex
|
||||||
|
|
||||||
|
# Builtin piece filtering (optional)
|
||||||
|
# enable_builtin_pieces: true # Set false to disable all builtins
|
||||||
|
# disabled_builtins: [magi, passthrough] # Disable specific builtin pieces
|
||||||
|
|
||||||
|
# Pipeline execution configuration (optional)
|
||||||
|
# Customize branch names, commit messages, and PR body.
|
||||||
|
# pipeline:
|
||||||
|
# default_branch_prefix: "takt/"
|
||||||
|
# commit_message_template: "feat: {title} (#{issue})"
|
||||||
|
# pr_body_template: |
|
||||||
|
# ## Summary
|
||||||
|
# {issue_body}
|
||||||
|
# Closes #{issue}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Global Config Field Reference
|
||||||
|
|
||||||
|
| Field | Type | Default | Description |
|
||||||
|
|-------|------|---------|-------------|
|
||||||
|
| `language` | `"en"` \| `"ja"` | `"en"` | UI language |
|
||||||
|
| `default_piece` | string | `"default"` | Default piece for new projects |
|
||||||
|
| `log_level` | `"debug"` \| `"info"` \| `"warn"` \| `"error"` | `"info"` | Log level |
|
||||||
|
| `provider` | `"claude"` \| `"codex"` \| `"opencode"` | `"claude"` | Default AI provider |
|
||||||
|
| `model` | string | - | Default model name (passed to provider as-is) |
|
||||||
|
| `branch_name_strategy` | `"romaji"` \| `"ai"` | `"romaji"` | Branch name generation strategy |
|
||||||
|
| `prevent_sleep` | boolean | `false` | Prevent macOS idle sleep (caffeinate) |
|
||||||
|
| `notification_sound` | boolean | `true` | Enable notification sounds |
|
||||||
|
| `notification_sound_events` | object | - | Per-event notification sound toggles |
|
||||||
|
| `concurrency` | number (1-10) | `1` | Parallel task count for `takt run` |
|
||||||
|
| `task_poll_interval_ms` | number (100-5000) | `500` | Polling interval for new tasks |
|
||||||
|
| `interactive_preview_movements` | number (0-10) | `3` | Movement previews in interactive mode |
|
||||||
|
| `worktree_dir` | string | - | Directory for shared clones (defaults to `../{clone-name}`) |
|
||||||
|
| `auto_pr` | boolean | - | Auto-create PR after worktree execution |
|
||||||
|
| `verbose` | boolean | - | Verbose output mode |
|
||||||
|
| `minimal_output` | boolean | `false` | Suppress AI output (for CI) |
|
||||||
|
| `runtime` | object | - | Runtime environment defaults (e.g., `prepare: [gradle, node]`) |
|
||||||
|
| `persona_providers` | object | - | Per-persona provider overrides (e.g., `coder: codex`) |
|
||||||
|
| `provider_options` | object | - | Global provider-specific options |
|
||||||
|
| `provider_profiles` | object | - | Provider-specific permission profiles |
|
||||||
|
| `anthropic_api_key` | string | - | Anthropic API key for Claude |
|
||||||
|
| `openai_api_key` | string | - | OpenAI API key for Codex |
|
||||||
|
| `opencode_api_key` | string | - | OpenCode API key |
|
||||||
|
| `codex_cli_path` | string | - | Codex CLI binary path override (absolute) |
|
||||||
|
| `enable_builtin_pieces` | boolean | `true` | Enable builtin pieces |
|
||||||
|
| `disabled_builtins` | string[] | `[]` | Specific builtin pieces to disable |
|
||||||
|
| `pipeline` | object | - | Pipeline template settings |
|
||||||
|
| `bookmarks_file` | string | - | Path to bookmarks file |
|
||||||
|
| `piece_categories_file` | string | - | Path to piece categories file |
|
||||||
|
|
||||||
|
## Project Configuration
|
||||||
|
|
||||||
|
Configure project-specific settings in `.takt/config.yaml`. This file is created when you first use TAKT in a project directory.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# .takt/config.yaml
|
||||||
|
piece: default # Current piece for this project
|
||||||
|
provider: claude # Override provider for this project
|
||||||
|
auto_pr: true # Auto-create PR after worktree execution
|
||||||
|
verbose: false # Verbose output mode
|
||||||
|
|
||||||
|
# Provider-specific options (overrides global, overridden by piece/movement)
|
||||||
|
# provider_options:
|
||||||
|
# codex:
|
||||||
|
# network_access: true
|
||||||
|
|
||||||
|
# Provider-specific permission profiles (project-level override)
|
||||||
|
# provider_profiles:
|
||||||
|
# codex:
|
||||||
|
# default_permission_mode: full
|
||||||
|
# movement_permission_overrides:
|
||||||
|
# ai_review: readonly
|
||||||
|
```
|
||||||
|
|
||||||
|
### Project Config Field Reference
|
||||||
|
|
||||||
|
| Field | Type | Default | Description |
|
||||||
|
|-------|------|---------|-------------|
|
||||||
|
| `piece` | string | `"default"` | Current piece name for this project |
|
||||||
|
| `provider` | `"claude"` \| `"codex"` \| `"opencode"` \| `"mock"` | - | Override provider |
|
||||||
|
| `auto_pr` | boolean | - | Auto-create PR after worktree execution |
|
||||||
|
| `verbose` | boolean | - | Verbose output mode |
|
||||||
|
| `provider_options` | object | - | Provider-specific options |
|
||||||
|
| `provider_profiles` | object | - | Provider-specific permission profiles |
|
||||||
|
|
||||||
|
Project config values override global config when both are set.
|
||||||
|
|
||||||
|
## API Key Configuration
|
||||||
|
|
||||||
|
TAKT supports three providers, each with its own API key. API keys can be configured via environment variables or `~/.takt/config.yaml`.
|
||||||
|
|
||||||
|
### Environment Variables (Recommended)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For Claude (Anthropic)
|
||||||
|
export TAKT_ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
|
||||||
|
# For Codex (OpenAI)
|
||||||
|
export TAKT_OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# For OpenCode
|
||||||
|
export TAKT_OPENCODE_API_KEY=...
|
||||||
|
```
|
||||||
|
|
||||||
|
### Config File
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
anthropic_api_key: sk-ant-... # For Claude
|
||||||
|
openai_api_key: sk-... # For Codex
|
||||||
|
opencode_api_key: ... # For OpenCode
|
||||||
|
```
|
||||||
|
|
||||||
|
### Priority
|
||||||
|
|
||||||
|
Environment variables take precedence over `config.yaml` settings.
|
||||||
|
|
||||||
|
| Provider | Environment Variable | Config Key |
|
||||||
|
|----------|---------------------|------------|
|
||||||
|
| Claude (Anthropic) | `TAKT_ANTHROPIC_API_KEY` | `anthropic_api_key` |
|
||||||
|
| Codex (OpenAI) | `TAKT_OPENAI_API_KEY` | `openai_api_key` |
|
||||||
|
| OpenCode | `TAKT_OPENCODE_API_KEY` | `opencode_api_key` |
|
||||||
|
|
||||||
|
### Security
|
||||||
|
|
||||||
|
- If you write API keys in `config.yaml`, be careful not to commit this file to Git.
|
||||||
|
- Consider using environment variables instead.
|
||||||
|
- Add `~/.takt/config.yaml` to your global `.gitignore` if needed.
|
||||||
|
- If you set an API key, installing the corresponding CLI tool (Claude Code, Codex, OpenCode) is not necessary. TAKT directly calls the respective API.
|
||||||
|
|
||||||
|
### Codex CLI Path Override
|
||||||
|
|
||||||
|
You can override the Codex CLI binary path using either an environment variable or config:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TAKT_CODEX_CLI_PATH=/usr/local/bin/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
codex_cli_path: /usr/local/bin/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
The path must be an absolute path to an executable file. `TAKT_CODEX_CLI_PATH` takes precedence over the config file value.
|
||||||
|
|
||||||
|
## Model Resolution
|
||||||
|
|
||||||
|
The model used for each movement is resolved with the following priority order (highest first):
|
||||||
|
|
||||||
|
1. **Piece movement `model`** - Specified in the movement definition in piece YAML
|
||||||
|
2. **Custom agent `model`** - Agent-level model in `.takt/agents.yaml`
|
||||||
|
3. **Global config `model`** - Default model in `~/.takt/config.yaml`
|
||||||
|
4. **Provider default** - Falls back to the provider's built-in default (Claude: `sonnet`, Codex: `codex`, OpenCode: provider default)
|
||||||
|
|
||||||
|
### Provider-specific Model Notes
|
||||||
|
|
||||||
|
**Claude Code** supports aliases (`opus`, `sonnet`, `haiku`, `opusplan`, `default`) and full model names (e.g., `claude-sonnet-4-5-20250929`). The `model` field is passed directly to the provider CLI. Refer to the [Claude Code documentation](https://docs.anthropic.com/en/docs/claude-code) for available models.
|
||||||
|
|
||||||
|
**Codex** uses the model string as-is via the Codex SDK. If unspecified, defaults to `codex`. Refer to Codex documentation for available models.
|
||||||
|
|
||||||
|
**OpenCode** requires a model in `provider/model` format (e.g., `opencode/big-pickle`). Omitting the model for the OpenCode provider will result in a configuration error.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
provider: claude
|
||||||
|
model: opus # Default model for all movements (unless overridden)
|
||||||
|
```
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# piece.yaml - movement-level override takes highest priority
|
||||||
|
movements:
|
||||||
|
- name: plan
|
||||||
|
model: opus # This movement uses opus regardless of global config
|
||||||
|
...
|
||||||
|
- name: implement
|
||||||
|
# No model specified - falls back to global config (opus)
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Provider Profiles
|
||||||
|
|
||||||
|
Provider profiles allow you to set default permission modes and per-movement permission overrides for each provider. This is useful when running different providers with different security postures.
|
||||||
|
|
||||||
|
### Permission Modes
|
||||||
|
|
||||||
|
TAKT uses three provider-independent permission modes:
|
||||||
|
|
||||||
|
| Mode | Description | Claude | Codex | OpenCode |
|
||||||
|
|------|-------------|--------|-------|----------|
|
||||||
|
| `readonly` | Read-only access, no file modifications | `default` | `read-only` | `read-only` |
|
||||||
|
| `edit` | Allow file edits with confirmation | `acceptEdits` | `workspace-write` | `workspace-write` |
|
||||||
|
| `full` | Bypass all permission checks | `bypassPermissions` | `danger-full-access` | `danger-full-access` |
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
Provider profiles can be set at both global and project levels:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml (global) or .takt/config.yaml (project)
|
||||||
|
provider_profiles:
|
||||||
|
codex:
|
||||||
|
default_permission_mode: full
|
||||||
|
movement_permission_overrides:
|
||||||
|
ai_review: readonly
|
||||||
|
claude:
|
||||||
|
default_permission_mode: edit
|
||||||
|
movement_permission_overrides:
|
||||||
|
implement: full
|
||||||
|
```
|
||||||
|
|
||||||
|
### Permission Resolution Priority
|
||||||
|
|
||||||
|
Permission mode is resolved in the following order (first match wins):
|
||||||
|
|
||||||
|
1. **Project** `provider_profiles.<provider>.movement_permission_overrides.<movement>`
|
||||||
|
2. **Global** `provider_profiles.<provider>.movement_permission_overrides.<movement>`
|
||||||
|
3. **Project** `provider_profiles.<provider>.default_permission_mode`
|
||||||
|
4. **Global** `provider_profiles.<provider>.default_permission_mode`
|
||||||
|
5. **Movement** `required_permission_mode` (acts as a minimum floor)
|
||||||
|
|
||||||
|
The `required_permission_mode` on a movement sets the minimum floor. If the resolved mode from provider profiles is lower than the required mode, the required mode is used instead. For example, if a movement requires `edit` but the profile resolves to `readonly`, the effective mode will be `edit`.
|
||||||
|
|
||||||
|
### Persona Providers
|
||||||
|
|
||||||
|
Route specific personas to different providers without duplicating pieces:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
persona_providers:
|
||||||
|
coder: codex # Run coder persona on Codex
|
||||||
|
ai-antipattern-reviewer: claude # Keep reviewers on Claude
|
||||||
|
```
|
||||||
|
|
||||||
|
This allows mixing providers within a single piece. The persona name is matched against the `persona` key in the movement definition.
|
||||||
|
|
||||||
|
## Piece Categories
|
||||||
|
|
||||||
|
Organize pieces into categories for better UI presentation in `takt switch` and piece selection prompts.
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
Categories can be configured in:
|
||||||
|
- `builtins/{lang}/piece-categories.yaml` - Default builtin categories
|
||||||
|
- `~/.takt/config.yaml` or a separate categories file specified by `piece_categories_file`
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml or dedicated categories file
|
||||||
|
piece_categories:
|
||||||
|
Development:
|
||||||
|
pieces: [default, simple]
|
||||||
|
children:
|
||||||
|
Backend:
|
||||||
|
pieces: [expert-cqrs]
|
||||||
|
Frontend:
|
||||||
|
pieces: [expert]
|
||||||
|
Research:
|
||||||
|
pieces: [research, magi]
|
||||||
|
|
||||||
|
show_others_category: true # Show uncategorized pieces (default: true)
|
||||||
|
others_category_name: "Other Pieces" # Name for uncategorized category
|
||||||
|
```
|
||||||
|
|
||||||
|
### Category Features
|
||||||
|
|
||||||
|
- **Nested categories** - Unlimited depth for hierarchical organization
|
||||||
|
- **Per-category piece lists** - Assign pieces to specific categories
|
||||||
|
- **Others category** - Automatically collects uncategorized pieces (can be disabled via `show_others_category: false`)
|
||||||
|
- **Builtin piece filtering** - Disable all builtins via `enable_builtin_pieces: false`, or selectively via `disabled_builtins: [name1, name2]`
|
||||||
|
|
||||||
|
### Resetting Categories
|
||||||
|
|
||||||
|
Reset piece categories to builtin defaults:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt reset categories
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pipeline Templates
|
||||||
|
|
||||||
|
Pipeline mode (`--pipeline`) supports customizable templates for branch names, commit messages, and PR bodies.
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml
|
||||||
|
pipeline:
|
||||||
|
default_branch_prefix: "takt/"
|
||||||
|
commit_message_template: "feat: {title} (#{issue})"
|
||||||
|
pr_body_template: |
|
||||||
|
## Summary
|
||||||
|
{issue_body}
|
||||||
|
Closes #{issue}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Template Variables
|
||||||
|
|
||||||
|
| Variable | Available In | Description |
|
||||||
|
|----------|-------------|-------------|
|
||||||
|
| `{title}` | Commit message | Issue title |
|
||||||
|
| `{issue}` | Commit message, PR body | Issue number |
|
||||||
|
| `{issue_body}` | PR body | Issue body |
|
||||||
|
| `{report}` | PR body | Piece execution report |
|
||||||
|
|
||||||
|
### Pipeline CLI Options
|
||||||
|
|
||||||
|
| Option | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `--pipeline` | Enable pipeline (non-interactive) mode |
|
||||||
|
| `--auto-pr` | Create PR after execution |
|
||||||
|
| `--skip-git` | Skip branch creation, commit, and push (piece-only) |
|
||||||
|
| `--repo <owner/repo>` | Repository for PR creation |
|
||||||
|
| `-q, --quiet` | Minimal output mode (suppress AI output) |
|
||||||
|
|
||||||
|
## Debugging
|
||||||
|
|
||||||
|
### Debug Logging
|
||||||
|
|
||||||
|
Enable debug logging by setting `debug_enabled: true` in `~/.takt/config.yaml` or by creating a `.takt/debug.yaml` file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# .takt/debug.yaml
|
||||||
|
enabled: true
|
||||||
|
```
|
||||||
|
|
||||||
|
Debug logs are written to `.takt/logs/debug.log` in NDJSON format.
|
||||||
|
|
||||||
|
### Verbose Mode
|
||||||
|
|
||||||
|
Create an empty `.takt/verbose` file to enable verbose console output. This automatically enables debug logging.
|
||||||
|
|
||||||
|
Alternatively, set `verbose: true` in your config:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ~/.takt/config.yaml or .takt/config.yaml
|
||||||
|
verbose: true
|
||||||
|
```
|
||||||
42
docs/plan.md
42
docs/plan.md
@ -1,42 +0,0 @@
|
|||||||
- perform_phase1_message.md
|
|
||||||
- ここから status Rule を排除する(phase3に書けばいい)
|
|
||||||
- perform_phase2_message.md
|
|
||||||
- 「上記のReport Directory内のファイルのみ使用してください。** 他のレポートディレクトリは検索/参照しないでください。」は上記ってのがいらないのではないか
|
|
||||||
- 「**このフェーズではツールは使えません。レポート内容をテキストとして直接回答してください。**」が重複することがあるので削除せよ。
|
|
||||||
- JSON形式について触れる必要はない。
|
|
||||||
- perform_phase3_message.md
|
|
||||||
- status Rule を追加するか聞く
|
|
||||||
- perform_agent_system_prompt.md
|
|
||||||
- これ、エージェントのデータを挿入してないの……?
|
|
||||||
- 全体的に
|
|
||||||
- 音楽にひもづける
|
|
||||||
- つまり、従来の用語をやめて pieces にする
|
|
||||||
- 現pieceファイルにあるstepsもmovementsにする(全ファイルの修正)
|
|
||||||
- stepという言葉はmovementになる。phaseもmovementが適しているだろう(これは interactive における phase のことをいっていない)
|
|
||||||
- _language パラメータは消せ
|
|
||||||
- ピースを指定すると実際に送られるプロンプトを組み立てて表示する機能かツールを作れるか
|
|
||||||
- メタ領域を用意して説明、どこで利用されるかの説明、使えるテンプレートとその説明をかいて、その他必要な情報があれば入れて。
|
|
||||||
- 英語と日本語が共通でもかならずファイルはわけて同じ文章を書いておく
|
|
||||||
- 無駄な空行とか消してほしい
|
|
||||||
```
|
|
||||||
{{#if hasPreviousResponse}}
|
|
||||||
|
|
||||||
## Previous Response
|
|
||||||
{{previousResponse}}
|
|
||||||
{{/if}}
|
|
||||||
{{#if hasUserInputs}}
|
|
||||||
|
|
||||||
## Additional User Inputs
|
|
||||||
{{userInputs}}
|
|
||||||
```
|
|
||||||
これは↓のがいいんじゃない?
|
|
||||||
```
|
|
||||||
{{#if hasPreviousResponse}}
|
|
||||||
## Previous Response
|
|
||||||
{{previousResponse}}
|
|
||||||
{{/if}}
|
|
||||||
|
|
||||||
{{#if hasUserInputs}}
|
|
||||||
## Additional User Inputs
|
|
||||||
{{userInputs}}
|
|
||||||
```
|
|
||||||
323
docs/task-management.ja.md
Normal file
323
docs/task-management.ja.md
Normal file
@ -0,0 +1,323 @@
|
|||||||
|
[English](./task-management.md)
|
||||||
|
|
||||||
|
# タスク管理
|
||||||
|
|
||||||
|
## 概要
|
||||||
|
|
||||||
|
TAKT は複数のタスクを蓄積してバッチ実行するためのタスク管理ワークフローを提供します。基本的な流れは次の通りです。
|
||||||
|
|
||||||
|
1. **`takt add`** -- AI との会話でタスク要件を精緻化し、`.takt/tasks.yaml` に保存
|
||||||
|
2. **タスクの蓄積** -- `order.md` ファイルを編集し、参考資料を添付
|
||||||
|
3. **`takt run`** -- すべての pending タスクを一括実行(逐次または並列)
|
||||||
|
4. **`takt list`** -- 結果を確認し、ブランチのマージ、失敗のリトライ、指示の追加
|
||||||
|
|
||||||
|
各タスクは隔離された共有クローン(オプション)で実行され、レポートを生成し、`takt list` でマージまたは破棄できるブランチを作成します。
|
||||||
|
|
||||||
|
## タスクの追加(`takt add`)
|
||||||
|
|
||||||
|
`takt add` を使用して `.takt/tasks.yaml` に新しいタスクエントリを作成します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# インラインテキストでタスクを追加
|
||||||
|
takt add "Implement user authentication"
|
||||||
|
|
||||||
|
# GitHub Issue からタスクを追加
|
||||||
|
takt add #28
|
||||||
|
```
|
||||||
|
|
||||||
|
タスク追加時には次の項目の入力を求められます。
|
||||||
|
|
||||||
|
- **Piece** -- 実行に使用する piece(ワークフロー)
|
||||||
|
- **Worktree パス** -- 隔離クローンの作成場所(Enter で自動、またはパスを指定)
|
||||||
|
- **ブランチ名** -- カスタムブランチ名(Enter で `takt/{timestamp}-{slug}` が自動生成)
|
||||||
|
- **Auto-PR** -- 実行成功後に PR を自動作成するかどうか
|
||||||
|
|
||||||
|
### GitHub Issue 連携
|
||||||
|
|
||||||
|
Issue 参照(例: `#28`)を渡すと、TAKT は GitHub CLI(`gh`)を介して Issue のタイトル、本文、ラベル、コメントを取得し、タスク内容として使用します。Issue 番号は `tasks.yaml` に記録され、ブランチ名にも反映されます。
|
||||||
|
|
||||||
|
**要件:** [GitHub CLI](https://cli.github.com/)(`gh`)がインストールされ、認証済みである必要があります。
|
||||||
|
|
||||||
|
### インタラクティブモードからのタスク保存
|
||||||
|
|
||||||
|
インタラクティブモードからもタスクを保存できます。会話で要件を精緻化した後、`/save`(またはプロンプト時の save アクション)を使用して、即座に実行する代わりに `tasks.yaml` にタスクを永続化できます。
|
||||||
|
|
||||||
|
## タスクディレクトリ形式
|
||||||
|
|
||||||
|
TAKT はタスクのメタデータを `.takt/tasks.yaml` に、各タスクの詳細仕様を `.takt/tasks/{slug}/` に保存します。
|
||||||
|
|
||||||
|
### `tasks.yaml` スキーマ
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
tasks:
|
||||||
|
- name: add-auth-feature
|
||||||
|
status: pending
|
||||||
|
task_dir: .takt/tasks/20260201-015714-foptng
|
||||||
|
piece: default
|
||||||
|
created_at: "2026-02-01T01:57:14.000Z"
|
||||||
|
started_at: null
|
||||||
|
completed_at: null
|
||||||
|
```
|
||||||
|
|
||||||
|
フィールドの説明は次の通りです。
|
||||||
|
|
||||||
|
| フィールド | 説明 |
|
||||||
|
|-----------|------|
|
||||||
|
| `name` | AI が生成したタスクスラグ |
|
||||||
|
| `status` | `pending`、`running`、`completed`、または `failed` |
|
||||||
|
| `task_dir` | `order.md` を含むタスクディレクトリのパス |
|
||||||
|
| `piece` | 実行に使用する piece 名 |
|
||||||
|
| `worktree` | `true`(自動)、パス文字列、または省略(カレントディレクトリで実行) |
|
||||||
|
| `branch` | ブランチ名(省略時は自動生成) |
|
||||||
|
| `auto_pr` | 実行後に PR を自動作成するかどうか |
|
||||||
|
| `issue` | GitHub Issue 番号(該当する場合) |
|
||||||
|
| `created_at` | ISO 8601 タイムスタンプ |
|
||||||
|
| `started_at` | ISO 8601 タイムスタンプ(実行開始時に設定) |
|
||||||
|
| `completed_at` | ISO 8601 タイムスタンプ(実行完了時に設定) |
|
||||||
|
|
||||||
|
### タスクディレクトリのレイアウト
|
||||||
|
|
||||||
|
```text
|
||||||
|
.takt/
|
||||||
|
tasks/
|
||||||
|
20260201-015714-foptng/
|
||||||
|
order.md # タスク仕様(自動生成、編集可能)
|
||||||
|
schema.sql # 添付の参考資料(任意)
|
||||||
|
wireframe.png # 添付の参考資料(任意)
|
||||||
|
tasks.yaml # タスクメタデータレコード
|
||||||
|
runs/
|
||||||
|
20260201-015714-foptng/
|
||||||
|
reports/ # 実行レポート(自動生成)
|
||||||
|
logs/ # NDJSON セッションログ
|
||||||
|
context/ # スナップショット(previous_responses など)
|
||||||
|
meta.json # 実行メタデータ
|
||||||
|
```
|
||||||
|
|
||||||
|
`takt add` は `.takt/tasks/{slug}/order.md` を自動作成し、`task_dir` への参照を `tasks.yaml` に保存します。実行前に `order.md` を自由に編集したり、タスクディレクトリに補足ファイル(SQL スキーマ、ワイヤーフレーム、API 仕様など)を追加したりできます。
|
||||||
|
|
||||||
|
## タスクの実行(`takt run`)
|
||||||
|
|
||||||
|
`.takt/tasks.yaml` のすべての pending タスクを実行します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt run
|
||||||
|
```
|
||||||
|
|
||||||
|
`run` コマンドは pending タスクを取得して、設定された piece を通じて実行します。各タスクは次の処理を経ます。
|
||||||
|
|
||||||
|
1. クローン作成(`worktree` が設定されている場合)
|
||||||
|
2. クローン/プロジェクトディレクトリでの piece 実行
|
||||||
|
3. 自動コミットとプッシュ(worktree 実行の場合)
|
||||||
|
4. 実行後フロー(`auto_pr` 設定時は PR 作成)
|
||||||
|
5. `tasks.yaml` のステータス更新(`completed` または `failed`)
|
||||||
|
|
||||||
|
### 並列実行(Concurrency)
|
||||||
|
|
||||||
|
デフォルトではタスクは逐次実行されます(`concurrency: 1`)。`~/.takt/config.yaml` で並列実行を設定できます。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
concurrency: 3 # 最大3タスクを並列実行(1-10)
|
||||||
|
task_poll_interval_ms: 500 # 新規タスクのポーリング間隔(100-5000ms)
|
||||||
|
```
|
||||||
|
|
||||||
|
concurrency が 1 より大きい場合、TAKT はワーカープールを使用して次のように動作します。
|
||||||
|
|
||||||
|
- 最大 N タスクを同時実行
|
||||||
|
- 設定された間隔で新規タスクをポーリング
|
||||||
|
- ワーカーが空き次第、新しいタスクを取得
|
||||||
|
- タスクごとに色分けされたプレフィックス付き出力で読みやすさを確保
|
||||||
|
- Ctrl+C でのグレースフルシャットダウン(実行中タスクの完了を待機)
|
||||||
|
|
||||||
|
### 中断されたタスクの復旧
|
||||||
|
|
||||||
|
`takt run` が中断された場合(プロセスクラッシュ、Ctrl+C など)、`running` ステータスのまま残ったタスクは次回の `takt run` または `takt watch` 起動時に自動的に `pending` に復旧されます。
|
||||||
|
|
||||||
|
## タスクの監視(`takt watch`)
|
||||||
|
|
||||||
|
`.takt/tasks.yaml` を監視し、タスクが追加されると自動実行する常駐プロセスを起動します。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt watch
|
||||||
|
```
|
||||||
|
|
||||||
|
watch コマンドの動作は次の通りです。
|
||||||
|
|
||||||
|
- Ctrl+C(SIGINT)まで実行を継続
|
||||||
|
- `tasks.yaml` の新しい `pending` タスクを監視
|
||||||
|
- タスクが現れるたびに実行
|
||||||
|
- 起動時に中断された `running` タスクを復旧
|
||||||
|
- 終了時に合計/成功/失敗タスク数のサマリを表示
|
||||||
|
|
||||||
|
これは「プロデューサー-コンシューマー」ワークフローに便利です。一方のターミナルで `takt add` でタスクを追加し、もう一方で `takt watch` がそれらを自動実行します。
|
||||||
|
|
||||||
|
## タスクブランチの管理(`takt list`)
|
||||||
|
|
||||||
|
タスクブランチの一覧表示とインタラクティブな管理を行います。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt list
|
||||||
|
```
|
||||||
|
|
||||||
|
リストビューでは、すべてのタスクがステータス別(pending、running、completed、failed)に作成日とサマリ付きで表示されます。タスクを選択すると、そのステータスに応じた操作が表示されます。
|
||||||
|
|
||||||
|
### 完了タスクの操作
|
||||||
|
|
||||||
|
| 操作 | 説明 |
|
||||||
|
|------|------|
|
||||||
|
| **View diff** | デフォルトブランチとの差分をページャで表示 |
|
||||||
|
| **Instruct** | AI との会話で追加指示を作成し、再実行 |
|
||||||
|
| **Try merge** | スカッシュマージ(コミットせずにステージング、手動レビュー用) |
|
||||||
|
| **Merge & cleanup** | スカッシュマージしてブランチを削除 |
|
||||||
|
| **Delete** | すべての変更を破棄してブランチを削除 |
|
||||||
|
|
||||||
|
### 失敗タスクの操作
|
||||||
|
|
||||||
|
| 操作 | 説明 |
|
||||||
|
|------|------|
|
||||||
|
| **Retry** | 失敗コンテキスト付きのリトライ会話を開き、再実行 |
|
||||||
|
| **Delete** | 失敗したタスクレコードを削除 |
|
||||||
|
|
||||||
|
### Pending タスクの操作
|
||||||
|
|
||||||
|
| 操作 | 説明 |
|
||||||
|
|------|------|
|
||||||
|
| **Delete** | `tasks.yaml` から pending タスクを削除 |
|
||||||
|
|
||||||
|
### Instruct モード
|
||||||
|
|
||||||
|
完了タスクで **Instruct** を選択すると、TAKT は AI とのインタラクティブな会話ループを開きます。会話には次の情報がプリロードされます。
|
||||||
|
|
||||||
|
- ブランチコンテキスト(デフォルトブランチとの差分統計、コミット履歴)
|
||||||
|
- 前回の実行セッションデータ(movement ログ、レポート)
|
||||||
|
- Piece 構造と movement プレビュー
|
||||||
|
- 前回の order 内容
|
||||||
|
|
||||||
|
どのような追加変更が必要かを議論し、AI が指示の精緻化を支援します。準備ができたら次の操作を選択できます。
|
||||||
|
|
||||||
|
- **Execute** -- 新しい指示でタスクを即座に再実行
|
||||||
|
- **Save task** -- 新しい指示でタスクを `pending` として再キューイングし、後で実行
|
||||||
|
- **Cancel** -- 破棄してリストに戻る
|
||||||
|
|
||||||
|
### Retry モード
|
||||||
|
|
||||||
|
失敗タスクで **Retry** を選択すると、TAKT は次の処理を行います。
|
||||||
|
|
||||||
|
1. 失敗の詳細を表示(失敗した movement、エラーメッセージ、最後のエージェントメッセージ)
|
||||||
|
2. Piece の選択を促す
|
||||||
|
3. どの movement から開始するかの選択を促す(デフォルトは失敗した movement)
|
||||||
|
4. 失敗コンテキスト、実行セッションデータ、piece 構造がプリロードされたリトライ会話を開く
|
||||||
|
5. AI の支援で指示を精緻化
|
||||||
|
|
||||||
|
リトライ会話は Instruct モードと同じ操作(実行、タスク保存、キャンセル)をサポートします。リトライのメモは複数のリトライ試行にわたってタスクレコードに蓄積されます。
|
||||||
|
|
||||||
|
### 非インタラクティブモード(`--non-interactive`)
|
||||||
|
|
||||||
|
CI/CD スクリプトでは非インタラクティブモードを使用できます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# すべてのタスクをテキストで一覧表示
|
||||||
|
takt list --non-interactive
|
||||||
|
|
||||||
|
# すべてのタスクを JSON で一覧表示
|
||||||
|
takt list --non-interactive --format json
|
||||||
|
|
||||||
|
# 特定ブランチの差分統計を表示
|
||||||
|
takt list --non-interactive --action diff --branch takt/my-branch
|
||||||
|
|
||||||
|
# 特定ブランチをマージ
|
||||||
|
takt list --non-interactive --action merge --branch takt/my-branch
|
||||||
|
|
||||||
|
# ブランチを削除(--yes が必要)
|
||||||
|
takt list --non-interactive --action delete --branch takt/my-branch --yes
|
||||||
|
|
||||||
|
# Try merge(コミットせずにステージング)
|
||||||
|
takt list --non-interactive --action try --branch takt/my-branch
|
||||||
|
```
|
||||||
|
|
||||||
|
利用可能なアクションは `diff`、`try`、`merge`、`delete` です。
|
||||||
|
|
||||||
|
## タスクディレクトリワークフロー
|
||||||
|
|
||||||
|
推奨されるエンドツーエンドのワークフローは次の通りです。
|
||||||
|
|
||||||
|
1. **`takt add`** -- タスクを作成。`.takt/tasks.yaml` に pending レコードが追加され、`.takt/tasks/{slug}/` に `order.md` が生成される。
|
||||||
|
2. **`order.md` を編集** -- 生成されたファイルを開き、必要に応じて詳細な仕様、参考資料、補足ファイルを追加。
|
||||||
|
3. **`takt run`**(または `takt watch`)-- `tasks.yaml` の pending タスクを実行。各タスクは設定された piece ワークフローを通じて実行される。
|
||||||
|
4. **出力を確認** -- `.takt/runs/{slug}/reports/` の実行レポートを確認(slug はタスクディレクトリと一致)。
|
||||||
|
5. **`takt list`** -- 結果を確認し、成功したブランチのマージ、失敗のリトライ、追加指示を行う。
|
||||||
|
|
||||||
|
## 隔離実行(共有クローン)
|
||||||
|
|
||||||
|
タスク設定で `worktree` を指定すると、各タスクは `git clone --shared` で作成された隔離クローン内で実行され、メインの作業ディレクトリをクリーンに保ちます。
|
||||||
|
|
||||||
|
### 設定オプション
|
||||||
|
|
||||||
|
| 設定 | 説明 |
|
||||||
|
|------|------|
|
||||||
|
| `worktree: true` | 隣接ディレクトリ(または `worktree_dir` 設定で指定した場所)に共有クローンを自動作成 |
|
||||||
|
| `worktree: "/path/to/dir"` | 指定パスにクローンを作成 |
|
||||||
|
| `branch: "feat/xxx"` | 指定ブランチを使用(省略時は `takt/{timestamp}-{slug}` が自動生成) |
|
||||||
|
| *(worktree を省略)* | カレントディレクトリで実行(デフォルト) |
|
||||||
|
|
||||||
|
### 仕組み
|
||||||
|
|
||||||
|
TAKT は `git worktree` の代わりに `git clone --shared` を使用して、独立した `.git` ディレクトリを持つ軽量クローンを作成します。これが重要な理由は次の通りです。
|
||||||
|
|
||||||
|
- **独立した `.git`**: 共有クローンは独自の `.git` ディレクトリを持ち、エージェントツール(Claude Code など)が `gitdir:` 参照をたどってメインリポジトリに戻ることを防ぎます。
|
||||||
|
- **完全な隔離**: エージェントはクローンディレクトリ内でのみ作業し、メインリポジトリを認識しません。
|
||||||
|
|
||||||
|
> **注意**: YAML フィールド名は後方互換性のため `worktree` のままです。内部的には `git worktree` ではなく `git clone --shared` を使用しています。
|
||||||
|
|
||||||
|
### エフェメラルなライフサイクル
|
||||||
|
|
||||||
|
クローンはエフェメラルなライフサイクルに従います。
|
||||||
|
|
||||||
|
1. **作成** -- タスク実行前にクローンを作成
|
||||||
|
2. **実行** -- クローンディレクトリ内でタスクを実行
|
||||||
|
3. **コミット & プッシュ** -- 成功時に変更を自動コミットしてブランチにプッシュ
|
||||||
|
4. **保持** -- 実行後もクローンを保持(instruct/retry 操作用)
|
||||||
|
5. **クリーンアップ** -- ブランチが永続的な成果物。`takt list` でマージまたは削除
|
||||||
|
|
||||||
|
### デュアルワーキングディレクトリ
|
||||||
|
|
||||||
|
worktree 実行中、TAKT は2つのディレクトリ参照を管理します。
|
||||||
|
|
||||||
|
| ディレクトリ | 用途 |
|
||||||
|
|------------|------|
|
||||||
|
| `cwd`(クローンパス) | エージェントの実行場所、レポートの書き込み先 |
|
||||||
|
| `projectCwd`(プロジェクトルート) | ログとセッションデータの保存先 |
|
||||||
|
|
||||||
|
レポートは `cwd/.takt/runs/{slug}/reports/`(クローン内)に書き込まれ、エージェントがメインリポジトリのパスを発見することを防ぎます。`cwd !== projectCwd` の場合、クロスディレクトリ汚染を避けるためセッション再開はスキップされます。
|
||||||
|
|
||||||
|
## セッションログ
|
||||||
|
|
||||||
|
TAKT は NDJSON(改行区切り JSON、`.jsonl`)形式でセッションログを書き込みます。各レコードはアトミックに追加されるため、プロセスがクラッシュしても部分的なログは保存されます。
|
||||||
|
|
||||||
|
### ログの場所
|
||||||
|
|
||||||
|
```text
|
||||||
|
.takt/runs/{slug}/
|
||||||
|
logs/{sessionId}.jsonl # piece 実行ごとの NDJSON セッションログ
|
||||||
|
meta.json # 実行メタデータ(タスク、piece、開始/終了、ステータスなど)
|
||||||
|
context/
|
||||||
|
previous_responses/
|
||||||
|
latest.md # 最新の previous response(自動継承)
|
||||||
|
```
|
||||||
|
|
||||||
|
### レコードタイプ
|
||||||
|
|
||||||
|
| レコードタイプ | 説明 |
|
||||||
|
|--------------|------|
|
||||||
|
| `piece_start` | タスクと piece 名による piece の初期化 |
|
||||||
|
| `step_start` | Movement の実行開始 |
|
||||||
|
| `step_complete` | ステータス、内容、マッチしたルール情報を含む movement 結果 |
|
||||||
|
| `piece_complete` | Piece の正常完了 |
|
||||||
|
| `piece_abort` | 理由を伴う中断 |
|
||||||
|
|
||||||
|
### リアルタイム監視
|
||||||
|
|
||||||
|
実行中にログをリアルタイムで監視できます。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tail -f .takt/runs/{slug}/logs/{sessionId}.jsonl
|
||||||
|
```
|
||||||
323
docs/task-management.md
Normal file
323
docs/task-management.md
Normal file
@ -0,0 +1,323 @@
|
|||||||
|
[日本語](./task-management.ja.md)
|
||||||
|
|
||||||
|
# Task Management
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
TAKT provides a task management workflow for accumulating multiple tasks and executing them in batch. The basic flow is:
|
||||||
|
|
||||||
|
1. **`takt add`** -- Refine task requirements through AI conversation and save to `.takt/tasks.yaml`
|
||||||
|
2. **Tasks accumulate** -- Edit `order.md` files, attach reference materials
|
||||||
|
3. **`takt run`** -- Execute all pending tasks at once (sequential or parallel)
|
||||||
|
4. **`takt list`** -- Review results, merge branches, retry failures, or add instructions
|
||||||
|
|
||||||
|
Each task executes in an isolated shared clone (optional), produces reports, and creates a branch that can be merged or discarded via `takt list`.
|
||||||
|
|
||||||
|
## Adding Tasks (`takt add`)
|
||||||
|
|
||||||
|
Use `takt add` to create a new task entry in `.takt/tasks.yaml`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Add a task with inline text
|
||||||
|
takt add "Implement user authentication"
|
||||||
|
|
||||||
|
# Add a task from a GitHub Issue
|
||||||
|
takt add #28
|
||||||
|
```
|
||||||
|
|
||||||
|
When adding a task, you are prompted for:
|
||||||
|
|
||||||
|
- **Piece** -- Which piece (workflow) to use for execution
|
||||||
|
- **Worktree path** -- Where to create the isolated clone (Enter for auto, or specify a path)
|
||||||
|
- **Branch name** -- Custom branch name (Enter for auto-generated `takt/{timestamp}-{slug}`)
|
||||||
|
- **Auto-PR** -- Whether to automatically create a pull request after successful execution
|
||||||
|
|
||||||
|
### GitHub Issue Integration
|
||||||
|
|
||||||
|
When you pass an issue reference (e.g., `#28`), TAKT fetches the issue title, body, labels, and comments via the GitHub CLI (`gh`) and uses them as the task content. The issue number is recorded in `tasks.yaml` and reflected in the branch name.
|
||||||
|
|
||||||
|
**Requirement:** [GitHub CLI](https://cli.github.com/) (`gh`) must be installed and authenticated.
|
||||||
|
|
||||||
|
### Saving Tasks from Interactive Mode
|
||||||
|
|
||||||
|
You can also save tasks from interactive mode. After refining requirements through conversation, use `/save` (or the save action when prompted) to persist the task to `tasks.yaml` instead of executing immediately.
|
||||||
|
|
||||||
|
## Task Directory Format
|
||||||
|
|
||||||
|
TAKT stores task metadata in `.takt/tasks.yaml` and each task's detailed specification in `.takt/tasks/{slug}/`.
|
||||||
|
|
||||||
|
### `tasks.yaml` Schema
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
tasks:
|
||||||
|
- name: add-auth-feature
|
||||||
|
status: pending
|
||||||
|
task_dir: .takt/tasks/20260201-015714-foptng
|
||||||
|
piece: default
|
||||||
|
created_at: "2026-02-01T01:57:14.000Z"
|
||||||
|
started_at: null
|
||||||
|
completed_at: null
|
||||||
|
```
|
||||||
|
|
||||||
|
Fields:
|
||||||
|
|
||||||
|
| Field | Description |
|
||||||
|
|-------|-------------|
|
||||||
|
| `name` | AI-generated task slug |
|
||||||
|
| `status` | `pending`, `running`, `completed`, or `failed` |
|
||||||
|
| `task_dir` | Path to the task directory containing `order.md` |
|
||||||
|
| `piece` | Piece name to use for execution |
|
||||||
|
| `worktree` | `true` (auto), a path string, or omitted (run in current directory) |
|
||||||
|
| `branch` | Branch name (auto-generated if omitted) |
|
||||||
|
| `auto_pr` | Whether to auto-create a PR after execution |
|
||||||
|
| `issue` | GitHub Issue number (if applicable) |
|
||||||
|
| `created_at` | ISO 8601 timestamp |
|
||||||
|
| `started_at` | ISO 8601 timestamp (set when execution begins) |
|
||||||
|
| `completed_at` | ISO 8601 timestamp (set when execution finishes) |
|
||||||
|
|
||||||
|
### Task Directory Layout
|
||||||
|
|
||||||
|
```text
|
||||||
|
.takt/
|
||||||
|
tasks/
|
||||||
|
20260201-015714-foptng/
|
||||||
|
order.md # Task specification (auto-generated, editable)
|
||||||
|
schema.sql # Attached reference materials (optional)
|
||||||
|
wireframe.png # Attached reference materials (optional)
|
||||||
|
tasks.yaml # Task metadata records
|
||||||
|
runs/
|
||||||
|
20260201-015714-foptng/
|
||||||
|
reports/ # Execution reports (auto-generated)
|
||||||
|
logs/ # NDJSON session logs
|
||||||
|
context/ # Snapshots (previous_responses, etc.)
|
||||||
|
meta.json # Run metadata
|
||||||
|
```
|
||||||
|
|
||||||
|
`takt add` creates `.takt/tasks/{slug}/order.md` automatically and saves the `task_dir` reference to `tasks.yaml`. You can freely edit `order.md` and add supplementary files (SQL schemas, wireframes, API specs, etc.) to the task directory before execution.
|
||||||
|
|
||||||
|
## Executing Tasks (`takt run`)
|
||||||
|
|
||||||
|
Execute all pending tasks from `.takt/tasks.yaml`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt run
|
||||||
|
```
|
||||||
|
|
||||||
|
The `run` command claims pending tasks and executes them through the configured piece. Each task goes through:
|
||||||
|
|
||||||
|
1. Clone creation (if `worktree` is set)
|
||||||
|
2. Piece execution in the clone/project directory
|
||||||
|
3. Auto-commit and push (if worktree execution)
|
||||||
|
4. Post-execution flow (PR creation if `auto_pr` is set)
|
||||||
|
5. Status update in `tasks.yaml` (`completed` or `failed`)
|
||||||
|
|
||||||
|
### Parallel Execution (Concurrency)
|
||||||
|
|
||||||
|
By default, tasks run sequentially (`concurrency: 1`). Configure parallel execution in `~/.takt/config.yaml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
concurrency: 3 # Run up to 3 tasks in parallel (1-10)
|
||||||
|
task_poll_interval_ms: 500 # Polling interval for new tasks (100-5000ms)
|
||||||
|
```
|
||||||
|
|
||||||
|
When concurrency is greater than 1, TAKT uses a worker pool that:
|
||||||
|
|
||||||
|
- Runs up to N tasks simultaneously
|
||||||
|
- Polls for newly added tasks at the configured interval
|
||||||
|
- Picks up new tasks as workers become available
|
||||||
|
- Displays color-coded prefixed output per task for readability
|
||||||
|
- Supports graceful shutdown on Ctrl+C (waits for in-flight tasks to complete)
|
||||||
|
|
||||||
|
### Interrupted Task Recovery
|
||||||
|
|
||||||
|
If `takt run` is interrupted (e.g., process crash, Ctrl+C), tasks left in `running` status are automatically recovered to `pending` on the next `takt run` or `takt watch` invocation.
|
||||||
|
|
||||||
|
## Watching Tasks (`takt watch`)
|
||||||
|
|
||||||
|
Run a resident process that monitors `.takt/tasks.yaml` and auto-executes tasks as they appear:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt watch
|
||||||
|
```
|
||||||
|
|
||||||
|
The watch command:
|
||||||
|
|
||||||
|
- Stays running until Ctrl+C (SIGINT)
|
||||||
|
- Monitors `tasks.yaml` for new `pending` tasks
|
||||||
|
- Executes each task as it appears
|
||||||
|
- Recovers interrupted `running` tasks on startup
|
||||||
|
- Displays a summary of total/success/failed tasks on exit
|
||||||
|
|
||||||
|
This is useful for a "producer-consumer" workflow where you add tasks with `takt add` in one terminal and let `takt watch` execute them automatically in another.
|
||||||
|
|
||||||
|
## Managing Task Branches (`takt list`)
|
||||||
|
|
||||||
|
List and manage task branches interactively:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
takt list
|
||||||
|
```
|
||||||
|
|
||||||
|
The list view shows all tasks organized by status (pending, running, completed, failed) with creation dates and summaries. Selecting a task shows available actions depending on its status.
|
||||||
|
|
||||||
|
### Actions for Completed Tasks
|
||||||
|
|
||||||
|
| Action | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| **View diff** | Show full diff against the default branch in a pager |
|
||||||
|
| **Instruct** | Open an AI conversation to craft additional instructions, then re-execute |
|
||||||
|
| **Try merge** | Squash merge (stages changes without committing, for manual review) |
|
||||||
|
| **Merge & cleanup** | Squash merge and delete the branch |
|
||||||
|
| **Delete** | Discard all changes and delete the branch |
|
||||||
|
|
||||||
|
### Actions for Failed Tasks
|
||||||
|
|
||||||
|
| Action | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| **Retry** | Open a retry conversation with failure context, then re-execute |
|
||||||
|
| **Delete** | Remove the failed task record |
|
||||||
|
|
||||||
|
### Actions for Pending Tasks
|
||||||
|
|
||||||
|
| Action | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| **Delete** | Remove the pending task from `tasks.yaml` |
|
||||||
|
|
||||||
|
### Instruct Mode
|
||||||
|
|
||||||
|
When you select **Instruct** on a completed task, TAKT opens an interactive conversation loop with the AI. The conversation is pre-loaded with:
|
||||||
|
|
||||||
|
- Branch context (diff stat against default branch, commit history)
|
||||||
|
- Previous run session data (movement logs, reports)
|
||||||
|
- Piece structure and movement previews
|
||||||
|
- Previous order content
|
||||||
|
|
||||||
|
You can discuss what additional changes are needed, and the AI helps refine the instructions. When ready, choose:
|
||||||
|
|
||||||
|
- **Execute** -- Re-execute the task immediately with the new instructions
|
||||||
|
- **Save task** -- Requeue the task as `pending` with the new instructions for later execution
|
||||||
|
- **Cancel** -- Discard and return to the list
|
||||||
|
|
||||||
|
### Retry Mode
|
||||||
|
|
||||||
|
When you select **Retry** on a failed task, TAKT:
|
||||||
|
|
||||||
|
1. Displays failure details (failed movement, error message, last agent message)
|
||||||
|
2. Prompts you to select a piece
|
||||||
|
3. Prompts you to select which movement to start from (defaults to the failed movement)
|
||||||
|
4. Opens a retry conversation pre-loaded with failure context, run session data, and piece structure
|
||||||
|
5. Lets you refine instructions with AI assistance
|
||||||
|
|
||||||
|
The retry conversation supports the same actions as Instruct mode (execute, save task, cancel). Retry notes are appended to the task record, accumulating across multiple retry attempts.
|
||||||
|
|
||||||
|
### Non-Interactive Mode (`--non-interactive`)
|
||||||
|
|
||||||
|
For CI/CD scripts, use non-interactive mode:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List all tasks as text
|
||||||
|
takt list --non-interactive
|
||||||
|
|
||||||
|
# List all tasks as JSON
|
||||||
|
takt list --non-interactive --format json
|
||||||
|
|
||||||
|
# Show diff stat for a specific branch
|
||||||
|
takt list --non-interactive --action diff --branch takt/my-branch
|
||||||
|
|
||||||
|
# Merge a specific branch
|
||||||
|
takt list --non-interactive --action merge --branch takt/my-branch
|
||||||
|
|
||||||
|
# Delete a branch (requires --yes)
|
||||||
|
takt list --non-interactive --action delete --branch takt/my-branch --yes
|
||||||
|
|
||||||
|
# Try merge (stage without commit)
|
||||||
|
takt list --non-interactive --action try --branch takt/my-branch
|
||||||
|
```
|
||||||
|
|
||||||
|
Available actions: `diff`, `try`, `merge`, `delete`.
|
||||||
|
|
||||||
|
## Task Directory Workflow
|
||||||
|
|
||||||
|
The recommended end-to-end workflow:
|
||||||
|
|
||||||
|
1. **`takt add`** -- Create a task. A pending record is added to `.takt/tasks.yaml` and `order.md` is generated in `.takt/tasks/{slug}/`.
|
||||||
|
2. **Edit `order.md`** -- Open the generated file and add detailed specifications, reference materials, or supplementary files as needed.
|
||||||
|
3. **`takt run`** (or `takt watch`) -- Execute pending tasks from `tasks.yaml`. Each task runs through the configured piece workflow.
|
||||||
|
4. **Verify outputs** -- Check execution reports in `.takt/runs/{slug}/reports/` (the slug matches the task directory).
|
||||||
|
5. **`takt list`** -- Review results, merge successful branches, retry failures, or add further instructions.
|
||||||
|
|
||||||
|
## Isolated Execution (Shared Clone)
|
||||||
|
|
||||||
|
Specifying `worktree` in task configuration executes each task in an isolated clone created with `git clone --shared`, keeping your main working directory clean.
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
| Setting | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `worktree: true` | Auto-create shared clone in adjacent directory (or location specified by `worktree_dir` config) |
|
||||||
|
| `worktree: "/path/to/dir"` | Create clone at the specified path |
|
||||||
|
| `branch: "feat/xxx"` | Use specified branch (auto-generated as `takt/{timestamp}-{slug}` if omitted) |
|
||||||
|
| *(omit `worktree`)* | Execute in current directory (default) |
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
TAKT uses `git clone --shared` instead of `git worktree` to create lightweight clones with an independent `.git` directory. This is important because:
|
||||||
|
|
||||||
|
- **Independent `.git`**: Shared clones have their own `.git` directory, preventing agent tools (like Claude Code) from traversing `gitdir:` references back to the main repository.
|
||||||
|
- **Full isolation**: Agents work entirely within the clone directory, unaware of the main repository.
|
||||||
|
|
||||||
|
> **Note**: The YAML field name remains `worktree` for backward compatibility. Internally, it uses `git clone --shared` instead of `git worktree`.
|
||||||
|
|
||||||
|
### Ephemeral Lifecycle
|
||||||
|
|
||||||
|
Clones follow an ephemeral lifecycle:
|
||||||
|
|
||||||
|
1. **Create** -- Clone is created before task execution
|
||||||
|
2. **Execute** -- Task runs inside the clone directory
|
||||||
|
3. **Commit & Push** -- On success, changes are auto-committed and pushed to the branch
|
||||||
|
4. **Preserve** -- Clone is preserved after execution (for instruct/retry operations)
|
||||||
|
5. **Cleanup** -- Branches are the persistent artifacts; use `takt list` to merge or delete
|
||||||
|
|
||||||
|
### Dual Working Directory
|
||||||
|
|
||||||
|
During worktree execution, TAKT maintains two directory references:
|
||||||
|
|
||||||
|
| Directory | Purpose |
|
||||||
|
|-----------|---------|
|
||||||
|
| `cwd` (clone path) | Where agents run, where reports are written |
|
||||||
|
| `projectCwd` (project root) | Where logs and session data are stored |
|
||||||
|
|
||||||
|
Reports are written to `cwd/.takt/runs/{slug}/reports/` (inside the clone) to prevent agents from discovering the main repository path. Session resume is skipped when `cwd !== projectCwd` to avoid cross-directory contamination.
|
||||||
|
|
||||||
|
## Session Logs
|
||||||
|
|
||||||
|
TAKT writes session logs in NDJSON (Newline-Delimited JSON, `.jsonl`) format. Each record is atomically appended, so partial logs are preserved even if the process crashes.
|
||||||
|
|
||||||
|
### Log Location
|
||||||
|
|
||||||
|
```text
|
||||||
|
.takt/runs/{slug}/
|
||||||
|
logs/{sessionId}.jsonl # NDJSON session log per piece execution
|
||||||
|
meta.json # Run metadata (task, piece, start/end, status, etc.)
|
||||||
|
context/
|
||||||
|
previous_responses/
|
||||||
|
latest.md # Latest previous response (inherited automatically)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Record Types
|
||||||
|
|
||||||
|
| Record Type | Description |
|
||||||
|
|-------------|-------------|
|
||||||
|
| `piece_start` | Piece initialization with task and piece name |
|
||||||
|
| `step_start` | Movement execution start |
|
||||||
|
| `step_complete` | Movement result with status, content, matched rule info |
|
||||||
|
| `piece_complete` | Successful piece completion |
|
||||||
|
| `piece_abort` | Abort with reason |
|
||||||
|
|
||||||
|
### Real-Time Monitoring
|
||||||
|
|
||||||
|
You can monitor logs in real-time during execution:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tail -f .takt/runs/{slug}/logs/{sessionId}.jsonl
|
||||||
|
```
|
||||||
@ -103,6 +103,20 @@ E2Eテストを追加・変更した場合は、このドキュメントも更
|
|||||||
- `.takt/tasks.yaml` に pending タスクを追加する(`piece` に `e2e/fixtures/pieces/mock-single-step.yaml` を指定)。
|
- `.takt/tasks.yaml` に pending タスクを追加する(`piece` に `e2e/fixtures/pieces/mock-single-step.yaml` を指定)。
|
||||||
- 出力に `Task "watch-task" completed` が含まれることを確認する。
|
- 出力に `Task "watch-task" completed` が含まれることを確認する。
|
||||||
- `Ctrl+C` で終了する。
|
- `Ctrl+C` で終了する。
|
||||||
|
- Run recovery and high-priority run flows(`e2e/specs/run-recovery.e2e.ts`)
|
||||||
|
- 目的: 高優先度ユースケース(異常終了リカバリー、並列実行、初期化〜add〜run)をまとめて確認。
|
||||||
|
- LLM: 呼び出さない(`--provider mock` 固定)
|
||||||
|
- 手順(ユーザー行動/コマンド):
|
||||||
|
- 異常終了リカバリー:
|
||||||
|
- `.takt/tasks.yaml` に pending タスク2件を投入し、`takt run --provider mock` 実行中にプロセスを強制終了する。
|
||||||
|
- 再度 `takt run --provider mock` を実行し、`Recovered 1 interrupted running task(s) to pending.` が出力されることを確認する。
|
||||||
|
- 復旧対象を含む全タスクが完了し、`.takt/tasks.yaml` が空になることを確認する。
|
||||||
|
- 高並列実行:
|
||||||
|
- `concurrency: 10` を設定し、pending タスク12件を投入して `takt run --provider mock` を実行する。
|
||||||
|
- 出力に `Concurrency: 10` と `Tasks Summary` が含まれること、および `.takt/tasks.yaml` が空になることを確認する。
|
||||||
|
- 初期化〜add〜run:
|
||||||
|
- グローバル `config.yaml` 不在の環境で `takt add` を2回実行し、`takt run --provider mock` を実行する。
|
||||||
|
- タスク実行完了後に `.takt/tasks/` 配下の2タスクディレクトリ生成、`.takt/.gitignore` 生成、`.takt/tasks.yaml` の空状態を確認する。
|
||||||
- Run tasks graceful shutdown on SIGINT(`e2e/specs/run-sigint-graceful.e2e.ts`)
|
- Run tasks graceful shutdown on SIGINT(`e2e/specs/run-sigint-graceful.e2e.ts`)
|
||||||
- 目的: `takt run` を並列実行中に `Ctrl+C` した際、新規クローン投入を止めてグレースフルに終了することを確認。
|
- 目的: `takt run` を並列実行中に `Ctrl+C` した際、新規クローン投入を止めてグレースフルに終了することを確認。
|
||||||
- LLM: 呼び出さない(`--provider mock` 固定)
|
- LLM: 呼び出さない(`--provider mock` 固定)
|
||||||
@ -130,14 +144,6 @@ E2Eテストを追加・変更した場合は、このドキュメントも更
|
|||||||
- `takt list --non-interactive --action diff --branch <branch>` で差分統計が出力されることを確認する。
|
- `takt list --non-interactive --action diff --branch <branch>` で差分統計が出力されることを確認する。
|
||||||
- `takt list --non-interactive --action try --branch <branch>` で変更がステージされることを確認する。
|
- `takt list --non-interactive --action try --branch <branch>` で変更がステージされることを確認する。
|
||||||
- `takt list --non-interactive --action merge --branch <branch>` でブランチがマージされ削除されることを確認する。
|
- `takt list --non-interactive --action merge --branch <branch>` でブランチがマージされ削除されることを確認する。
|
||||||
- Config permission mode(`e2e/specs/cli-config.e2e.ts`)
|
|
||||||
- 目的: `takt config` でパーミッションモードの切り替えと永続化を確認。
|
|
||||||
- LLM: 呼び出さない(LLM不使用の操作のみ)
|
|
||||||
- 手順(ユーザー行動/コマンド):
|
|
||||||
- `takt config default` を実行し、`Switched to: default` が出力されることを確認する。
|
|
||||||
- `takt config sacrifice-my-pc` を実行し、`Switched to: sacrifice-my-pc` が出力されることを確認する。
|
|
||||||
- `takt config sacrifice-my-pc` 実行後、`.takt/config.yaml` に `permissionMode: sacrifice-my-pc` が保存されていることを確認する。
|
|
||||||
- `takt config invalid-mode` を実行し、`Invalid mode` が出力されることを確認する。
|
|
||||||
- Reset categories(`e2e/specs/cli-reset-categories.e2e.ts`)
|
- Reset categories(`e2e/specs/cli-reset-categories.e2e.ts`)
|
||||||
- 目的: `takt reset categories` でカテゴリオーバーレイのリセットを確認。
|
- 目的: `takt reset categories` でカテゴリオーバーレイのリセットを確認。
|
||||||
- LLM: 呼び出さない(LLM不使用の操作のみ)
|
- LLM: 呼び出さない(LLM不使用の操作のみ)
|
||||||
@ -145,6 +151,15 @@ E2Eテストを追加・変更した場合は、このドキュメントも更
|
|||||||
- `takt reset categories` を実行する。
|
- `takt reset categories` を実行する。
|
||||||
- 出力に `reset` を含むことを確認する。
|
- 出力に `reset` を含むことを確認する。
|
||||||
- `$TAKT_CONFIG_DIR/preferences/piece-categories.yaml` が存在し `piece_categories: {}` を含むことを確認する。
|
- `$TAKT_CONFIG_DIR/preferences/piece-categories.yaml` が存在し `piece_categories: {}` を含むことを確認する。
|
||||||
|
- Reset config(`e2e/specs/cli-reset-config.e2e.ts`)
|
||||||
|
- 目的: `takt reset config` でグローバル設定をテンプレートへ戻し、旧設定をバックアップすることを確認。
|
||||||
|
- LLM: 呼び出さない(LLM不使用の操作のみ)
|
||||||
|
- 手順(ユーザー行動/コマンド):
|
||||||
|
- `$TAKT_CONFIG_DIR/config.yaml` に任意の設定を書き込む(例: `language: ja`, `provider: mock`)。
|
||||||
|
- `takt reset config` を実行する。
|
||||||
|
- 出力に `reset` と `backup:` を含むことを確認する。
|
||||||
|
- `$TAKT_CONFIG_DIR/config.yaml` がテンプレート内容(例: `branch_name_strategy: ai`, `concurrency: 2`)に置き換わっていることを確認する。
|
||||||
|
- `$TAKT_CONFIG_DIR/` 直下に `config.yaml.YYYYMMDD-HHmmss.old` 形式のバックアップファイルが1件作成されることを確認する。
|
||||||
- Export Claude Code Skill(`e2e/specs/cli-export-cc.e2e.ts`)
|
- Export Claude Code Skill(`e2e/specs/cli-export-cc.e2e.ts`)
|
||||||
- 目的: `takt export-cc` でClaude Code Skillのデプロイを確認。
|
- 目的: `takt export-cc` でClaude Code Skillのデプロイを確認。
|
||||||
- LLM: 呼び出さない(LLM不使用の操作のみ)
|
- LLM: 呼び出さない(LLM不使用の操作のみ)
|
||||||
@ -154,3 +169,53 @@ E2Eテストを追加・変更した場合は、このドキュメントも更
|
|||||||
- 出力に `ファイルをデプロイしました` を含むことを確認する。
|
- 出力に `ファイルをデプロイしました` を含むことを確認する。
|
||||||
- `$HOME/.claude/skills/takt/SKILL.md` が存在することを確認する。
|
- `$HOME/.claude/skills/takt/SKILL.md` が存在することを確認する。
|
||||||
- `$HOME/.claude/skills/takt/pieces/` および `$HOME/.claude/skills/takt/personas/` ディレクトリが存在し、それぞれ少なくとも1ファイルを含むことを確認する。
|
- `$HOME/.claude/skills/takt/pieces/` および `$HOME/.claude/skills/takt/personas/` ディレクトリが存在し、それぞれ少なくとも1ファイルを含むことを確認する。
|
||||||
|
|
||||||
|
## 追記シナリオ(2026-02-19)
|
||||||
|
過去にドキュメント未反映だったシナリオを以下に追記する。
|
||||||
|
|
||||||
|
- Config priority(`e2e/specs/config-priority.e2e.ts`)
|
||||||
|
- 目的: `piece` と `auto_pr` の優先順位(config/env/CLI)を検証。
|
||||||
|
- 手順(要約):
|
||||||
|
- `--pipeline` で `--piece` 未指定時に設定値の `piece` が使われることを確認。
|
||||||
|
- `auto_pr` 未設定時は確認デフォルト `true` が反映されることを確認。
|
||||||
|
- `config` と `TAKT_AUTO_PR` の優先を確認。
|
||||||
|
- Pipeline --skip-git on local/non-git directories(`e2e/specs/pipeline-local-repo.e2e.ts`)
|
||||||
|
- 目的: ローカルGitリポジトリおよび非Gitディレクトリで `--pipeline --skip-git` が動作することを確認。
|
||||||
|
- Task content_file reference(`e2e/specs/task-content-file.e2e.ts`)
|
||||||
|
- 目的: `tasks.yaml` の `content_file` 参照が解決されること、および不正参照時エラーを確認。
|
||||||
|
- Task status persistence(`e2e/specs/task-status-persistence.e2e.ts`)
|
||||||
|
- 目的: 成功時/失敗時の `tasks.yaml` 状態遷移(完了消込・失敗記録)を確認。
|
||||||
|
- Run multiple tasks(`e2e/specs/run-multiple-tasks.e2e.ts`)
|
||||||
|
- 目的: 複数pendingタスクの連続実行、途中失敗時継続、タスク空時の終了挙動を確認。
|
||||||
|
- Session NDJSON log output(`e2e/specs/session-log.e2e.ts`)
|
||||||
|
- 目的: NDJSONログの主要イベント(`piece_complete` / `piece_abort` 等)出力を確認。
|
||||||
|
- Structured output rule matching(`e2e/specs/structured-output.e2e.ts`)
|
||||||
|
- 目的: structured output によるルール判定(Phase 3)を確認。
|
||||||
|
- Piece error handling(`e2e/specs/piece-error-handling.e2e.ts`)
|
||||||
|
- 目的: エージェントエラー、最大反復到達、前回応答受け渡しの挙動を確認。
|
||||||
|
- Multi-step with parallel movements(`e2e/specs/multi-step-parallel.e2e.ts`)
|
||||||
|
- 目的: 並列ムーブメントを含む複数ステップ遷移を確認。
|
||||||
|
- Sequential multi-step session log transitions(`e2e/specs/multi-step-sequential.e2e.ts`)
|
||||||
|
- 目的: 逐次ステップでのセッションログ遷移を確認。
|
||||||
|
- Cycle detection via loop_monitors(`e2e/specs/cycle-detection.e2e.ts`)
|
||||||
|
- 目的: ループ監視設定による abort/continue の境界を確認。
|
||||||
|
- Provider error handling(`e2e/specs/provider-error.e2e.ts`)
|
||||||
|
- 目的: provider上書き、mockシナリオ不足時の挙動、シナリオ不在時エラーを確認。
|
||||||
|
- Model override(`e2e/specs/model-override.e2e.ts`)
|
||||||
|
- 目的: `--model` オプションが通常実行/`--pipeline --skip-git` で反映されることを確認。
|
||||||
|
- Error handling edge cases(`e2e/specs/error-handling.e2e.ts`)
|
||||||
|
- 目的: 不正引数・存在しないpiece・不正YAMLなど代表エラーケースを確認。
|
||||||
|
- Quiet mode(`e2e/specs/quiet-mode.e2e.ts`)
|
||||||
|
- 目的: `--quiet` でAIストリーム出力が抑制されることを確認。
|
||||||
|
- Catalog command(`e2e/specs/cli-catalog.e2e.ts`)
|
||||||
|
- 目的: `takt catalog` の一覧表示・型指定・不正型エラーを確認。
|
||||||
|
- Prompt preview command(`e2e/specs/cli-prompt.e2e.ts`)
|
||||||
|
- 目的: `takt prompt` のプレビュー出力と不正piece時エラーを確認。
|
||||||
|
- Switch piece command(`e2e/specs/cli-switch.e2e.ts`)
|
||||||
|
- 目的: `takt switch` の切替成功・不正piece時エラーを確認。
|
||||||
|
- Clear sessions command(`e2e/specs/cli-clear.e2e.ts`)
|
||||||
|
- 目的: `takt clear` でセッション情報が削除されることを確認。
|
||||||
|
- Help command(`e2e/specs/cli-help.e2e.ts`)
|
||||||
|
- 目的: `takt --help` と `takt run --help` の表示内容を確認。
|
||||||
|
- Eject builtin pieces(`e2e/specs/eject.e2e.ts`)
|
||||||
|
- 目的: `takt eject` のproject/global出力、既存時スキップ、facet個別ejectを確認。
|
||||||
|
|||||||
@ -1,7 +1,6 @@
|
|||||||
provider: claude
|
provider: claude
|
||||||
language: en
|
language: en
|
||||||
log_level: info
|
log_level: info
|
||||||
default_piece: default
|
|
||||||
notification_sound: false
|
notification_sound: false
|
||||||
notification_sound_events:
|
notification_sound_events:
|
||||||
iteration_limit: false
|
iteration_limit: false
|
||||||
|
|||||||
@ -1,85 +0,0 @@
|
|||||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
|
||||||
import { readFileSync } from 'node:fs';
|
|
||||||
import { join } from 'node:path';
|
|
||||||
import { createIsolatedEnv, type IsolatedEnv } from '../helpers/isolated-env';
|
|
||||||
import { runTakt } from '../helpers/takt-runner';
|
|
||||||
import { createLocalRepo, type LocalRepo } from '../helpers/test-repo';
|
|
||||||
|
|
||||||
// E2E更新時は docs/testing/e2e.md も更新すること
|
|
||||||
describe('E2E: Config command (takt config)', () => {
|
|
||||||
let isolatedEnv: IsolatedEnv;
|
|
||||||
let repo: LocalRepo;
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
isolatedEnv = createIsolatedEnv();
|
|
||||||
repo = createLocalRepo();
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
try { repo.cleanup(); } catch { /* best-effort */ }
|
|
||||||
try { isolatedEnv.cleanup(); } catch { /* best-effort */ }
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should switch to default mode with explicit argument', () => {
|
|
||||||
// Given: a local repo with isolated env
|
|
||||||
|
|
||||||
// When: running takt config default
|
|
||||||
const result = runTakt({
|
|
||||||
args: ['config', 'default'],
|
|
||||||
cwd: repo.path,
|
|
||||||
env: isolatedEnv.env,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Then: exits successfully and outputs switched message
|
|
||||||
expect(result.exitCode).toBe(0);
|
|
||||||
const output = result.stdout;
|
|
||||||
expect(output).toMatch(/Switched to: default/);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should switch to sacrifice-my-pc mode with explicit argument', () => {
|
|
||||||
// Given: a local repo with isolated env
|
|
||||||
|
|
||||||
// When: running takt config sacrifice-my-pc
|
|
||||||
const result = runTakt({
|
|
||||||
args: ['config', 'sacrifice-my-pc'],
|
|
||||||
cwd: repo.path,
|
|
||||||
env: isolatedEnv.env,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Then: exits successfully and outputs switched message
|
|
||||||
expect(result.exitCode).toBe(0);
|
|
||||||
const output = result.stdout;
|
|
||||||
expect(output).toMatch(/Switched to: sacrifice-my-pc/);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should persist permission mode to project config', () => {
|
|
||||||
// Given: a local repo with isolated env
|
|
||||||
|
|
||||||
// When: running takt config sacrifice-my-pc
|
|
||||||
runTakt({
|
|
||||||
args: ['config', 'sacrifice-my-pc'],
|
|
||||||
cwd: repo.path,
|
|
||||||
env: isolatedEnv.env,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Then: .takt/config.yaml contains permissionMode: sacrifice-my-pc
|
|
||||||
const configPath = join(repo.path, '.takt', 'config.yaml');
|
|
||||||
const content = readFileSync(configPath, 'utf-8');
|
|
||||||
expect(content).toMatch(/permissionMode:\s*sacrifice-my-pc/);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should report error for invalid mode name', () => {
|
|
||||||
// Given: a local repo with isolated env
|
|
||||||
|
|
||||||
// When: running takt config with an invalid mode
|
|
||||||
const result = runTakt({
|
|
||||||
args: ['config', 'invalid-mode'],
|
|
||||||
cwd: repo.path,
|
|
||||||
env: isolatedEnv.env,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Then: output contains invalid mode message
|
|
||||||
const combined = result.stdout + result.stderr;
|
|
||||||
expect(combined).toMatch(/Invalid mode/);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
48
e2e/specs/cli-reset-config.e2e.ts
Normal file
48
e2e/specs/cli-reset-config.e2e.ts
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { readdirSync, readFileSync, writeFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { createIsolatedEnv, type IsolatedEnv } from '../helpers/isolated-env';
|
||||||
|
import { runTakt } from '../helpers/takt-runner';
|
||||||
|
import { createLocalRepo, type LocalRepo } from '../helpers/test-repo';
|
||||||
|
|
||||||
|
// E2E更新時は docs/testing/e2e.md も更新すること
|
||||||
|
describe('E2E: Reset config command (takt reset config)', () => {
|
||||||
|
let isolatedEnv: IsolatedEnv;
|
||||||
|
let repo: LocalRepo;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
isolatedEnv = createIsolatedEnv();
|
||||||
|
repo = createLocalRepo();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
try { repo.cleanup(); } catch { /* best-effort */ }
|
||||||
|
try { isolatedEnv.cleanup(); } catch { /* best-effort */ }
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should backup current config and replace with builtin template', () => {
|
||||||
|
const configPath = join(isolatedEnv.taktDir, 'config.yaml');
|
||||||
|
writeFileSync(configPath, ['language: ja', 'provider: mock'].join('\n'), 'utf-8');
|
||||||
|
|
||||||
|
const result = runTakt({
|
||||||
|
args: ['reset', 'config'],
|
||||||
|
cwd: repo.path,
|
||||||
|
env: isolatedEnv.env,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.exitCode).toBe(0);
|
||||||
|
const output = result.stdout;
|
||||||
|
expect(output).toMatch(/reset/i);
|
||||||
|
expect(output).toMatch(/backup:/i);
|
||||||
|
|
||||||
|
const config = readFileSync(configPath, 'utf-8');
|
||||||
|
expect(config).toContain('language: ja');
|
||||||
|
expect(config).toContain('branch_name_strategy: ai');
|
||||||
|
expect(config).toContain('concurrency: 2');
|
||||||
|
|
||||||
|
const backups = readdirSync(isolatedEnv.taktDir).filter((name) =>
|
||||||
|
/^config\.yaml\.\d{8}-\d{6}\.old(\.\d+)?$/.test(name),
|
||||||
|
);
|
||||||
|
expect(backups.length).toBe(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
152
e2e/specs/config-priority.e2e.ts
Normal file
152
e2e/specs/config-priority.e2e.ts
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { dirname, join, resolve } from 'node:path';
|
||||||
|
import { fileURLToPath } from 'node:url';
|
||||||
|
import { mkdirSync, readFileSync, writeFileSync } from 'node:fs';
|
||||||
|
import { parse as parseYaml } from 'yaml';
|
||||||
|
import { createIsolatedEnv, updateIsolatedConfig, type IsolatedEnv } from '../helpers/isolated-env';
|
||||||
|
import { createTestRepo, type TestRepo } from '../helpers/test-repo';
|
||||||
|
import { runTakt } from '../helpers/takt-runner';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = dirname(__filename);
|
||||||
|
|
||||||
|
function readFirstTask(repoPath: string): Record<string, unknown> {
|
||||||
|
const tasksPath = join(repoPath, '.takt', 'tasks.yaml');
|
||||||
|
const raw = readFileSync(tasksPath, 'utf-8');
|
||||||
|
const parsed = parseYaml(raw) as { tasks?: Array<Record<string, unknown>> } | null;
|
||||||
|
const first = parsed?.tasks?.[0];
|
||||||
|
if (!first) {
|
||||||
|
throw new Error(`No task record found in ${tasksPath}`);
|
||||||
|
}
|
||||||
|
return first;
|
||||||
|
}
|
||||||
|
|
||||||
|
// E2E更新時は docs/testing/e2e.md も更新すること
|
||||||
|
describe('E2E: Config priority (piece / autoPr)', () => {
|
||||||
|
let isolatedEnv: IsolatedEnv;
|
||||||
|
let testRepo: TestRepo;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
isolatedEnv = createIsolatedEnv();
|
||||||
|
testRepo = createTestRepo();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
try {
|
||||||
|
testRepo.cleanup();
|
||||||
|
} catch {
|
||||||
|
// best-effort
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
isolatedEnv.cleanup();
|
||||||
|
} catch {
|
||||||
|
// best-effort
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use configured piece in pipeline when --piece is omitted', () => {
|
||||||
|
const configuredPiecePath = resolve(__dirname, '../fixtures/pieces/mock-single-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/execute-done.json');
|
||||||
|
const projectConfigDir = join(testRepo.path, '.takt');
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(
|
||||||
|
join(projectConfigDir, 'config.yaml'),
|
||||||
|
`piece: ${JSON.stringify(configuredPiecePath)}\n`,
|
||||||
|
'utf-8',
|
||||||
|
);
|
||||||
|
|
||||||
|
const result = runTakt({
|
||||||
|
args: [
|
||||||
|
'--pipeline',
|
||||||
|
'--task', 'Pipeline run should resolve piece from config',
|
||||||
|
'--skip-git',
|
||||||
|
'--provider', 'mock',
|
||||||
|
],
|
||||||
|
cwd: testRepo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.exitCode).toBe(0);
|
||||||
|
expect(result.stdout).toContain(`Running piece: ${configuredPiecePath}`);
|
||||||
|
expect(result.stdout).toContain(`Piece '${configuredPiecePath}' completed`);
|
||||||
|
}, 240_000);
|
||||||
|
|
||||||
|
it('should default auto_pr to true when unset in config/env', () => {
|
||||||
|
const piecePath = resolve(__dirname, '../fixtures/pieces/mock-single-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/execute-done.json');
|
||||||
|
|
||||||
|
const result = runTakt({
|
||||||
|
args: [
|
||||||
|
'--task', 'Auto PR default behavior',
|
||||||
|
'--piece', piecePath,
|
||||||
|
'--create-worktree', 'yes',
|
||||||
|
'--provider', 'mock',
|
||||||
|
],
|
||||||
|
cwd: testRepo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.exitCode).toBe(0);
|
||||||
|
const task = readFirstTask(testRepo.path);
|
||||||
|
expect(task['auto_pr']).toBe(true);
|
||||||
|
}, 240_000);
|
||||||
|
|
||||||
|
it('should use auto_pr from config when set', () => {
|
||||||
|
const piecePath = resolve(__dirname, '../fixtures/pieces/mock-single-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/execute-done.json');
|
||||||
|
updateIsolatedConfig(isolatedEnv.taktDir, { auto_pr: false });
|
||||||
|
|
||||||
|
const result = runTakt({
|
||||||
|
args: [
|
||||||
|
'--task', 'Auto PR from config',
|
||||||
|
'--piece', piecePath,
|
||||||
|
'--create-worktree', 'yes',
|
||||||
|
'--provider', 'mock',
|
||||||
|
],
|
||||||
|
cwd: testRepo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.exitCode).toBe(0);
|
||||||
|
const task = readFirstTask(testRepo.path);
|
||||||
|
expect(task['auto_pr']).toBe(false);
|
||||||
|
}, 240_000);
|
||||||
|
|
||||||
|
it('should prioritize env auto_pr over config', () => {
|
||||||
|
const piecePath = resolve(__dirname, '../fixtures/pieces/mock-single-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/execute-done.json');
|
||||||
|
updateIsolatedConfig(isolatedEnv.taktDir, { auto_pr: false });
|
||||||
|
|
||||||
|
const result = runTakt({
|
||||||
|
args: [
|
||||||
|
'--task', 'Auto PR from env override',
|
||||||
|
'--piece', piecePath,
|
||||||
|
'--create-worktree', 'yes',
|
||||||
|
'--provider', 'mock',
|
||||||
|
],
|
||||||
|
cwd: testRepo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_AUTO_PR: 'true',
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.exitCode).toBe(0);
|
||||||
|
const task = readFirstTask(testRepo.path);
|
||||||
|
expect(task['auto_pr']).toBe(true);
|
||||||
|
}, 240_000);
|
||||||
|
});
|
||||||
325
e2e/specs/run-recovery.e2e.ts
Normal file
325
e2e/specs/run-recovery.e2e.ts
Normal file
@ -0,0 +1,325 @@
|
|||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { spawn, execFileSync } from 'node:child_process';
|
||||||
|
import { resolve, dirname, join } from 'node:path';
|
||||||
|
import { fileURLToPath } from 'node:url';
|
||||||
|
import {
|
||||||
|
mkdtempSync,
|
||||||
|
mkdirSync,
|
||||||
|
writeFileSync,
|
||||||
|
readFileSync,
|
||||||
|
rmSync,
|
||||||
|
existsSync,
|
||||||
|
readdirSync,
|
||||||
|
} from 'node:fs';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { parse as parseYaml, stringify as stringifyYaml } from 'yaml';
|
||||||
|
import {
|
||||||
|
createIsolatedEnv,
|
||||||
|
updateIsolatedConfig,
|
||||||
|
type IsolatedEnv,
|
||||||
|
} from '../helpers/isolated-env';
|
||||||
|
import { runTakt } from '../helpers/takt-runner';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = dirname(__filename);
|
||||||
|
|
||||||
|
interface LocalRepo {
|
||||||
|
path: string;
|
||||||
|
cleanup: () => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface TaskRecord {
|
||||||
|
name: string;
|
||||||
|
status: 'pending' | 'running' | 'failed' | 'completed';
|
||||||
|
owner_pid?: number | null;
|
||||||
|
piece?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createLocalRepo(): LocalRepo {
|
||||||
|
const repoPath = mkdtempSync(join(tmpdir(), 'takt-e2e-run-recovery-'));
|
||||||
|
execFileSync('git', ['init'], { cwd: repoPath, stdio: 'pipe' });
|
||||||
|
execFileSync('git', ['config', 'user.email', 'test@example.com'], { cwd: repoPath, stdio: 'pipe' });
|
||||||
|
execFileSync('git', ['config', 'user.name', 'Test'], { cwd: repoPath, stdio: 'pipe' });
|
||||||
|
writeFileSync(join(repoPath, 'README.md'), '# test\n');
|
||||||
|
execFileSync('git', ['add', '.'], { cwd: repoPath, stdio: 'pipe' });
|
||||||
|
execFileSync('git', ['commit', '-m', 'init'], { cwd: repoPath, stdio: 'pipe' });
|
||||||
|
return {
|
||||||
|
path: repoPath,
|
||||||
|
cleanup: () => {
|
||||||
|
rmSync(repoPath, { recursive: true, force: true });
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function readTasks(tasksFile: string): TaskRecord[] {
|
||||||
|
const raw = readFileSync(tasksFile, 'utf-8');
|
||||||
|
const parsed = parseYaml(raw) as { tasks?: TaskRecord[] };
|
||||||
|
return parsed.tasks ?? [];
|
||||||
|
}
|
||||||
|
|
||||||
|
function waitFor(
|
||||||
|
predicate: () => boolean,
|
||||||
|
timeoutMs: number,
|
||||||
|
intervalMs: number,
|
||||||
|
): Promise<boolean> {
|
||||||
|
return new Promise((resolvePromise) => {
|
||||||
|
const startedAt = Date.now();
|
||||||
|
const timer = setInterval(() => {
|
||||||
|
if (predicate()) {
|
||||||
|
clearInterval(timer);
|
||||||
|
resolvePromise(true);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (Date.now() - startedAt >= timeoutMs) {
|
||||||
|
clearInterval(timer);
|
||||||
|
resolvePromise(false);
|
||||||
|
}
|
||||||
|
}, intervalMs);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function createPendingTasksYaml(
|
||||||
|
count: number,
|
||||||
|
piecePath: string,
|
||||||
|
prefix: string,
|
||||||
|
): string {
|
||||||
|
const now = new Date().toISOString();
|
||||||
|
const tasks = Array.from({ length: count }, (_, index) => ({
|
||||||
|
name: `${prefix}-${String(index + 1)}`,
|
||||||
|
status: 'pending' as const,
|
||||||
|
content: `${prefix} task ${String(index + 1)}`,
|
||||||
|
piece: piecePath,
|
||||||
|
created_at: now,
|
||||||
|
started_at: null,
|
||||||
|
completed_at: null,
|
||||||
|
owner_pid: null,
|
||||||
|
}));
|
||||||
|
return stringifyYaml({ tasks });
|
||||||
|
}
|
||||||
|
|
||||||
|
function createEnvWithoutGlobalConfig(): {
|
||||||
|
env: NodeJS.ProcessEnv;
|
||||||
|
cleanup: () => void;
|
||||||
|
globalConfigPath: string;
|
||||||
|
} {
|
||||||
|
const baseDir = mkdtempSync(join(tmpdir(), 'takt-e2e-init-flow-'));
|
||||||
|
const globalConfigDir = join(baseDir, '.takt-global');
|
||||||
|
const globalGitConfigPath = join(baseDir, '.gitconfig');
|
||||||
|
const globalConfigPath = join(globalConfigDir, 'config.yaml');
|
||||||
|
|
||||||
|
writeFileSync(
|
||||||
|
globalGitConfigPath,
|
||||||
|
['[user]', ' name = TAKT E2E Test', ' email = e2e@example.com'].join('\n'),
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
env: {
|
||||||
|
...process.env,
|
||||||
|
TAKT_CONFIG_DIR: globalConfigDir,
|
||||||
|
GIT_CONFIG_GLOBAL: globalGitConfigPath,
|
||||||
|
TAKT_NO_TTY: '1',
|
||||||
|
},
|
||||||
|
globalConfigPath,
|
||||||
|
cleanup: () => {
|
||||||
|
rmSync(baseDir, { recursive: true, force: true });
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// E2E更新時は docs/testing/e2e.md も更新すること
|
||||||
|
describe('E2E: Run interrupted task recovery and high-priority run flows', () => {
|
||||||
|
let isolatedEnv: IsolatedEnv;
|
||||||
|
let repo: LocalRepo;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
isolatedEnv = createIsolatedEnv();
|
||||||
|
repo = createLocalRepo();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
repo.cleanup();
|
||||||
|
isolatedEnv.cleanup();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should recover stale running task generated by forced process termination', async () => {
|
||||||
|
// Given: 2 pending tasks exist, then first run is force-killed while task is running
|
||||||
|
updateIsolatedConfig(isolatedEnv.taktDir, {
|
||||||
|
provider: 'mock',
|
||||||
|
model: 'mock-model',
|
||||||
|
concurrency: 1,
|
||||||
|
task_poll_interval_ms: 50,
|
||||||
|
});
|
||||||
|
|
||||||
|
const piecePath = resolve(__dirname, '../fixtures/pieces/mock-slow-multi-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/run-sigint-parallel.json');
|
||||||
|
const tasksFile = join(repo.path, '.takt', 'tasks.yaml');
|
||||||
|
|
||||||
|
mkdirSync(join(repo.path, '.takt'), { recursive: true });
|
||||||
|
writeFileSync(tasksFile, createPendingTasksYaml(2, piecePath, 'recovery-target'), 'utf-8');
|
||||||
|
|
||||||
|
const binPath = resolve(__dirname, '../../bin/takt');
|
||||||
|
const child = spawn('node', [binPath, 'run', '--provider', 'mock'], {
|
||||||
|
cwd: repo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
stdio: ['ignore', 'pipe', 'pipe'],
|
||||||
|
});
|
||||||
|
|
||||||
|
let firstStdout = '';
|
||||||
|
let firstStderr = '';
|
||||||
|
child.stdout?.on('data', (chunk) => {
|
||||||
|
firstStdout += chunk.toString();
|
||||||
|
});
|
||||||
|
child.stderr?.on('data', (chunk) => {
|
||||||
|
firstStderr += chunk.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
const runningObserved = await waitFor(() => {
|
||||||
|
if (!existsSync(tasksFile)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
const tasks = readTasks(tasksFile);
|
||||||
|
return tasks.some((task) => task.status === 'running');
|
||||||
|
}, 30_000, 20);
|
||||||
|
|
||||||
|
expect(runningObserved, `stdout:\n${firstStdout}\n\nstderr:\n${firstStderr}`).toBe(true);
|
||||||
|
|
||||||
|
child.kill('SIGKILL');
|
||||||
|
|
||||||
|
await new Promise<void>((resolvePromise) => {
|
||||||
|
child.once('close', () => {
|
||||||
|
resolvePromise();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const staleTasks = readTasks(tasksFile);
|
||||||
|
const runningTask = staleTasks.find((task) => task.status === 'running');
|
||||||
|
expect(runningTask).toBeDefined();
|
||||||
|
expect(runningTask?.owner_pid).toBeTypeOf('number');
|
||||||
|
|
||||||
|
// When: run is executed again
|
||||||
|
const rerunResult = runTakt({
|
||||||
|
args: ['run', '--provider', 'mock'],
|
||||||
|
cwd: repo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Then: stale running task is recovered and all tasks complete
|
||||||
|
expect(rerunResult.exitCode).toBe(0);
|
||||||
|
const combined = rerunResult.stdout + rerunResult.stderr;
|
||||||
|
expect(combined).toContain('Recovered 1 interrupted running task(s) to pending.');
|
||||||
|
expect(combined).toContain('recovery-target-1');
|
||||||
|
expect(combined).toContain('recovery-target-2');
|
||||||
|
|
||||||
|
const finalTasks = readTasks(tasksFile);
|
||||||
|
expect(finalTasks).toEqual([]);
|
||||||
|
}, 240_000);
|
||||||
|
|
||||||
|
it('should process high-concurrency batch without leaving inconsistent task state', () => {
|
||||||
|
// Given: 12 pending tasks with concurrency=10
|
||||||
|
updateIsolatedConfig(isolatedEnv.taktDir, {
|
||||||
|
provider: 'mock',
|
||||||
|
model: 'mock-model',
|
||||||
|
concurrency: 10,
|
||||||
|
task_poll_interval_ms: 50,
|
||||||
|
});
|
||||||
|
|
||||||
|
const piecePath = resolve(__dirname, '../fixtures/pieces/mock-single-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/execute-done.json');
|
||||||
|
const tasksFile = join(repo.path, '.takt', 'tasks.yaml');
|
||||||
|
|
||||||
|
mkdirSync(join(repo.path, '.takt'), { recursive: true });
|
||||||
|
writeFileSync(tasksFile, createPendingTasksYaml(12, piecePath, 'parallel-load'), 'utf-8');
|
||||||
|
|
||||||
|
// When: run all tasks
|
||||||
|
const result = runTakt({
|
||||||
|
args: ['run', '--provider', 'mock'],
|
||||||
|
cwd: repo.path,
|
||||||
|
env: {
|
||||||
|
...isolatedEnv.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Then: all tasks complete and queue becomes empty
|
||||||
|
expect(result.exitCode).toBe(0);
|
||||||
|
expect(result.stdout).toContain('Concurrency: 10');
|
||||||
|
expect(result.stdout).toContain('Tasks Summary');
|
||||||
|
const finalTasks = readTasks(tasksFile);
|
||||||
|
expect(finalTasks).toEqual([]);
|
||||||
|
}, 240_000);
|
||||||
|
|
||||||
|
it('should initialize project dirs and execute tasks after add+run when global config is absent', () => {
|
||||||
|
const envWithoutConfig = createEnvWithoutGlobalConfig();
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Given: global config.yaml is absent and project config points to a mock piece path
|
||||||
|
const piecePath = resolve(__dirname, '../fixtures/pieces/mock-single-step.yaml');
|
||||||
|
const scenarioPath = resolve(__dirname, '../fixtures/scenarios/execute-done.json');
|
||||||
|
const projectConfigDir = join(repo.path, '.takt');
|
||||||
|
const projectConfigPath = join(projectConfigDir, 'config.yaml');
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(projectConfigPath, `piece: ${piecePath}\npermissionMode: default\n`, 'utf-8');
|
||||||
|
|
||||||
|
expect(existsSync(envWithoutConfig.globalConfigPath)).toBe(false);
|
||||||
|
|
||||||
|
// When: add 2 tasks and run once
|
||||||
|
const addResult1 = runTakt({
|
||||||
|
args: ['--provider', 'mock', 'add', 'Initialize flow task 1'],
|
||||||
|
cwd: repo.path,
|
||||||
|
env: {
|
||||||
|
...envWithoutConfig.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
const addResult2 = runTakt({
|
||||||
|
args: ['--provider', 'mock', 'add', 'Initialize flow task 2'],
|
||||||
|
cwd: repo.path,
|
||||||
|
env: {
|
||||||
|
...envWithoutConfig.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
const runResult = runTakt({
|
||||||
|
args: ['--provider', 'mock', 'run'],
|
||||||
|
cwd: repo.path,
|
||||||
|
env: {
|
||||||
|
...envWithoutConfig.env,
|
||||||
|
TAKT_MOCK_SCENARIO: scenarioPath,
|
||||||
|
},
|
||||||
|
timeout: 240_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Then: tasks are persisted/executed correctly and project init artifacts exist
|
||||||
|
expect(addResult1.exitCode).toBe(0);
|
||||||
|
expect(addResult2.exitCode).toBe(0);
|
||||||
|
expect(runResult.exitCode).toBe(0);
|
||||||
|
|
||||||
|
const tasksFile = join(repo.path, '.takt', 'tasks.yaml');
|
||||||
|
const parsedFinal = parseYaml(readFileSync(tasksFile, 'utf-8')) as { tasks?: TaskRecord[] };
|
||||||
|
expect(parsedFinal.tasks).toEqual([]);
|
||||||
|
|
||||||
|
const taskDirsRoot = join(repo.path, '.takt', 'tasks');
|
||||||
|
const taskDirs = readdirSync(taskDirsRoot, { withFileTypes: true })
|
||||||
|
.filter((entry) => entry.isDirectory())
|
||||||
|
.map((entry) => entry.name);
|
||||||
|
expect(taskDirs.length).toBe(2);
|
||||||
|
|
||||||
|
expect(existsSync(join(projectConfigDir, '.gitignore'))).toBe(true);
|
||||||
|
expect(existsSync(envWithoutConfig.globalConfigPath)).toBe(false);
|
||||||
|
} finally {
|
||||||
|
envWithoutConfig.cleanup();
|
||||||
|
}
|
||||||
|
}, 240_000);
|
||||||
|
});
|
||||||
167
package-lock.json
generated
167
package-lock.json
generated
@ -1,15 +1,15 @@
|
|||||||
{
|
{
|
||||||
"name": "takt",
|
"name": "takt",
|
||||||
"version": "0.19.0",
|
"version": "0.20.0",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "takt",
|
"name": "takt",
|
||||||
"version": "0.19.0",
|
"version": "0.20.0",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@anthropic-ai/claude-agent-sdk": "^0.2.37",
|
"@anthropic-ai/claude-agent-sdk": "^0.2.47",
|
||||||
"@openai/codex-sdk": "^0.103.0",
|
"@openai/codex-sdk": "^0.103.0",
|
||||||
"@opencode-ai/sdk": "^1.1.53",
|
"@opencode-ai/sdk": "^1.1.53",
|
||||||
"chalk": "^5.3.0",
|
"chalk": "^5.3.0",
|
||||||
@ -40,22 +40,23 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@anthropic-ai/claude-agent-sdk": {
|
"node_modules/@anthropic-ai/claude-agent-sdk": {
|
||||||
"version": "0.2.37",
|
"version": "0.2.47",
|
||||||
"resolved": "https://registry.npmjs.org/@anthropic-ai/claude-agent-sdk/-/claude-agent-sdk-0.2.37.tgz",
|
"resolved": "https://registry.npmjs.org/@anthropic-ai/claude-agent-sdk/-/claude-agent-sdk-0.2.47.tgz",
|
||||||
"integrity": "sha512-0TCAUuGXiWYV2JK+j2SiakGzPA7aoR5DNRxZ0EA571loGIqN3FRfiO1kipeBpEc+cRQ03a/4Kt5YAjMx0KBW+A==",
|
"integrity": "sha512-tcptBQwLnaUv6f5KiiUUtGduiLUhwV/xT0kPxVG+K2Wws1T/2MLViwIoti3AkJuNJ2qZ5FOwl1YQLHPMeHlYVQ==",
|
||||||
"license": "SEE LICENSE IN README.md",
|
"license": "SEE LICENSE IN README.md",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=18.0.0"
|
"node": ">=18.0.0"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-darwin-arm64": "^0.33.5",
|
"@img/sharp-darwin-arm64": "^0.34.2",
|
||||||
"@img/sharp-darwin-x64": "^0.33.5",
|
"@img/sharp-darwin-x64": "^0.34.2",
|
||||||
"@img/sharp-linux-arm": "^0.33.5",
|
"@img/sharp-linux-arm": "^0.34.2",
|
||||||
"@img/sharp-linux-arm64": "^0.33.5",
|
"@img/sharp-linux-arm64": "^0.34.2",
|
||||||
"@img/sharp-linux-x64": "^0.33.5",
|
"@img/sharp-linux-x64": "^0.34.2",
|
||||||
"@img/sharp-linuxmusl-arm64": "^0.33.5",
|
"@img/sharp-linuxmusl-arm64": "^0.34.2",
|
||||||
"@img/sharp-linuxmusl-x64": "^0.33.5",
|
"@img/sharp-linuxmusl-x64": "^0.34.2",
|
||||||
"@img/sharp-win32-x64": "^0.33.5"
|
"@img/sharp-win32-arm64": "^0.34.2",
|
||||||
|
"@img/sharp-win32-x64": "^0.34.2"
|
||||||
},
|
},
|
||||||
"peerDependencies": {
|
"peerDependencies": {
|
||||||
"zod": "^4.0.0"
|
"zod": "^4.0.0"
|
||||||
@ -653,12 +654,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-darwin-arm64": {
|
"node_modules/@img/sharp-darwin-arm64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz",
|
||||||
"integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==",
|
"integrity": "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"darwin"
|
"darwin"
|
||||||
@ -670,16 +672,17 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-darwin-arm64": "1.0.4"
|
"@img/sharp-libvips-darwin-arm64": "1.2.4"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-darwin-x64": {
|
"node_modules/@img/sharp-darwin-x64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz",
|
||||||
"integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==",
|
"integrity": "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"darwin"
|
"darwin"
|
||||||
@ -691,16 +694,17 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-darwin-x64": "1.0.4"
|
"@img/sharp-libvips-darwin-x64": "1.2.4"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-darwin-arm64": {
|
"node_modules/@img/sharp-libvips-darwin-arm64": {
|
||||||
"version": "1.0.4",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz",
|
||||||
"integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==",
|
"integrity": "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"darwin"
|
"darwin"
|
||||||
@ -710,12 +714,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-darwin-x64": {
|
"node_modules/@img/sharp-libvips-darwin-x64": {
|
||||||
"version": "1.0.4",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz",
|
||||||
"integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==",
|
"integrity": "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"darwin"
|
"darwin"
|
||||||
@ -725,12 +730,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-linux-arm": {
|
"node_modules/@img/sharp-libvips-linux-arm": {
|
||||||
"version": "1.0.5",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz",
|
||||||
"integrity": "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==",
|
"integrity": "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm"
|
"arm"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -740,12 +746,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-linux-arm64": {
|
"node_modules/@img/sharp-libvips-linux-arm64": {
|
||||||
"version": "1.0.4",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz",
|
||||||
"integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==",
|
"integrity": "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -755,12 +762,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-linux-x64": {
|
"node_modules/@img/sharp-libvips-linux-x64": {
|
||||||
"version": "1.0.4",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz",
|
||||||
"integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==",
|
"integrity": "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -770,12 +778,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-linuxmusl-arm64": {
|
"node_modules/@img/sharp-libvips-linuxmusl-arm64": {
|
||||||
"version": "1.0.4",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz",
|
||||||
"integrity": "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==",
|
"integrity": "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -785,12 +794,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-libvips-linuxmusl-x64": {
|
"node_modules/@img/sharp-libvips-linuxmusl-x64": {
|
||||||
"version": "1.0.4",
|
"version": "1.2.4",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz",
|
||||||
"integrity": "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==",
|
"integrity": "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -800,12 +810,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-linux-arm": {
|
"node_modules/@img/sharp-linux-arm": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz",
|
||||||
"integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==",
|
"integrity": "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm"
|
"arm"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -817,16 +828,17 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-linux-arm": "1.0.5"
|
"@img/sharp-libvips-linux-arm": "1.2.4"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-linux-arm64": {
|
"node_modules/@img/sharp-linux-arm64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz",
|
||||||
"integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==",
|
"integrity": "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -838,16 +850,17 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-linux-arm64": "1.0.4"
|
"@img/sharp-libvips-linux-arm64": "1.2.4"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-linux-x64": {
|
"node_modules/@img/sharp-linux-x64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz",
|
||||||
"integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==",
|
"integrity": "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -859,16 +872,17 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-linux-x64": "1.0.4"
|
"@img/sharp-libvips-linux-x64": "1.2.4"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-linuxmusl-arm64": {
|
"node_modules/@img/sharp-linuxmusl-arm64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz",
|
||||||
"integrity": "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==",
|
"integrity": "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -880,16 +894,17 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-linuxmusl-arm64": "1.0.4"
|
"@img/sharp-libvips-linuxmusl-arm64": "1.2.4"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-linuxmusl-x64": {
|
"node_modules/@img/sharp-linuxmusl-x64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz",
|
||||||
"integrity": "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==",
|
"integrity": "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"linux"
|
"linux"
|
||||||
@ -901,16 +916,36 @@
|
|||||||
"url": "https://opencollective.com/libvips"
|
"url": "https://opencollective.com/libvips"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@img/sharp-libvips-linuxmusl-x64": "1.0.4"
|
"@img/sharp-libvips-linuxmusl-x64": "1.2.4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@img/sharp-win32-arm64": {
|
||||||
|
"version": "0.34.5",
|
||||||
|
"resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz",
|
||||||
|
"integrity": "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"license": "Apache-2.0 AND LGPL-3.0-or-later",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||||
|
},
|
||||||
|
"funding": {
|
||||||
|
"url": "https://opencollective.com/libvips"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@img/sharp-win32-x64": {
|
"node_modules/@img/sharp-win32-x64": {
|
||||||
"version": "0.33.5",
|
"version": "0.34.5",
|
||||||
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz",
|
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz",
|
||||||
"integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==",
|
"integrity": "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
|
"license": "Apache-2.0 AND LGPL-3.0-or-later",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"os": [
|
"os": [
|
||||||
"win32"
|
"win32"
|
||||||
|
|||||||
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "takt",
|
"name": "takt",
|
||||||
"version": "0.19.0",
|
"version": "0.20.0",
|
||||||
"description": "TAKT: TAKT Agent Koordination Topology - AI Agent Piece Orchestration",
|
"description": "TAKT: TAKT Agent Koordination Topology - AI Agent Piece Orchestration",
|
||||||
"main": "dist/index.js",
|
"main": "dist/index.js",
|
||||||
"types": "dist/index.d.ts",
|
"types": "dist/index.d.ts",
|
||||||
@ -60,7 +60,7 @@
|
|||||||
"builtins/"
|
"builtins/"
|
||||||
],
|
],
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@anthropic-ai/claude-agent-sdk": "^0.2.37",
|
"@anthropic-ai/claude-agent-sdk": "^0.2.47",
|
||||||
"@openai/codex-sdk": "^0.103.0",
|
"@openai/codex-sdk": "^0.103.0",
|
||||||
"@opencode-ai/sdk": "^1.1.53",
|
"@opencode-ai/sdk": "^1.1.53",
|
||||||
"chalk": "^5.3.0",
|
"chalk": "^5.3.0",
|
||||||
|
|||||||
111
src/__tests__/analytics-cli-commands.test.ts
Normal file
111
src/__tests__/analytics-cli-commands.test.ts
Normal file
@ -0,0 +1,111 @@
|
|||||||
|
/**
|
||||||
|
* Tests for analytics CLI command logic — metrics review and purge.
|
||||||
|
*
|
||||||
|
* Tests the command action logic by calling the underlying functions
|
||||||
|
* with appropriate parameters, verifying the integration between
|
||||||
|
* config loading, eventsDir resolution, and the analytics functions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdirSync, rmSync, writeFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import {
|
||||||
|
computeReviewMetrics,
|
||||||
|
formatReviewMetrics,
|
||||||
|
parseSinceDuration,
|
||||||
|
purgeOldEvents,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
import type { ReviewFindingEvent } from '../features/analytics/index.js';
|
||||||
|
|
||||||
|
describe('metrics review command logic', () => {
|
||||||
|
let eventsDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
eventsDir = join(tmpdir(), `takt-test-cli-metrics-${Date.now()}`);
|
||||||
|
mkdirSync(eventsDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
rmSync(eventsDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compute and format metrics from resolved eventsDir', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'new', ruleId: 'r-1',
|
||||||
|
severity: 'error', decision: 'reject', file: 'a.ts', line: 1, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
writeFileSync(
|
||||||
|
join(eventsDir, '2026-02-18.jsonl'),
|
||||||
|
events.map((e) => JSON.stringify(e)).join('\n') + '\n',
|
||||||
|
'utf-8',
|
||||||
|
);
|
||||||
|
|
||||||
|
const durationMs = parseSinceDuration('30d');
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const result = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
const output = formatReviewMetrics(result);
|
||||||
|
|
||||||
|
expect(output).toContain('Review Metrics');
|
||||||
|
expect(result.rejectCountsByRule.get('r-1')).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should parse since duration and compute correct time window', () => {
|
||||||
|
const durationMs = parseSinceDuration('7d');
|
||||||
|
const now = new Date('2026-02-18T12:00:00Z').getTime();
|
||||||
|
const sinceMs = now - durationMs;
|
||||||
|
|
||||||
|
expect(sinceMs).toBe(new Date('2026-02-11T12:00:00Z').getTime());
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('purge command logic', () => {
|
||||||
|
let eventsDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
eventsDir = join(tmpdir(), `takt-test-cli-purge-${Date.now()}`);
|
||||||
|
mkdirSync(eventsDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
rmSync(eventsDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should purge files using eventsDir from config and retentionDays from config', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2025-12-01.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-18.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const retentionDays = 30;
|
||||||
|
const deleted = purgeOldEvents(eventsDir, retentionDays, new Date('2026-02-18T12:00:00Z'));
|
||||||
|
|
||||||
|
expect(deleted).toContain('2025-12-01.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-18.jsonl');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fallback to CLI retentionDays when config has no retentionDays', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2025-01-01.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const cliRetentionDays = parseInt('30', 10);
|
||||||
|
const configRetentionDays = undefined;
|
||||||
|
const retentionDays = configRetentionDays ?? cliRetentionDays;
|
||||||
|
const deleted = purgeOldEvents(eventsDir, retentionDays, new Date('2026-02-18T12:00:00Z'));
|
||||||
|
|
||||||
|
expect(deleted).toContain('2025-01-01.jsonl');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use config retentionDays when specified', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-10.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-18.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const cliRetentionDays = parseInt('30', 10);
|
||||||
|
const configRetentionDays = 5;
|
||||||
|
const retentionDays = configRetentionDays ?? cliRetentionDays;
|
||||||
|
const deleted = purgeOldEvents(eventsDir, retentionDays, new Date('2026-02-18T12:00:00Z'));
|
||||||
|
|
||||||
|
expect(deleted).toContain('2026-02-10.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-18.jsonl');
|
||||||
|
});
|
||||||
|
});
|
||||||
132
src/__tests__/analytics-events.test.ts
Normal file
132
src/__tests__/analytics-events.test.ts
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
/**
|
||||||
|
* Tests for analytics event type definitions.
|
||||||
|
*
|
||||||
|
* Validates that event objects conform to the expected shape.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import type {
|
||||||
|
ReviewFindingEvent,
|
||||||
|
FixActionEvent,
|
||||||
|
MovementResultEvent,
|
||||||
|
AnalyticsEvent,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
|
||||||
|
describe('analytics event types', () => {
|
||||||
|
it('should create a valid ReviewFindingEvent', () => {
|
||||||
|
const event: ReviewFindingEvent = {
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'f-001',
|
||||||
|
status: 'new',
|
||||||
|
ruleId: 'no-console-log',
|
||||||
|
severity: 'warning',
|
||||||
|
decision: 'reject',
|
||||||
|
file: 'src/main.ts',
|
||||||
|
line: 42,
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-abc',
|
||||||
|
timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(event.type).toBe('review_finding');
|
||||||
|
expect(event.findingId).toBe('f-001');
|
||||||
|
expect(event.status).toBe('new');
|
||||||
|
expect(event.severity).toBe('warning');
|
||||||
|
expect(event.decision).toBe('reject');
|
||||||
|
expect(event.file).toBe('src/main.ts');
|
||||||
|
expect(event.line).toBe(42);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create a valid FixActionEvent with fixed action', () => {
|
||||||
|
const event: FixActionEvent = {
|
||||||
|
type: 'fix_action',
|
||||||
|
findingId: 'f-001',
|
||||||
|
action: 'fixed',
|
||||||
|
iteration: 2,
|
||||||
|
runId: 'run-abc',
|
||||||
|
timestamp: '2026-02-18T10:01:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(event.type).toBe('fix_action');
|
||||||
|
expect(event.action).toBe('fixed');
|
||||||
|
expect(event.findingId).toBe('f-001');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create a valid FixActionEvent with rebutted action', () => {
|
||||||
|
const event: FixActionEvent = {
|
||||||
|
type: 'fix_action',
|
||||||
|
findingId: 'f-002',
|
||||||
|
action: 'rebutted',
|
||||||
|
iteration: 3,
|
||||||
|
runId: 'run-abc',
|
||||||
|
timestamp: '2026-02-18T10:02:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(event.type).toBe('fix_action');
|
||||||
|
expect(event.action).toBe('rebutted');
|
||||||
|
expect(event.findingId).toBe('f-002');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create a valid MovementResultEvent', () => {
|
||||||
|
const event: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'implement',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'approved',
|
||||||
|
iteration: 3,
|
||||||
|
runId: 'run-abc',
|
||||||
|
timestamp: '2026-02-18T10:02:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(event.type).toBe('movement_result');
|
||||||
|
expect(event.movement).toBe('implement');
|
||||||
|
expect(event.provider).toBe('claude');
|
||||||
|
expect(event.decisionTag).toBe('approved');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should discriminate event types via the type field', () => {
|
||||||
|
const events: AnalyticsEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'f-001',
|
||||||
|
status: 'new',
|
||||||
|
ruleId: 'r-1',
|
||||||
|
severity: 'error',
|
||||||
|
decision: 'reject',
|
||||||
|
file: 'a.ts',
|
||||||
|
line: 1,
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'r',
|
||||||
|
timestamp: '2026-01-01T00:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'fix_action',
|
||||||
|
findingId: 'f-001',
|
||||||
|
action: 'fixed',
|
||||||
|
iteration: 2,
|
||||||
|
runId: 'r',
|
||||||
|
timestamp: '2026-01-01T00:01:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'plan',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'opus',
|
||||||
|
decisionTag: 'done',
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'r',
|
||||||
|
timestamp: '2026-01-01T00:02:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const reviewEvents = events.filter((e) => e.type === 'review_finding');
|
||||||
|
expect(reviewEvents).toHaveLength(1);
|
||||||
|
|
||||||
|
const fixEvents = events.filter((e) => e.type === 'fix_action');
|
||||||
|
expect(fixEvents).toHaveLength(1);
|
||||||
|
|
||||||
|
const movementEvents = events.filter((e) => e.type === 'movement_result');
|
||||||
|
expect(movementEvents).toHaveLength(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
344
src/__tests__/analytics-metrics.test.ts
Normal file
344
src/__tests__/analytics-metrics.test.ts
Normal file
@ -0,0 +1,344 @@
|
|||||||
|
/**
|
||||||
|
* Tests for analytics metrics computation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdirSync, rmSync, writeFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import {
|
||||||
|
computeReviewMetrics,
|
||||||
|
formatReviewMetrics,
|
||||||
|
parseSinceDuration,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
import type {
|
||||||
|
ReviewFindingEvent,
|
||||||
|
FixActionEvent,
|
||||||
|
MovementResultEvent,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
|
||||||
|
describe('analytics metrics', () => {
|
||||||
|
let eventsDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
eventsDir = join(tmpdir(), `takt-test-analytics-metrics-${Date.now()}`);
|
||||||
|
mkdirSync(eventsDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
rmSync(eventsDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
function writeEvents(date: string, events: Array<ReviewFindingEvent | FixActionEvent | MovementResultEvent>): void {
|
||||||
|
const lines = events.map((e) => JSON.stringify(e)).join('\n') + '\n';
|
||||||
|
writeFileSync(join(eventsDir, `${date}.jsonl`), lines, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
describe('computeReviewMetrics', () => {
|
||||||
|
it('should return empty metrics when no events exist', () => {
|
||||||
|
const sinceMs = new Date('2026-01-01T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
expect(metrics.reReportCounts.size).toBe(0);
|
||||||
|
expect(metrics.roundTripRatio).toBe(0);
|
||||||
|
expect(metrics.averageResolutionIterations).toBe(0);
|
||||||
|
expect(metrics.rejectCountsByRule.size).toBe(0);
|
||||||
|
expect(metrics.rebuttalResolvedRatio).toBe(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty metrics when directory does not exist', () => {
|
||||||
|
const nonExistent = join(eventsDir, 'does-not-exist');
|
||||||
|
const sinceMs = new Date('2026-01-01T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(nonExistent, sinceMs);
|
||||||
|
|
||||||
|
expect(metrics.reReportCounts.size).toBe(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compute re-report counts for findings appearing 2+ times', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'f-001',
|
||||||
|
status: 'new',
|
||||||
|
ruleId: 'r-1',
|
||||||
|
severity: 'error',
|
||||||
|
decision: 'reject',
|
||||||
|
file: 'a.ts',
|
||||||
|
line: 1,
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'f-001',
|
||||||
|
status: 'persists',
|
||||||
|
ruleId: 'r-1',
|
||||||
|
severity: 'error',
|
||||||
|
decision: 'reject',
|
||||||
|
file: 'a.ts',
|
||||||
|
line: 1,
|
||||||
|
iteration: 3,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'f-002',
|
||||||
|
status: 'new',
|
||||||
|
ruleId: 'r-2',
|
||||||
|
severity: 'warning',
|
||||||
|
decision: 'approve',
|
||||||
|
file: 'b.ts',
|
||||||
|
line: 5,
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T10:01:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
expect(metrics.reReportCounts.size).toBe(1);
|
||||||
|
expect(metrics.reReportCounts.get('f-001')).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compute round-trip ratio correctly', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
// f-001: appears in iterations 1 and 3 → multi-iteration
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'new', ruleId: 'r-1', severity: 'error',
|
||||||
|
decision: 'reject', file: 'a.ts', line: 1, iteration: 1, runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'persists', ruleId: 'r-1', severity: 'error',
|
||||||
|
decision: 'reject', file: 'a.ts', line: 1, iteration: 3, runId: 'r', timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
},
|
||||||
|
// f-002: appears only in iteration 1 → single-iteration
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-002', status: 'new', ruleId: 'r-2', severity: 'warning',
|
||||||
|
decision: 'approve', file: 'b.ts', line: 5, iteration: 1, runId: 'r', timestamp: '2026-02-18T10:01:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
// 1 out of 2 unique findings had multi-iteration → 50%
|
||||||
|
expect(metrics.roundTripRatio).toBe(0.5);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compute average resolution iterations', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
// f-001: first in iteration 1, resolved in iteration 3 → 3 iterations
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'new', ruleId: 'r-1', severity: 'error',
|
||||||
|
decision: 'reject', file: 'a.ts', line: 1, iteration: 1, runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'resolved', ruleId: 'r-1', severity: 'error',
|
||||||
|
decision: 'approve', file: 'a.ts', line: 1, iteration: 3, runId: 'r', timestamp: '2026-02-18T12:00:00.000Z',
|
||||||
|
},
|
||||||
|
// f-002: first in iteration 2, resolved in iteration 2 → 1 iteration
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-002', status: 'new', ruleId: 'r-2', severity: 'warning',
|
||||||
|
decision: 'reject', file: 'b.ts', line: 5, iteration: 2, runId: 'r', timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-002', status: 'resolved', ruleId: 'r-2', severity: 'warning',
|
||||||
|
decision: 'approve', file: 'b.ts', line: 5, iteration: 2, runId: 'r', timestamp: '2026-02-18T11:30:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
// (3 + 1) / 2 = 2.0
|
||||||
|
expect(metrics.averageResolutionIterations).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compute reject counts by rule', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'new', ruleId: 'no-any',
|
||||||
|
severity: 'error', decision: 'reject', file: 'a.ts', line: 1, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-002', status: 'new', ruleId: 'no-any',
|
||||||
|
severity: 'error', decision: 'reject', file: 'b.ts', line: 2, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:01:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-003', status: 'new', ruleId: 'no-console',
|
||||||
|
severity: 'warning', decision: 'reject', file: 'c.ts', line: 3, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:02:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-004', status: 'new', ruleId: 'no-any',
|
||||||
|
severity: 'error', decision: 'approve', file: 'd.ts', line: 4, iteration: 2,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:03:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
expect(metrics.rejectCountsByRule.get('no-any')).toBe(2);
|
||||||
|
expect(metrics.rejectCountsByRule.get('no-console')).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compute rebuttal resolved ratio', () => {
|
||||||
|
const events: Array<ReviewFindingEvent | FixActionEvent> = [
|
||||||
|
// f-001: rebutted, then resolved → counts toward resolved
|
||||||
|
{
|
||||||
|
type: 'fix_action', findingId: 'AA-NEW-f001', action: 'rebutted',
|
||||||
|
iteration: 2, runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'AA-NEW-f001', status: 'resolved', ruleId: 'r-1',
|
||||||
|
severity: 'warning', decision: 'approve', file: 'a.ts', line: 1,
|
||||||
|
iteration: 3, runId: 'r', timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
},
|
||||||
|
// f-002: rebutted, never resolved → not counted
|
||||||
|
{
|
||||||
|
type: 'fix_action', findingId: 'AA-NEW-f002', action: 'rebutted',
|
||||||
|
iteration: 2, runId: 'r', timestamp: '2026-02-18T10:01:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'AA-NEW-f002', status: 'persists', ruleId: 'r-2',
|
||||||
|
severity: 'error', decision: 'reject', file: 'b.ts', line: 5,
|
||||||
|
iteration: 3, runId: 'r', timestamp: '2026-02-18T11:01:00.000Z',
|
||||||
|
},
|
||||||
|
// f-003: fixed (not rebutted), resolved → does not affect rebuttal metric
|
||||||
|
{
|
||||||
|
type: 'fix_action', findingId: 'AA-NEW-f003', action: 'fixed',
|
||||||
|
iteration: 2, runId: 'r', timestamp: '2026-02-18T10:02:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'AA-NEW-f003', status: 'resolved', ruleId: 'r-3',
|
||||||
|
severity: 'warning', decision: 'approve', file: 'c.ts', line: 10,
|
||||||
|
iteration: 3, runId: 'r', timestamp: '2026-02-18T11:02:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
// 1 out of 2 rebutted findings was resolved → 50%
|
||||||
|
expect(metrics.rebuttalResolvedRatio).toBe(0.5);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return 0 rebuttal resolved ratio when no rebutted events exist', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'new', ruleId: 'r-1',
|
||||||
|
severity: 'error', decision: 'reject', file: 'a.ts', line: 1, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
expect(metrics.rebuttalResolvedRatio).toBe(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only include events after the since timestamp', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-old', status: 'new', ruleId: 'r-1',
|
||||||
|
severity: 'error', decision: 'reject', file: 'old.ts', line: 1, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-10T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-new', status: 'new', ruleId: 'r-1',
|
||||||
|
severity: 'error', decision: 'reject', file: 'new.ts', line: 1, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
// Write both events to the same date file for simplicity (old event in same file)
|
||||||
|
writeEvents('2026-02-10', [events[0]]);
|
||||||
|
writeEvents('2026-02-18', [events[1]]);
|
||||||
|
|
||||||
|
// Since Feb 15 — should only include f-new
|
||||||
|
const sinceMs = new Date('2026-02-15T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
|
||||||
|
expect(metrics.rejectCountsByRule.get('r-1')).toBe(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('formatReviewMetrics', () => {
|
||||||
|
it('should format empty metrics', () => {
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, 0);
|
||||||
|
const output = formatReviewMetrics(metrics);
|
||||||
|
|
||||||
|
expect(output).toContain('=== Review Metrics ===');
|
||||||
|
expect(output).toContain('(none)');
|
||||||
|
expect(output).toContain('Round-trip ratio');
|
||||||
|
expect(output).toContain('Average resolution iterations');
|
||||||
|
expect(output).toContain('Rebuttal');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should format metrics with data', () => {
|
||||||
|
const events: ReviewFindingEvent[] = [
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'new', ruleId: 'r-1',
|
||||||
|
severity: 'error', decision: 'reject', file: 'a.ts', line: 1, iteration: 1,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'review_finding', findingId: 'f-001', status: 'persists', ruleId: 'r-1',
|
||||||
|
severity: 'error', decision: 'reject', file: 'a.ts', line: 1, iteration: 3,
|
||||||
|
runId: 'r', timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
},
|
||||||
|
];
|
||||||
|
writeEvents('2026-02-18', events);
|
||||||
|
|
||||||
|
const sinceMs = new Date('2026-02-18T00:00:00Z').getTime();
|
||||||
|
const metrics = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
const output = formatReviewMetrics(metrics);
|
||||||
|
|
||||||
|
expect(output).toContain('f-001: 2');
|
||||||
|
expect(output).toContain('r-1: 2');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('parseSinceDuration', () => {
|
||||||
|
it('should parse "7d" to 7 days in milliseconds', () => {
|
||||||
|
const ms = parseSinceDuration('7d');
|
||||||
|
expect(ms).toBe(7 * 24 * 60 * 60 * 1000);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should parse "30d" to 30 days in milliseconds', () => {
|
||||||
|
const ms = parseSinceDuration('30d');
|
||||||
|
expect(ms).toBe(30 * 24 * 60 * 60 * 1000);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should parse "1d" to 1 day in milliseconds', () => {
|
||||||
|
const ms = parseSinceDuration('1d');
|
||||||
|
expect(ms).toBe(24 * 60 * 60 * 1000);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw on invalid format', () => {
|
||||||
|
expect(() => parseSinceDuration('7h')).toThrow('Invalid duration format');
|
||||||
|
expect(() => parseSinceDuration('abc')).toThrow('Invalid duration format');
|
||||||
|
expect(() => parseSinceDuration('')).toThrow('Invalid duration format');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
205
src/__tests__/analytics-pieceExecution.test.ts
Normal file
205
src/__tests__/analytics-pieceExecution.test.ts
Normal file
@ -0,0 +1,205 @@
|
|||||||
|
/**
|
||||||
|
* Tests for analytics integration in pieceExecution.
|
||||||
|
*
|
||||||
|
* Validates the analytics initialization logic (analytics.enabled gate)
|
||||||
|
* and event firing for review_finding and fix_action events.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdirSync, rmSync, readFileSync, existsSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { resetAnalyticsWriter } from '../features/analytics/writer.js';
|
||||||
|
import {
|
||||||
|
initAnalyticsWriter,
|
||||||
|
isAnalyticsEnabled,
|
||||||
|
writeAnalyticsEvent,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
import type {
|
||||||
|
MovementResultEvent,
|
||||||
|
ReviewFindingEvent,
|
||||||
|
FixActionEvent,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
|
||||||
|
describe('pieceExecution analytics initialization', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-analytics-init-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should enable analytics when analytics.enabled=true', () => {
|
||||||
|
const analyticsEnabled = true;
|
||||||
|
initAnalyticsWriter(analyticsEnabled, testDir);
|
||||||
|
expect(isAnalyticsEnabled()).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should disable analytics when analytics.enabled=false', () => {
|
||||||
|
const analyticsEnabled = false;
|
||||||
|
initAnalyticsWriter(analyticsEnabled, testDir);
|
||||||
|
expect(isAnalyticsEnabled()).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should disable analytics when analytics is undefined', () => {
|
||||||
|
const analytics = undefined;
|
||||||
|
const analyticsEnabled = analytics?.enabled === true;
|
||||||
|
initAnalyticsWriter(analyticsEnabled, testDir);
|
||||||
|
expect(isAnalyticsEnabled()).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('movement_result event assembly', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-mvt-result-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should write movement_result event with correct fields', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'ai_review',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'REJECT',
|
||||||
|
iteration: 3,
|
||||||
|
runId: 'test-run',
|
||||||
|
timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
expect(existsSync(filePath)).toBe(true);
|
||||||
|
|
||||||
|
const content = readFileSync(filePath, 'utf-8').trim();
|
||||||
|
const parsed = JSON.parse(content) as MovementResultEvent;
|
||||||
|
|
||||||
|
expect(parsed.type).toBe('movement_result');
|
||||||
|
expect(parsed.movement).toBe('ai_review');
|
||||||
|
expect(parsed.decisionTag).toBe('REJECT');
|
||||||
|
expect(parsed.iteration).toBe(3);
|
||||||
|
expect(parsed.runId).toBe('test-run');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('review_finding event writing', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-review-finding-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should write review_finding events to JSONL', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event: ReviewFindingEvent = {
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'AA-001',
|
||||||
|
status: 'new',
|
||||||
|
ruleId: 'AA-001',
|
||||||
|
severity: 'warning',
|
||||||
|
decision: 'reject',
|
||||||
|
file: 'src/foo.ts',
|
||||||
|
line: 42,
|
||||||
|
iteration: 2,
|
||||||
|
runId: 'test-run',
|
||||||
|
timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const content = readFileSync(filePath, 'utf-8').trim();
|
||||||
|
const parsed = JSON.parse(content) as ReviewFindingEvent;
|
||||||
|
|
||||||
|
expect(parsed.type).toBe('review_finding');
|
||||||
|
expect(parsed.findingId).toBe('AA-001');
|
||||||
|
expect(parsed.status).toBe('new');
|
||||||
|
expect(parsed.decision).toBe('reject');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('fix_action event writing', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-fix-action-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should write fix_action events with fixed action to JSONL', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event: FixActionEvent = {
|
||||||
|
type: 'fix_action',
|
||||||
|
findingId: 'AA-001',
|
||||||
|
action: 'fixed',
|
||||||
|
iteration: 3,
|
||||||
|
runId: 'test-run',
|
||||||
|
timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const content = readFileSync(filePath, 'utf-8').trim();
|
||||||
|
const parsed = JSON.parse(content) as FixActionEvent;
|
||||||
|
|
||||||
|
expect(parsed.type).toBe('fix_action');
|
||||||
|
expect(parsed.findingId).toBe('AA-001');
|
||||||
|
expect(parsed.action).toBe('fixed');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should write fix_action events with rebutted action to JSONL', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event: FixActionEvent = {
|
||||||
|
type: 'fix_action',
|
||||||
|
findingId: 'AA-002',
|
||||||
|
action: 'rebutted',
|
||||||
|
iteration: 4,
|
||||||
|
runId: 'test-run',
|
||||||
|
timestamp: '2026-02-18T12:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const content = readFileSync(filePath, 'utf-8').trim();
|
||||||
|
const parsed = JSON.parse(content) as FixActionEvent;
|
||||||
|
|
||||||
|
expect(parsed.type).toBe('fix_action');
|
||||||
|
expect(parsed.findingId).toBe('AA-002');
|
||||||
|
expect(parsed.action).toBe('rebutted');
|
||||||
|
});
|
||||||
|
});
|
||||||
108
src/__tests__/analytics-purge.test.ts
Normal file
108
src/__tests__/analytics-purge.test.ts
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
/**
|
||||||
|
* Tests for analytics purge — retention-based cleanup of JSONL files.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { existsSync, mkdirSync, rmSync, writeFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { purgeOldEvents } from '../features/analytics/index.js';
|
||||||
|
|
||||||
|
describe('purgeOldEvents', () => {
|
||||||
|
let eventsDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
eventsDir = join(tmpdir(), `takt-test-analytics-purge-${Date.now()}`);
|
||||||
|
mkdirSync(eventsDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
rmSync(eventsDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should delete files older than retention period', () => {
|
||||||
|
// Given: Files from different dates
|
||||||
|
writeFileSync(join(eventsDir, '2026-01-01.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-01-15.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-10.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-18.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
// When: Purge with 30-day retention from Feb 18
|
||||||
|
const now = new Date('2026-02-18T12:00:00Z');
|
||||||
|
const deleted = purgeOldEvents(eventsDir, 30, now);
|
||||||
|
|
||||||
|
// Then: Only files before Jan 19 should be deleted
|
||||||
|
expect(deleted).toContain('2026-01-01.jsonl');
|
||||||
|
expect(deleted).toContain('2026-01-15.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-10.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-18.jsonl');
|
||||||
|
|
||||||
|
expect(existsSync(join(eventsDir, '2026-01-01.jsonl'))).toBe(false);
|
||||||
|
expect(existsSync(join(eventsDir, '2026-01-15.jsonl'))).toBe(false);
|
||||||
|
expect(existsSync(join(eventsDir, '2026-02-10.jsonl'))).toBe(true);
|
||||||
|
expect(existsSync(join(eventsDir, '2026-02-18.jsonl'))).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array when no files to purge', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-18.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const now = new Date('2026-02-18T12:00:00Z');
|
||||||
|
const deleted = purgeOldEvents(eventsDir, 30, now);
|
||||||
|
|
||||||
|
expect(deleted).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array when directory does not exist', () => {
|
||||||
|
const nonExistent = join(eventsDir, 'does-not-exist');
|
||||||
|
const deleted = purgeOldEvents(nonExistent, 30, new Date());
|
||||||
|
|
||||||
|
expect(deleted).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should delete all files when retention is 0', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-17.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-18.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const now = new Date('2026-02-18T12:00:00Z');
|
||||||
|
const deleted = purgeOldEvents(eventsDir, 0, now);
|
||||||
|
|
||||||
|
expect(deleted).toContain('2026-02-17.jsonl');
|
||||||
|
// The cutoff date is Feb 18, and '2026-02-18' is not < '2026-02-18'
|
||||||
|
expect(deleted).not.toContain('2026-02-18.jsonl');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should ignore non-jsonl files', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2025-01-01.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, 'README.md'), '# test', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, 'data.json'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const now = new Date('2026-02-18T12:00:00Z');
|
||||||
|
const deleted = purgeOldEvents(eventsDir, 30, now);
|
||||||
|
|
||||||
|
expect(deleted).toContain('2025-01-01.jsonl');
|
||||||
|
expect(deleted).not.toContain('README.md');
|
||||||
|
expect(deleted).not.toContain('data.json');
|
||||||
|
|
||||||
|
// Non-jsonl files should still exist
|
||||||
|
expect(existsSync(join(eventsDir, 'README.md'))).toBe(true);
|
||||||
|
expect(existsSync(join(eventsDir, 'data.json'))).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle 7-day retention correctly', () => {
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-10.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-11.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-12.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-17.jsonl'), '{}', 'utf-8');
|
||||||
|
writeFileSync(join(eventsDir, '2026-02-18.jsonl'), '{}', 'utf-8');
|
||||||
|
|
||||||
|
const now = new Date('2026-02-18T12:00:00Z');
|
||||||
|
const deleted = purgeOldEvents(eventsDir, 7, now);
|
||||||
|
|
||||||
|
// Cutoff: Feb 11
|
||||||
|
expect(deleted).toContain('2026-02-10.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-11.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-12.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-17.jsonl');
|
||||||
|
expect(deleted).not.toContain('2026-02-18.jsonl');
|
||||||
|
});
|
||||||
|
});
|
||||||
350
src/__tests__/analytics-report-parser.test.ts
Normal file
350
src/__tests__/analytics-report-parser.test.ts
Normal file
@ -0,0 +1,350 @@
|
|||||||
|
/**
|
||||||
|
* Tests for analytics report parser — extracting findings from review markdown.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { readFileSync, mkdirSync, rmSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import {
|
||||||
|
parseFindingsFromReport,
|
||||||
|
extractDecisionFromReport,
|
||||||
|
inferSeverity,
|
||||||
|
emitFixActionEvents,
|
||||||
|
emitRebuttalEvents,
|
||||||
|
} from '../features/analytics/report-parser.js';
|
||||||
|
import { initAnalyticsWriter } from '../features/analytics/writer.js';
|
||||||
|
import { resetAnalyticsWriter } from '../features/analytics/writer.js';
|
||||||
|
import type { FixActionEvent } from '../features/analytics/events.js';
|
||||||
|
|
||||||
|
describe('parseFindingsFromReport', () => {
|
||||||
|
it('should extract new findings from a review report', () => {
|
||||||
|
const report = [
|
||||||
|
'# Review Report',
|
||||||
|
'',
|
||||||
|
'## Result: REJECT',
|
||||||
|
'',
|
||||||
|
'## Current Iteration Findings (new)',
|
||||||
|
'| # | finding_id | Category | Location | Issue | Fix Suggestion |',
|
||||||
|
'|---|------------|---------|------|------|--------|',
|
||||||
|
'| 1 | AA-001 | DRY | `src/foo.ts:42` | Duplication | Extract helper |',
|
||||||
|
'| 2 | AA-002 | Export | `src/bar.ts:10` | Unused export | Remove |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toHaveLength(2);
|
||||||
|
expect(findings[0].findingId).toBe('AA-001');
|
||||||
|
expect(findings[0].status).toBe('new');
|
||||||
|
expect(findings[0].ruleId).toBe('DRY');
|
||||||
|
expect(findings[0].file).toBe('src/foo.ts');
|
||||||
|
expect(findings[0].line).toBe(42);
|
||||||
|
expect(findings[1].findingId).toBe('AA-002');
|
||||||
|
expect(findings[1].status).toBe('new');
|
||||||
|
expect(findings[1].ruleId).toBe('Export');
|
||||||
|
expect(findings[1].file).toBe('src/bar.ts');
|
||||||
|
expect(findings[1].line).toBe(10);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should extract persists findings', () => {
|
||||||
|
const report = [
|
||||||
|
'## Carry-over Findings (persists)',
|
||||||
|
'| # | finding_id | Previous Evidence | Current Evidence | Issue | Fix Suggestion |',
|
||||||
|
'|---|------------|----------|----------|------|--------|',
|
||||||
|
'| 1 | ARCH-001 | `src/a.ts:5` was X | `src/a.ts:5` still X | Still bad | Fix it |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toHaveLength(1);
|
||||||
|
expect(findings[0].findingId).toBe('ARCH-001');
|
||||||
|
expect(findings[0].status).toBe('persists');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should extract resolved findings', () => {
|
||||||
|
const report = [
|
||||||
|
'## Resolved Findings (resolved)',
|
||||||
|
'| finding_id | Resolution Evidence |',
|
||||||
|
'|------------|---------------------|',
|
||||||
|
'| QA-003 | Fixed in src/c.ts |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toHaveLength(1);
|
||||||
|
expect(findings[0].findingId).toBe('QA-003');
|
||||||
|
expect(findings[0].status).toBe('resolved');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle mixed sections in one report', () => {
|
||||||
|
const report = [
|
||||||
|
'## 今回の指摘(new)',
|
||||||
|
'| # | finding_id | カテゴリ | 場所 | 問題 | 修正案 |',
|
||||||
|
'|---|------------|---------|------|------|--------|',
|
||||||
|
'| 1 | AA-001 | DRY | `src/foo.ts:1` | Dup | Fix |',
|
||||||
|
'',
|
||||||
|
'## 継続指摘(persists)',
|
||||||
|
'| # | finding_id | 前回根拠 | 今回根拠 | 問題 | 修正案 |',
|
||||||
|
'|---|------------|----------|----------|------|--------|',
|
||||||
|
'| 1 | AA-002 | Was bad | Still bad | Issue | Fix |',
|
||||||
|
'',
|
||||||
|
'## 解消済み(resolved)',
|
||||||
|
'| finding_id | 解消根拠 |',
|
||||||
|
'|------------|---------|',
|
||||||
|
'| AA-003 | Fixed |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toHaveLength(3);
|
||||||
|
expect(findings[0]).toEqual(expect.objectContaining({ findingId: 'AA-001', status: 'new' }));
|
||||||
|
expect(findings[1]).toEqual(expect.objectContaining({ findingId: 'AA-002', status: 'persists' }));
|
||||||
|
expect(findings[2]).toEqual(expect.objectContaining({ findingId: 'AA-003', status: 'resolved' }));
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array when no finding sections exist', () => {
|
||||||
|
const report = [
|
||||||
|
'# Report',
|
||||||
|
'',
|
||||||
|
'## Summary',
|
||||||
|
'Everything looks good.',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should stop collecting findings when a new non-finding section starts', () => {
|
||||||
|
const report = [
|
||||||
|
'## Current Iteration Findings (new)',
|
||||||
|
'| # | finding_id | Category | Location | Issue | Fix |',
|
||||||
|
'|---|------------|---------|------|------|-----|',
|
||||||
|
'| 1 | F-001 | Bug | `src/a.ts` | Bad | Fix |',
|
||||||
|
'',
|
||||||
|
'## REJECT判定条件',
|
||||||
|
'| Condition | Result |',
|
||||||
|
'|-----------|--------|',
|
||||||
|
'| Has findings | Yes |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toHaveLength(1);
|
||||||
|
expect(findings[0].findingId).toBe('F-001');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should skip header rows in tables', () => {
|
||||||
|
const report = [
|
||||||
|
'## Current Iteration Findings (new)',
|
||||||
|
'| # | finding_id | Category | Location | Issue | Fix |',
|
||||||
|
'|---|------------|---------|------|------|-----|',
|
||||||
|
'| 1 | X-001 | Cat | `file.ts:5` | Problem | Solution |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings).toHaveLength(1);
|
||||||
|
expect(findings[0].findingId).toBe('X-001');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should parse location with line number from backtick-wrapped paths', () => {
|
||||||
|
const report = [
|
||||||
|
'## Current Iteration Findings (new)',
|
||||||
|
'| # | finding_id | Category | Location | Issue | Fix |',
|
||||||
|
'|---|------------|---------|------|------|-----|',
|
||||||
|
'| 1 | F-001 | Bug | `src/features/analytics/writer.ts:27` | Comment | Remove |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings[0].file).toBe('src/features/analytics/writer.ts');
|
||||||
|
expect(findings[0].line).toBe(27);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle location with multiple line references', () => {
|
||||||
|
const report = [
|
||||||
|
'## Current Iteration Findings (new)',
|
||||||
|
'| # | finding_id | Category | Location | Issue | Fix |',
|
||||||
|
'|---|------------|---------|------|------|-----|',
|
||||||
|
'| 1 | F-001 | Bug | `src/a.ts:10, src/b.ts:20` | Multiple | Fix |',
|
||||||
|
'',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
const findings = parseFindingsFromReport(report);
|
||||||
|
|
||||||
|
expect(findings[0].file).toBe('src/a.ts');
|
||||||
|
expect(findings[0].line).toBe(10);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('extractDecisionFromReport', () => {
|
||||||
|
it('should return reject when report says REJECT', () => {
|
||||||
|
const report = '## 結果: REJECT\n\nSome content';
|
||||||
|
expect(extractDecisionFromReport(report)).toBe('reject');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return approve when report says APPROVE', () => {
|
||||||
|
const report = '## Result: APPROVE\n\nSome content';
|
||||||
|
expect(extractDecisionFromReport(report)).toBe('approve');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when no result section is found', () => {
|
||||||
|
const report = '# Report\n\nNo result section here.';
|
||||||
|
expect(extractDecisionFromReport(report)).toBeNull();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('inferSeverity', () => {
|
||||||
|
it('should return error for security-related finding IDs', () => {
|
||||||
|
expect(inferSeverity('SEC-001')).toBe('error');
|
||||||
|
expect(inferSeverity('SEC-NEW-xss')).toBe('error');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return warning for other finding IDs', () => {
|
||||||
|
expect(inferSeverity('AA-001')).toBe('warning');
|
||||||
|
expect(inferSeverity('QA-001')).toBe('warning');
|
||||||
|
expect(inferSeverity('ARCH-NEW-dry')).toBe('warning');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('emitFixActionEvents', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-emit-fix-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should emit fix_action events for each finding ID in response', () => {
|
||||||
|
const timestamp = new Date('2026-02-18T12:00:00.000Z');
|
||||||
|
|
||||||
|
emitFixActionEvents('Fixed AA-001 and ARCH-002-barrel', 3, 'run-xyz', timestamp);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const lines = readFileSync(filePath, 'utf-8').trim().split('\n');
|
||||||
|
expect(lines).toHaveLength(2);
|
||||||
|
|
||||||
|
const event1 = JSON.parse(lines[0]) as FixActionEvent;
|
||||||
|
expect(event1.type).toBe('fix_action');
|
||||||
|
expect(event1.findingId).toBe('AA-001');
|
||||||
|
expect(event1.action).toBe('fixed');
|
||||||
|
expect(event1.iteration).toBe(3);
|
||||||
|
expect(event1.runId).toBe('run-xyz');
|
||||||
|
expect(event1.timestamp).toBe('2026-02-18T12:00:00.000Z');
|
||||||
|
|
||||||
|
const event2 = JSON.parse(lines[1]) as FixActionEvent;
|
||||||
|
expect(event2.type).toBe('fix_action');
|
||||||
|
expect(event2.findingId).toBe('ARCH-002-barrel');
|
||||||
|
expect(event2.action).toBe('fixed');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not emit events when response contains no finding IDs', () => {
|
||||||
|
const timestamp = new Date('2026-02-18T12:00:00.000Z');
|
||||||
|
|
||||||
|
emitFixActionEvents('No issues found, all good.', 1, 'run-abc', timestamp);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
expect(() => readFileSync(filePath, 'utf-8')).toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should deduplicate repeated finding IDs', () => {
|
||||||
|
const timestamp = new Date('2026-02-18T12:00:00.000Z');
|
||||||
|
|
||||||
|
emitFixActionEvents(
|
||||||
|
'Fixed QA-001, confirmed QA-001 is resolved, also QA-001 again',
|
||||||
|
2,
|
||||||
|
'run-dedup',
|
||||||
|
timestamp,
|
||||||
|
);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const lines = readFileSync(filePath, 'utf-8').trim().split('\n');
|
||||||
|
expect(lines).toHaveLength(1);
|
||||||
|
|
||||||
|
const event = JSON.parse(lines[0]) as FixActionEvent;
|
||||||
|
expect(event.findingId).toBe('QA-001');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should match various finding ID formats', () => {
|
||||||
|
const timestamp = new Date('2026-02-18T12:00:00.000Z');
|
||||||
|
const response = [
|
||||||
|
'Resolved AA-001 simple ID',
|
||||||
|
'Fixed ARCH-NEW-dry with NEW segment',
|
||||||
|
'Addressed SEC-002-xss with suffix',
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
emitFixActionEvents(response, 1, 'run-formats', timestamp);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const lines = readFileSync(filePath, 'utf-8').trim().split('\n');
|
||||||
|
expect(lines).toHaveLength(3);
|
||||||
|
|
||||||
|
const ids = lines.map((line) => (JSON.parse(line) as FixActionEvent).findingId);
|
||||||
|
expect(ids).toContain('AA-001');
|
||||||
|
expect(ids).toContain('ARCH-NEW-dry');
|
||||||
|
expect(ids).toContain('SEC-002-xss');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('emitRebuttalEvents', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-emit-rebuttal-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should emit fix_action events with rebutted action for finding IDs', () => {
|
||||||
|
const timestamp = new Date('2026-02-18T12:00:00.000Z');
|
||||||
|
|
||||||
|
emitRebuttalEvents('Rebutting AA-001 and ARCH-002-barrel', 3, 'run-xyz', timestamp);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const lines = readFileSync(filePath, 'utf-8').trim().split('\n');
|
||||||
|
expect(lines).toHaveLength(2);
|
||||||
|
|
||||||
|
const event1 = JSON.parse(lines[0]) as FixActionEvent;
|
||||||
|
expect(event1.type).toBe('fix_action');
|
||||||
|
expect(event1.findingId).toBe('AA-001');
|
||||||
|
expect(event1.action).toBe('rebutted');
|
||||||
|
expect(event1.iteration).toBe(3);
|
||||||
|
expect(event1.runId).toBe('run-xyz');
|
||||||
|
|
||||||
|
const event2 = JSON.parse(lines[1]) as FixActionEvent;
|
||||||
|
expect(event2.type).toBe('fix_action');
|
||||||
|
expect(event2.findingId).toBe('ARCH-002-barrel');
|
||||||
|
expect(event2.action).toBe('rebutted');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not emit events when response contains no finding IDs', () => {
|
||||||
|
const timestamp = new Date('2026-02-18T12:00:00.000Z');
|
||||||
|
|
||||||
|
emitRebuttalEvents('No findings mentioned here.', 1, 'run-abc', timestamp);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
expect(() => readFileSync(filePath, 'utf-8')).toThrow();
|
||||||
|
});
|
||||||
|
});
|
||||||
220
src/__tests__/analytics-writer.test.ts
Normal file
220
src/__tests__/analytics-writer.test.ts
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
/**
|
||||||
|
* Tests for AnalyticsWriter — JSONL append, date rotation, ON/OFF toggle.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { existsSync, readFileSync, mkdirSync, rmSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { resetAnalyticsWriter } from '../features/analytics/writer.js';
|
||||||
|
import {
|
||||||
|
initAnalyticsWriter,
|
||||||
|
isAnalyticsEnabled,
|
||||||
|
writeAnalyticsEvent,
|
||||||
|
} from '../features/analytics/index.js';
|
||||||
|
import type { MovementResultEvent, ReviewFindingEvent } from '../features/analytics/index.js';
|
||||||
|
|
||||||
|
describe('AnalyticsWriter', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
testDir = join(tmpdir(), `takt-test-analytics-writer-${Date.now()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('ON/OFF toggle', () => {
|
||||||
|
it('should not be enabled by default', () => {
|
||||||
|
expect(isAnalyticsEnabled()).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should be enabled when initialized with enabled=true', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
expect(isAnalyticsEnabled()).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not be enabled when initialized with enabled=false', () => {
|
||||||
|
initAnalyticsWriter(false, testDir);
|
||||||
|
expect(isAnalyticsEnabled()).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not write when disabled', () => {
|
||||||
|
initAnalyticsWriter(false, testDir);
|
||||||
|
|
||||||
|
const event: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'plan',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'done',
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const expectedFile = join(testDir, '2026-02-18.jsonl');
|
||||||
|
expect(existsSync(expectedFile)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('event writing', () => {
|
||||||
|
it('should append event to date-based JSONL file', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'implement',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'approved',
|
||||||
|
iteration: 2,
|
||||||
|
runId: 'run-abc',
|
||||||
|
timestamp: '2026-02-18T14:30:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
expect(existsSync(filePath)).toBe(true);
|
||||||
|
|
||||||
|
const content = readFileSync(filePath, 'utf-8').trim();
|
||||||
|
const parsed = JSON.parse(content) as MovementResultEvent;
|
||||||
|
expect(parsed.type).toBe('movement_result');
|
||||||
|
expect(parsed.movement).toBe('implement');
|
||||||
|
expect(parsed.provider).toBe('claude');
|
||||||
|
expect(parsed.decisionTag).toBe('approved');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append multiple events to the same file', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event1: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'plan',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'done',
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T10:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
const event2: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'implement',
|
||||||
|
provider: 'codex',
|
||||||
|
model: 'o3',
|
||||||
|
decisionTag: 'needs_fix',
|
||||||
|
iteration: 2,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T11:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event1);
|
||||||
|
writeAnalyticsEvent(event2);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-02-18.jsonl');
|
||||||
|
const lines = readFileSync(filePath, 'utf-8').trim().split('\n');
|
||||||
|
expect(lines).toHaveLength(2);
|
||||||
|
|
||||||
|
const parsed1 = JSON.parse(lines[0]) as MovementResultEvent;
|
||||||
|
const parsed2 = JSON.parse(lines[1]) as MovementResultEvent;
|
||||||
|
expect(parsed1.movement).toBe('plan');
|
||||||
|
expect(parsed2.movement).toBe('implement');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create separate files for different dates', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event1: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'plan',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'done',
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-17T23:59:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
const event2: MovementResultEvent = {
|
||||||
|
type: 'movement_result',
|
||||||
|
movement: 'implement',
|
||||||
|
provider: 'claude',
|
||||||
|
model: 'sonnet',
|
||||||
|
decisionTag: 'done',
|
||||||
|
iteration: 2,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-02-18T00:01:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event1);
|
||||||
|
writeAnalyticsEvent(event2);
|
||||||
|
|
||||||
|
expect(existsSync(join(testDir, '2026-02-17.jsonl'))).toBe(true);
|
||||||
|
expect(existsSync(join(testDir, '2026-02-18.jsonl'))).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should write review_finding events correctly', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
|
||||||
|
const event: ReviewFindingEvent = {
|
||||||
|
type: 'review_finding',
|
||||||
|
findingId: 'f-001',
|
||||||
|
status: 'new',
|
||||||
|
ruleId: 'no-any',
|
||||||
|
severity: 'error',
|
||||||
|
decision: 'reject',
|
||||||
|
file: 'src/index.ts',
|
||||||
|
line: 10,
|
||||||
|
iteration: 1,
|
||||||
|
runId: 'run-1',
|
||||||
|
timestamp: '2026-03-01T08:00:00.000Z',
|
||||||
|
};
|
||||||
|
|
||||||
|
writeAnalyticsEvent(event);
|
||||||
|
|
||||||
|
const filePath = join(testDir, '2026-03-01.jsonl');
|
||||||
|
const content = readFileSync(filePath, 'utf-8').trim();
|
||||||
|
const parsed = JSON.parse(content) as ReviewFindingEvent;
|
||||||
|
expect(parsed.type).toBe('review_finding');
|
||||||
|
expect(parsed.findingId).toBe('f-001');
|
||||||
|
expect(parsed.ruleId).toBe('no-any');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('directory creation', () => {
|
||||||
|
it('should create events directory when enabled and dir does not exist', () => {
|
||||||
|
const nestedDir = join(testDir, 'nested', 'analytics', 'events');
|
||||||
|
expect(existsSync(nestedDir)).toBe(false);
|
||||||
|
|
||||||
|
initAnalyticsWriter(true, nestedDir);
|
||||||
|
|
||||||
|
expect(existsSync(nestedDir)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not create directory when disabled', () => {
|
||||||
|
const nestedDir = join(testDir, 'disabled-dir', 'events');
|
||||||
|
initAnalyticsWriter(false, nestedDir);
|
||||||
|
|
||||||
|
expect(existsSync(nestedDir)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resetInstance', () => {
|
||||||
|
it('should reset to disabled state', () => {
|
||||||
|
initAnalyticsWriter(true, testDir);
|
||||||
|
expect(isAnalyticsEnabled()).toBe(true);
|
||||||
|
|
||||||
|
resetAnalyticsWriter();
|
||||||
|
expect(isAnalyticsEnabled()).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -17,8 +17,17 @@ import {
|
|||||||
|
|
||||||
// Mock external dependencies to isolate unit tests
|
// Mock external dependencies to isolate unit tests
|
||||||
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
||||||
getLanguage: () => 'en',
|
loadGlobalConfig: () => ({}),
|
||||||
getBuiltinPiecesEnabled: () => true,
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/loadConfig.js', () => ({
|
||||||
|
loadConfig: () => ({
|
||||||
|
global: {
|
||||||
|
language: 'en',
|
||||||
|
enableBuiltinPieces: true,
|
||||||
|
},
|
||||||
|
project: {},
|
||||||
|
}),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
const mockLogError = vi.fn();
|
const mockLogError = vi.fn();
|
||||||
|
|||||||
@ -15,7 +15,6 @@ vi.mock('../shared/ui/index.js', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/prompt/index.js', () => ({
|
vi.mock('../shared/prompt/index.js', () => ({
|
||||||
confirm: vi.fn(() => true),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
@ -51,7 +50,6 @@ vi.mock('../features/pipeline/index.js', () => ({
|
|||||||
vi.mock('../features/interactive/index.js', () => ({
|
vi.mock('../features/interactive/index.js', () => ({
|
||||||
interactiveMode: vi.fn(),
|
interactiveMode: vi.fn(),
|
||||||
selectInteractiveMode: vi.fn(() => 'assistant'),
|
selectInteractiveMode: vi.fn(() => 'assistant'),
|
||||||
selectRecentSession: vi.fn(() => null),
|
|
||||||
passthroughMode: vi.fn(),
|
passthroughMode: vi.fn(),
|
||||||
quietMode: vi.fn(),
|
quietMode: vi.fn(),
|
||||||
personaMode: vi.fn(),
|
personaMode: vi.fn(),
|
||||||
@ -76,7 +74,9 @@ vi.mock('../infra/task/index.js', () => ({
|
|||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
getPieceDescription: vi.fn(() => ({ name: 'default', description: 'test piece', pieceStructure: '', movementPreviews: [] })),
|
getPieceDescription: vi.fn(() => ({ name: 'default', description: 'test piece', pieceStructure: '', movementPreviews: [] })),
|
||||||
loadGlobalConfig: vi.fn(() => ({ interactivePreviewMovements: 3 })),
|
resolveConfigValue: vi.fn((_: string, key: string) => (key === 'piece' ? 'default' : false)),
|
||||||
|
resolveConfigValues: vi.fn(() => ({ language: 'en', interactivePreviewMovements: 3, provider: 'claude' })),
|
||||||
|
loadPersonaSessions: vi.fn(() => ({})),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/constants.js', () => ({
|
vi.mock('../shared/constants.js', () => ({
|
||||||
@ -106,11 +106,11 @@ vi.mock('../app/cli/helpers.js', () => ({
|
|||||||
|
|
||||||
import { checkGhCli, fetchIssue, formatIssueAsTask, parseIssueNumbers } from '../infra/github/issue.js';
|
import { checkGhCli, fetchIssue, formatIssueAsTask, parseIssueNumbers } from '../infra/github/issue.js';
|
||||||
import { selectAndExecuteTask, determinePiece, createIssueFromTask, saveTaskFromInteractive } from '../features/tasks/index.js';
|
import { selectAndExecuteTask, determinePiece, createIssueFromTask, saveTaskFromInteractive } from '../features/tasks/index.js';
|
||||||
import { interactiveMode, selectRecentSession } from '../features/interactive/index.js';
|
import { interactiveMode } from '../features/interactive/index.js';
|
||||||
import { loadGlobalConfig } from '../infra/config/index.js';
|
import { resolveConfigValues, loadPersonaSessions } from '../infra/config/index.js';
|
||||||
import { confirm } from '../shared/prompt/index.js';
|
|
||||||
import { isDirectTask } from '../app/cli/helpers.js';
|
import { isDirectTask } from '../app/cli/helpers.js';
|
||||||
import { executeDefaultAction } from '../app/cli/routing.js';
|
import { executeDefaultAction } from '../app/cli/routing.js';
|
||||||
|
import { info } from '../shared/ui/index.js';
|
||||||
import type { GitHubIssue } from '../infra/github/types.js';
|
import type { GitHubIssue } from '../infra/github/types.js';
|
||||||
|
|
||||||
const mockCheckGhCli = vi.mocked(checkGhCli);
|
const mockCheckGhCli = vi.mocked(checkGhCli);
|
||||||
@ -122,10 +122,10 @@ const mockDeterminePiece = vi.mocked(determinePiece);
|
|||||||
const mockCreateIssueFromTask = vi.mocked(createIssueFromTask);
|
const mockCreateIssueFromTask = vi.mocked(createIssueFromTask);
|
||||||
const mockSaveTaskFromInteractive = vi.mocked(saveTaskFromInteractive);
|
const mockSaveTaskFromInteractive = vi.mocked(saveTaskFromInteractive);
|
||||||
const mockInteractiveMode = vi.mocked(interactiveMode);
|
const mockInteractiveMode = vi.mocked(interactiveMode);
|
||||||
const mockSelectRecentSession = vi.mocked(selectRecentSession);
|
const mockLoadPersonaSessions = vi.mocked(loadPersonaSessions);
|
||||||
const mockLoadGlobalConfig = vi.mocked(loadGlobalConfig);
|
const mockResolveConfigValues = vi.mocked(resolveConfigValues);
|
||||||
const mockConfirm = vi.mocked(confirm);
|
|
||||||
const mockIsDirectTask = vi.mocked(isDirectTask);
|
const mockIsDirectTask = vi.mocked(isDirectTask);
|
||||||
|
const mockInfo = vi.mocked(info);
|
||||||
const mockTaskRunnerListAllTaskItems = vi.mocked(mockListAllTaskItems);
|
const mockTaskRunnerListAllTaskItems = vi.mocked(mockListAllTaskItems);
|
||||||
|
|
||||||
function createMockIssue(number: number): GitHubIssue {
|
function createMockIssue(number: number): GitHubIssue {
|
||||||
@ -147,7 +147,6 @@ beforeEach(() => {
|
|||||||
// Default setup
|
// Default setup
|
||||||
mockDeterminePiece.mockResolvedValue('default');
|
mockDeterminePiece.mockResolvedValue('default');
|
||||||
mockInteractiveMode.mockResolvedValue({ action: 'execute', task: 'summarized task' });
|
mockInteractiveMode.mockResolvedValue({ action: 'execute', task: 'summarized task' });
|
||||||
mockConfirm.mockResolvedValue(true);
|
|
||||||
mockIsDirectTask.mockReturnValue(false);
|
mockIsDirectTask.mockReturnValue(false);
|
||||||
mockParseIssueNumbers.mockReturnValue([]);
|
mockParseIssueNumbers.mockReturnValue([]);
|
||||||
mockTaskRunnerListAllTaskItems.mockReturnValue([]);
|
mockTaskRunnerListAllTaskItems.mockReturnValue([]);
|
||||||
@ -480,41 +479,43 @@ describe('Issue resolution in routing', () => {
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('session selection with provider=claude', () => {
|
describe('--continue option', () => {
|
||||||
it('should pass selected session ID to interactiveMode when provider is claude', async () => {
|
it('should load saved session and pass to interactiveMode when --continue is specified', async () => {
|
||||||
// Given
|
// Given
|
||||||
mockLoadGlobalConfig.mockReturnValue({ interactivePreviewMovements: 3, provider: 'claude' });
|
mockOpts.continue = true;
|
||||||
mockConfirm.mockResolvedValue(true);
|
mockResolveConfigValues.mockReturnValue({ language: 'en', interactivePreviewMovements: 3, provider: 'claude' });
|
||||||
mockSelectRecentSession.mockResolvedValue('session-xyz');
|
mockLoadPersonaSessions.mockReturnValue({ interactive: 'saved-session-123' });
|
||||||
|
|
||||||
// When
|
// When
|
||||||
await executeDefaultAction();
|
await executeDefaultAction();
|
||||||
|
|
||||||
// Then: selectRecentSession should be called
|
// Then: loadPersonaSessions should be called with provider
|
||||||
expect(mockSelectRecentSession).toHaveBeenCalledWith('/test/cwd', 'en');
|
expect(mockLoadPersonaSessions).toHaveBeenCalledWith('/test/cwd', 'claude');
|
||||||
|
|
||||||
// Then: interactiveMode should receive the session ID as 4th argument
|
// Then: interactiveMode should receive the saved session ID
|
||||||
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
||||||
'/test/cwd',
|
'/test/cwd',
|
||||||
undefined,
|
undefined,
|
||||||
expect.anything(),
|
expect.anything(),
|
||||||
'session-xyz',
|
'saved-session-123',
|
||||||
);
|
);
|
||||||
|
|
||||||
expect(mockConfirm).toHaveBeenCalledWith('Choose a previous session?', false);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should not call selectRecentSession when user selects no in confirmation', async () => {
|
it('should show message and start new session when --continue has no saved session', async () => {
|
||||||
// Given
|
// Given
|
||||||
mockLoadGlobalConfig.mockReturnValue({ interactivePreviewMovements: 3, provider: 'claude' });
|
mockOpts.continue = true;
|
||||||
mockConfirm.mockResolvedValue(false);
|
mockResolveConfigValues.mockReturnValue({ language: 'en', interactivePreviewMovements: 3, provider: 'claude' });
|
||||||
|
mockLoadPersonaSessions.mockReturnValue({});
|
||||||
|
|
||||||
// When
|
// When
|
||||||
await executeDefaultAction();
|
await executeDefaultAction();
|
||||||
|
|
||||||
// Then
|
// Then: info message about no session
|
||||||
expect(mockConfirm).toHaveBeenCalledWith('Choose a previous session?', false);
|
expect(mockInfo).toHaveBeenCalledWith(
|
||||||
expect(mockSelectRecentSession).not.toHaveBeenCalled();
|
'No previous assistant session found. Starting a new session.',
|
||||||
|
);
|
||||||
|
|
||||||
|
// Then: interactiveMode should be called with undefined session ID
|
||||||
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
||||||
'/test/cwd',
|
'/test/cwd',
|
||||||
undefined,
|
undefined,
|
||||||
@ -523,15 +524,12 @@ describe('Issue resolution in routing', () => {
|
|||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should not call selectRecentSession when provider is not claude', async () => {
|
it('should not load persona sessions when --continue is not specified', async () => {
|
||||||
// Given
|
|
||||||
mockLoadGlobalConfig.mockReturnValue({ interactivePreviewMovements: 3, provider: 'openai' });
|
|
||||||
|
|
||||||
// When
|
// When
|
||||||
await executeDefaultAction();
|
await executeDefaultAction();
|
||||||
|
|
||||||
// Then: selectRecentSession should NOT be called
|
// Then: loadPersonaSessions should NOT be called
|
||||||
expect(mockSelectRecentSession).not.toHaveBeenCalled();
|
expect(mockLoadPersonaSessions).not.toHaveBeenCalled();
|
||||||
|
|
||||||
// Then: interactiveMode should be called with undefined session ID
|
// Then: interactiveMode should be called with undefined session ID
|
||||||
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
||||||
@ -543,14 +541,11 @@ describe('Issue resolution in routing', () => {
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('run session reference', () => {
|
describe('default assistant mode (no --continue)', () => {
|
||||||
it('should not prompt run session reference in default interactive flow', async () => {
|
it('should start new session without loading saved sessions', async () => {
|
||||||
await executeDefaultAction();
|
await executeDefaultAction();
|
||||||
|
|
||||||
expect(mockConfirm).not.toHaveBeenCalledWith(
|
expect(mockLoadPersonaSessions).not.toHaveBeenCalled();
|
||||||
"Reference a previous run's results?",
|
|
||||||
false,
|
|
||||||
);
|
|
||||||
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
expect(mockInteractiveMode).toHaveBeenCalledWith(
|
||||||
'/test/cwd',
|
'/test/cwd',
|
||||||
undefined,
|
undefined,
|
||||||
|
|||||||
@ -66,7 +66,6 @@ vi.mock('../infra/config/index.js', () => ({
|
|||||||
|
|
||||||
vi.mock('../infra/config/paths.js', () => ({
|
vi.mock('../infra/config/paths.js', () => ({
|
||||||
clearPersonaSessions: vi.fn(),
|
clearPersonaSessions: vi.fn(),
|
||||||
getCurrentPiece: vi.fn(() => 'default'),
|
|
||||||
isVerboseMode: vi.fn(() => false),
|
isVerboseMode: vi.fn(() => false),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
|||||||
53
src/__tests__/config-env-overrides.test.ts
Normal file
53
src/__tests__/config-env-overrides.test.ts
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
import { afterEach, describe, expect, it } from 'vitest';
|
||||||
|
import {
|
||||||
|
applyGlobalConfigEnvOverrides,
|
||||||
|
applyProjectConfigEnvOverrides,
|
||||||
|
envVarNameFromPath,
|
||||||
|
} from '../infra/config/env/config-env-overrides.js';
|
||||||
|
|
||||||
|
describe('config env overrides', () => {
|
||||||
|
const envBackup = { ...process.env };
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
for (const key of Object.keys(process.env)) {
|
||||||
|
if (!(key in envBackup)) {
|
||||||
|
delete process.env[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (const [key, value] of Object.entries(envBackup)) {
|
||||||
|
process.env[key] = value;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should convert dotted and camelCase paths to TAKT env variable names', () => {
|
||||||
|
expect(envVarNameFromPath('verbose')).toBe('TAKT_VERBOSE');
|
||||||
|
expect(envVarNameFromPath('provider_options.claude.sandbox.allow_unsandboxed_commands'))
|
||||||
|
.toBe('TAKT_PROVIDER_OPTIONS_CLAUDE_SANDBOX_ALLOW_UNSANDBOXED_COMMANDS');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should apply global env overrides from generated env names', () => {
|
||||||
|
process.env.TAKT_LOG_LEVEL = 'debug';
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CLAUDE_SANDBOX_ALLOW_UNSANDBOXED_COMMANDS = 'true';
|
||||||
|
|
||||||
|
const raw: Record<string, unknown> = {};
|
||||||
|
applyGlobalConfigEnvOverrides(raw);
|
||||||
|
|
||||||
|
expect(raw.log_level).toBe('debug');
|
||||||
|
expect(raw.provider_options).toEqual({
|
||||||
|
claude: {
|
||||||
|
sandbox: {
|
||||||
|
allow_unsandboxed_commands: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should apply project env overrides from generated env names', () => {
|
||||||
|
process.env.TAKT_VERBOSE = 'true';
|
||||||
|
|
||||||
|
const raw: Record<string, unknown> = {};
|
||||||
|
applyProjectConfigEnvOverrides(raw);
|
||||||
|
|
||||||
|
expect(raw.verbose).toBe(true);
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -1,5 +1,5 @@
|
|||||||
/**
|
/**
|
||||||
* Tests for takt config functions
|
* Tests for config functions
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
@ -13,7 +13,6 @@ import {
|
|||||||
loadPiece,
|
loadPiece,
|
||||||
listPieces,
|
listPieces,
|
||||||
loadPersonaPromptFromPath,
|
loadPersonaPromptFromPath,
|
||||||
getCurrentPiece,
|
|
||||||
setCurrentPiece,
|
setCurrentPiece,
|
||||||
getProjectConfigDir,
|
getProjectConfigDir,
|
||||||
getBuiltinPersonasDir,
|
getBuiltinPersonasDir,
|
||||||
@ -35,17 +34,19 @@ import {
|
|||||||
updateWorktreeSession,
|
updateWorktreeSession,
|
||||||
getLanguage,
|
getLanguage,
|
||||||
loadProjectConfig,
|
loadProjectConfig,
|
||||||
|
isVerboseMode,
|
||||||
|
invalidateGlobalConfigCache,
|
||||||
} from '../infra/config/index.js';
|
} from '../infra/config/index.js';
|
||||||
|
|
||||||
describe('getBuiltinPiece', () => {
|
describe('getBuiltinPiece', () => {
|
||||||
it('should return builtin piece when it exists in resources', () => {
|
it('should return builtin piece when it exists in resources', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
expect(piece).not.toBeNull();
|
expect(piece).not.toBeNull();
|
||||||
expect(piece!.name).toBe('default');
|
expect(piece!.name).toBe('default');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should resolve builtin instruction_template without projectCwd', () => {
|
it('should resolve builtin instruction_template without projectCwd', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
expect(piece).not.toBeNull();
|
expect(piece).not.toBeNull();
|
||||||
|
|
||||||
const planMovement = piece!.movements.find((movement) => movement.name === 'plan');
|
const planMovement = piece!.movements.find((movement) => movement.name === 'plan');
|
||||||
@ -54,15 +55,15 @@ describe('getBuiltinPiece', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should return null for non-existent piece names', () => {
|
it('should return null for non-existent piece names', () => {
|
||||||
expect(getBuiltinPiece('nonexistent-piece')).toBeNull();
|
expect(getBuiltinPiece('nonexistent-piece', process.cwd())).toBeNull();
|
||||||
expect(getBuiltinPiece('unknown')).toBeNull();
|
expect(getBuiltinPiece('unknown', process.cwd())).toBeNull();
|
||||||
expect(getBuiltinPiece('')).toBeNull();
|
expect(getBuiltinPiece('', process.cwd())).toBeNull();
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('default piece parallel reviewers movement', () => {
|
describe('default piece parallel reviewers movement', () => {
|
||||||
it('should have a reviewers movement with parallel sub-movements', () => {
|
it('should have a reviewers movement with parallel sub-movements', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
expect(piece).not.toBeNull();
|
expect(piece).not.toBeNull();
|
||||||
|
|
||||||
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers');
|
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers');
|
||||||
@ -72,7 +73,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have arch-review and qa-review as parallel sub-movements', () => {
|
it('should have arch-review and qa-review as parallel sub-movements', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
||||||
const subMovementNames = reviewersMovement.parallel!.map((s) => s.name);
|
const subMovementNames = reviewersMovement.parallel!.map((s) => s.name);
|
||||||
|
|
||||||
@ -81,7 +82,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have aggregate conditions on the reviewers parent movement', () => {
|
it('should have aggregate conditions on the reviewers parent movement', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
||||||
|
|
||||||
expect(reviewersMovement.rules).toBeDefined();
|
expect(reviewersMovement.rules).toBeDefined();
|
||||||
@ -99,7 +100,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have matching conditions on sub-movements for aggregation', () => {
|
it('should have matching conditions on sub-movements for aggregation', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
||||||
|
|
||||||
for (const subMovement of reviewersMovement.parallel!) {
|
for (const subMovement of reviewersMovement.parallel!) {
|
||||||
@ -111,7 +112,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have ai_review transitioning to reviewers movement', () => {
|
it('should have ai_review transitioning to reviewers movement', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const aiReviewMovement = piece!.movements.find((s) => s.name === 'ai_review')!;
|
const aiReviewMovement = piece!.movements.find((s) => s.name === 'ai_review')!;
|
||||||
|
|
||||||
const approveRule = aiReviewMovement.rules!.find((r) => r.next === 'reviewers');
|
const approveRule = aiReviewMovement.rules!.find((r) => r.next === 'reviewers');
|
||||||
@ -119,7 +120,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have ai_fix transitioning to ai_review movement', () => {
|
it('should have ai_fix transitioning to ai_review movement', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const aiFixMovement = piece!.movements.find((s) => s.name === 'ai_fix')!;
|
const aiFixMovement = piece!.movements.find((s) => s.name === 'ai_fix')!;
|
||||||
|
|
||||||
const fixedRule = aiFixMovement.rules!.find((r) => r.next === 'ai_review');
|
const fixedRule = aiFixMovement.rules!.find((r) => r.next === 'ai_review');
|
||||||
@ -127,7 +128,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have fix movement transitioning back to reviewers', () => {
|
it('should have fix movement transitioning back to reviewers', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const fixMovement = piece!.movements.find((s) => s.name === 'fix')!;
|
const fixMovement = piece!.movements.find((s) => s.name === 'fix')!;
|
||||||
|
|
||||||
const fixedRule = fixMovement.rules!.find((r) => r.next === 'reviewers');
|
const fixedRule = fixMovement.rules!.find((r) => r.next === 'reviewers');
|
||||||
@ -135,7 +136,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should not have old separate review/security_review/improve movements', () => {
|
it('should not have old separate review/security_review/improve movements', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const movementNames = piece!.movements.map((s) => s.name);
|
const movementNames = piece!.movements.map((s) => s.name);
|
||||||
|
|
||||||
expect(movementNames).not.toContain('review');
|
expect(movementNames).not.toContain('review');
|
||||||
@ -145,7 +146,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have sub-movements with correct agents', () => {
|
it('should have sub-movements with correct agents', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
||||||
|
|
||||||
const archReview = reviewersMovement.parallel!.find((s) => s.name === 'arch-review')!;
|
const archReview = reviewersMovement.parallel!.find((s) => s.name === 'arch-review')!;
|
||||||
@ -156,7 +157,7 @@ describe('default piece parallel reviewers movement', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should have output contracts configured on sub-movements', () => {
|
it('should have output contracts configured on sub-movements', () => {
|
||||||
const piece = getBuiltinPiece('default');
|
const piece = getBuiltinPiece('default', process.cwd());
|
||||||
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
const reviewersMovement = piece!.movements.find((s) => s.name === 'reviewers')!;
|
||||||
|
|
||||||
const archReview = reviewersMovement.parallel!.find((s) => s.name === 'arch-review')!;
|
const archReview = reviewersMovement.parallel!.find((s) => s.name === 'arch-review')!;
|
||||||
@ -288,54 +289,13 @@ describe('loadPersonaPromptFromPath (builtin paths)', () => {
|
|||||||
const personaPath = join(builtinPersonasDir, 'coder.md');
|
const personaPath = join(builtinPersonasDir, 'coder.md');
|
||||||
|
|
||||||
if (existsSync(personaPath)) {
|
if (existsSync(personaPath)) {
|
||||||
const prompt = loadPersonaPromptFromPath(personaPath);
|
const prompt = loadPersonaPromptFromPath(personaPath, process.cwd());
|
||||||
expect(prompt).toBeTruthy();
|
expect(prompt).toBeTruthy();
|
||||||
expect(typeof prompt).toBe('string');
|
expect(typeof prompt).toBe('string');
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('getCurrentPiece', () => {
|
|
||||||
let testDir: string;
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
testDir = join(tmpdir(), `takt-test-${randomUUID()}`);
|
|
||||||
mkdirSync(testDir, { recursive: true });
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
if (existsSync(testDir)) {
|
|
||||||
rmSync(testDir, { recursive: true, force: true });
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return default when no config exists', () => {
|
|
||||||
const piece = getCurrentPiece(testDir);
|
|
||||||
|
|
||||||
expect(piece).toBe('default');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return saved piece name from config.yaml', () => {
|
|
||||||
const configDir = getProjectConfigDir(testDir);
|
|
||||||
mkdirSync(configDir, { recursive: true });
|
|
||||||
writeFileSync(join(configDir, 'config.yaml'), 'piece: default\n');
|
|
||||||
|
|
||||||
const piece = getCurrentPiece(testDir);
|
|
||||||
|
|
||||||
expect(piece).toBe('default');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return default for empty config', () => {
|
|
||||||
const configDir = getProjectConfigDir(testDir);
|
|
||||||
mkdirSync(configDir, { recursive: true });
|
|
||||||
writeFileSync(join(configDir, 'config.yaml'), '');
|
|
||||||
|
|
||||||
const piece = getCurrentPiece(testDir);
|
|
||||||
|
|
||||||
expect(piece).toBe('default');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('setCurrentPiece', () => {
|
describe('setCurrentPiece', () => {
|
||||||
let testDir: string;
|
let testDir: string;
|
||||||
|
|
||||||
@ -371,12 +331,160 @@ describe('setCurrentPiece', () => {
|
|||||||
setCurrentPiece(testDir, 'first');
|
setCurrentPiece(testDir, 'first');
|
||||||
setCurrentPiece(testDir, 'second');
|
setCurrentPiece(testDir, 'second');
|
||||||
|
|
||||||
const piece = getCurrentPiece(testDir);
|
const piece = loadProjectConfig(testDir).piece;
|
||||||
|
|
||||||
expect(piece).toBe('second');
|
expect(piece).toBe('second');
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe('loadProjectConfig provider_options', () => {
|
||||||
|
let testDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
testDir = join(tmpdir(), `takt-test-${randomUUID()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
if (existsSync(testDir)) {
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should normalize provider_options into providerOptions (camelCase)', () => {
|
||||||
|
const projectConfigDir = getProjectConfigDir(testDir);
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(projectConfigDir, 'config.yaml'), [
|
||||||
|
'piece: default',
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: true',
|
||||||
|
' claude:',
|
||||||
|
' sandbox:',
|
||||||
|
' allow_unsandboxed_commands: true',
|
||||||
|
].join('\n'));
|
||||||
|
|
||||||
|
const config = loadProjectConfig(testDir);
|
||||||
|
|
||||||
|
expect(config.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
claude: { sandbox: { allowUnsandboxedCommands: true } },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should apply TAKT_PROVIDER_OPTIONS_* env overrides for project config', () => {
|
||||||
|
const original = process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS = 'false';
|
||||||
|
|
||||||
|
const config = loadProjectConfig(testDir);
|
||||||
|
expect(config.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
});
|
||||||
|
|
||||||
|
if (original === undefined) {
|
||||||
|
delete process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS = original;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('isVerboseMode', () => {
|
||||||
|
let testDir: string;
|
||||||
|
let originalTaktConfigDir: string | undefined;
|
||||||
|
let originalTaktVerbose: string | undefined;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
testDir = join(tmpdir(), `takt-test-${randomUUID()}`);
|
||||||
|
mkdirSync(testDir, { recursive: true });
|
||||||
|
originalTaktConfigDir = process.env.TAKT_CONFIG_DIR;
|
||||||
|
originalTaktVerbose = process.env.TAKT_VERBOSE;
|
||||||
|
process.env.TAKT_CONFIG_DIR = join(testDir, 'global-takt');
|
||||||
|
delete process.env.TAKT_VERBOSE;
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
if (originalTaktConfigDir === undefined) {
|
||||||
|
delete process.env.TAKT_CONFIG_DIR;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_CONFIG_DIR = originalTaktConfigDir;
|
||||||
|
}
|
||||||
|
if (originalTaktVerbose === undefined) {
|
||||||
|
delete process.env.TAKT_VERBOSE;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_VERBOSE = originalTaktVerbose;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (existsSync(testDir)) {
|
||||||
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return project verbose when project config has verbose: true', () => {
|
||||||
|
const projectConfigDir = getProjectConfigDir(testDir);
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(projectConfigDir, 'config.yaml'), 'piece: default\nverbose: true\n');
|
||||||
|
|
||||||
|
const globalConfigDir = process.env.TAKT_CONFIG_DIR!;
|
||||||
|
mkdirSync(globalConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(globalConfigDir, 'config.yaml'), 'verbose: false\n');
|
||||||
|
|
||||||
|
expect(isVerboseMode(testDir)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return project verbose when project config has verbose: false', () => {
|
||||||
|
const projectConfigDir = getProjectConfigDir(testDir);
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(projectConfigDir, 'config.yaml'), 'piece: default\nverbose: false\n');
|
||||||
|
|
||||||
|
const globalConfigDir = process.env.TAKT_CONFIG_DIR!;
|
||||||
|
mkdirSync(globalConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(globalConfigDir, 'config.yaml'), 'verbose: true\n');
|
||||||
|
|
||||||
|
expect(isVerboseMode(testDir)).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fallback to global verbose when project verbose is not set', () => {
|
||||||
|
const projectConfigDir = getProjectConfigDir(testDir);
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(projectConfigDir, 'config.yaml'), 'piece: default\n');
|
||||||
|
|
||||||
|
const globalConfigDir = process.env.TAKT_CONFIG_DIR!;
|
||||||
|
mkdirSync(globalConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(globalConfigDir, 'config.yaml'), 'verbose: true\n');
|
||||||
|
|
||||||
|
expect(isVerboseMode(testDir)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false when neither project nor global verbose is set', () => {
|
||||||
|
expect(isVerboseMode(testDir)).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should prioritize TAKT_VERBOSE over project and global config', () => {
|
||||||
|
const projectConfigDir = getProjectConfigDir(testDir);
|
||||||
|
mkdirSync(projectConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(projectConfigDir, 'config.yaml'), 'piece: default\nverbose: false\n');
|
||||||
|
|
||||||
|
const globalConfigDir = process.env.TAKT_CONFIG_DIR!;
|
||||||
|
mkdirSync(globalConfigDir, { recursive: true });
|
||||||
|
writeFileSync(join(globalConfigDir, 'config.yaml'), 'verbose: false\n');
|
||||||
|
|
||||||
|
process.env.TAKT_VERBOSE = 'true';
|
||||||
|
expect(isVerboseMode(testDir)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw on TAKT_VERBOSE=0', () => {
|
||||||
|
process.env.TAKT_VERBOSE = '0';
|
||||||
|
expect(() => isVerboseMode(testDir)).toThrow('TAKT_VERBOSE must be one of: true, false');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw on invalid TAKT_VERBOSE value', () => {
|
||||||
|
process.env.TAKT_VERBOSE = 'yes';
|
||||||
|
expect(() => isVerboseMode(testDir)).toThrow('TAKT_VERBOSE must be one of: true, false');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
describe('loadInputHistory', () => {
|
describe('loadInputHistory', () => {
|
||||||
let testDir: string;
|
let testDir: string;
|
||||||
|
|
||||||
|
|||||||
215
src/__tests__/conversationLoop-resume.test.ts
Normal file
215
src/__tests__/conversationLoop-resume.test.ts
Normal file
@ -0,0 +1,215 @@
|
|||||||
|
/**
|
||||||
|
* Tests for /resume command and initializeSession changes.
|
||||||
|
*
|
||||||
|
* Verifies:
|
||||||
|
* - initializeSession returns sessionId: undefined (no implicit auto-load)
|
||||||
|
* - /resume command calls selectRecentSession and updates sessionId
|
||||||
|
* - /resume with cancel does not change sessionId
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||||
|
import {
|
||||||
|
setupRawStdin,
|
||||||
|
restoreStdin,
|
||||||
|
toRawInputs,
|
||||||
|
createMockProvider,
|
||||||
|
createScenarioProvider,
|
||||||
|
type MockProviderCapture,
|
||||||
|
} from './helpers/stdinSimulator.js';
|
||||||
|
|
||||||
|
// --- Infrastructure mocks ---
|
||||||
|
|
||||||
|
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
||||||
|
loadGlobalConfig: vi.fn(() => ({ provider: 'mock', language: 'en' })),
|
||||||
|
getBuiltinPiecesEnabled: vi.fn().mockReturnValue(true),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/providers/index.js', () => ({
|
||||||
|
getProvider: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
createLogger: () => ({ info: vi.fn(), debug: vi.fn(), error: vi.fn() }),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/context.js', () => ({
|
||||||
|
isQuietMode: vi.fn(() => false),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/paths.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
loadPersonaSessions: vi.fn(() => ({})),
|
||||||
|
updatePersonaSession: vi.fn(),
|
||||||
|
getProjectConfigDir: vi.fn(() => '/tmp'),
|
||||||
|
loadSessionState: vi.fn(() => null),
|
||||||
|
clearSessionState: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/ui/index.js', () => ({
|
||||||
|
info: vi.fn(),
|
||||||
|
error: vi.fn(),
|
||||||
|
blankLine: vi.fn(),
|
||||||
|
StreamDisplay: vi.fn().mockImplementation(() => ({
|
||||||
|
createHandler: vi.fn(() => vi.fn()),
|
||||||
|
flush: vi.fn(),
|
||||||
|
})),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/prompt/index.js', () => ({
|
||||||
|
selectOption: vi.fn().mockResolvedValue('execute'),
|
||||||
|
}));
|
||||||
|
|
||||||
|
const mockSelectRecentSession = vi.fn<(cwd: string, lang: 'en' | 'ja') => Promise<string | null>>();
|
||||||
|
|
||||||
|
vi.mock('../features/interactive/sessionSelector.js', () => ({
|
||||||
|
selectRecentSession: (...args: [string, 'en' | 'ja']) => mockSelectRecentSession(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/i18n/index.js', () => ({
|
||||||
|
getLabel: vi.fn((_key: string, _lang: string) => 'Mock label'),
|
||||||
|
getLabelObject: vi.fn(() => ({
|
||||||
|
intro: 'Intro',
|
||||||
|
resume: 'Resume',
|
||||||
|
noConversation: 'No conversation',
|
||||||
|
summarizeFailed: 'Summarize failed',
|
||||||
|
continuePrompt: 'Continue?',
|
||||||
|
proposed: 'Proposed:',
|
||||||
|
actionPrompt: 'What next?',
|
||||||
|
playNoTask: 'No task for /play',
|
||||||
|
cancelled: 'Cancelled',
|
||||||
|
actions: { execute: 'Execute', saveTask: 'Save', continue: 'Continue' },
|
||||||
|
})),
|
||||||
|
}));
|
||||||
|
|
||||||
|
// --- Imports (after mocks) ---
|
||||||
|
|
||||||
|
import { getProvider } from '../infra/providers/index.js';
|
||||||
|
import { selectOption } from '../shared/prompt/index.js';
|
||||||
|
import { info as logInfo } from '../shared/ui/index.js';
|
||||||
|
import { initializeSession, runConversationLoop, type SessionContext } from '../features/interactive/conversationLoop.js';
|
||||||
|
|
||||||
|
const mockGetProvider = vi.mocked(getProvider);
|
||||||
|
const mockSelectOption = vi.mocked(selectOption);
|
||||||
|
const mockLogInfo = vi.mocked(logInfo);
|
||||||
|
|
||||||
|
// --- Helpers ---
|
||||||
|
|
||||||
|
function setupProvider(responses: string[]): MockProviderCapture {
|
||||||
|
const { provider, capture } = createMockProvider(responses);
|
||||||
|
mockGetProvider.mockReturnValue(provider);
|
||||||
|
return capture;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createSessionContext(overrides: Partial<SessionContext> = {}): SessionContext {
|
||||||
|
const { provider } = createMockProvider([]);
|
||||||
|
mockGetProvider.mockReturnValue(provider);
|
||||||
|
return {
|
||||||
|
provider: provider as SessionContext['provider'],
|
||||||
|
providerType: 'mock' as SessionContext['providerType'],
|
||||||
|
model: undefined,
|
||||||
|
lang: 'en',
|
||||||
|
personaName: 'interactive',
|
||||||
|
sessionId: undefined,
|
||||||
|
...overrides,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const defaultStrategy = {
|
||||||
|
systemPrompt: 'test system prompt',
|
||||||
|
allowedTools: ['Read'],
|
||||||
|
transformPrompt: (msg: string) => msg,
|
||||||
|
introMessage: 'Test intro',
|
||||||
|
};
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
mockSelectOption.mockResolvedValue('execute');
|
||||||
|
mockSelectRecentSession.mockResolvedValue(null);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
restoreStdin();
|
||||||
|
});
|
||||||
|
|
||||||
|
// =================================================================
|
||||||
|
// initializeSession: no implicit session auto-load
|
||||||
|
// =================================================================
|
||||||
|
describe('initializeSession', () => {
|
||||||
|
it('should return sessionId as undefined (no implicit auto-load)', () => {
|
||||||
|
const ctx = initializeSession('/test/cwd', 'interactive');
|
||||||
|
|
||||||
|
expect(ctx.sessionId).toBeUndefined();
|
||||||
|
expect(ctx.personaName).toBe('interactive');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// =================================================================
|
||||||
|
// /resume command
|
||||||
|
// =================================================================
|
||||||
|
describe('/resume command', () => {
|
||||||
|
it('should call selectRecentSession and update sessionId when session selected', async () => {
|
||||||
|
// Given: /resume → select session → /cancel
|
||||||
|
setupRawStdin(toRawInputs(['/resume', '/cancel']));
|
||||||
|
setupProvider([]);
|
||||||
|
mockSelectRecentSession.mockResolvedValue('selected-session-abc');
|
||||||
|
|
||||||
|
const ctx = createSessionContext();
|
||||||
|
|
||||||
|
// When
|
||||||
|
const result = await runConversationLoop('/test', ctx, defaultStrategy, undefined, undefined);
|
||||||
|
|
||||||
|
// Then: selectRecentSession called
|
||||||
|
expect(mockSelectRecentSession).toHaveBeenCalledWith('/test', 'en');
|
||||||
|
|
||||||
|
// Then: info about loaded session displayed
|
||||||
|
expect(mockLogInfo).toHaveBeenCalledWith('Mock label');
|
||||||
|
|
||||||
|
// Then: cancelled at the end
|
||||||
|
expect(result.action).toBe('cancel');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not change sessionId when user cancels session selection', async () => {
|
||||||
|
// Given: /resume → cancel selection → /cancel
|
||||||
|
setupRawStdin(toRawInputs(['/resume', '/cancel']));
|
||||||
|
setupProvider([]);
|
||||||
|
mockSelectRecentSession.mockResolvedValue(null);
|
||||||
|
|
||||||
|
const ctx = createSessionContext();
|
||||||
|
|
||||||
|
// When
|
||||||
|
const result = await runConversationLoop('/test', ctx, defaultStrategy, undefined, undefined);
|
||||||
|
|
||||||
|
// Then: selectRecentSession called but returned null
|
||||||
|
expect(mockSelectRecentSession).toHaveBeenCalledWith('/test', 'en');
|
||||||
|
|
||||||
|
// Then: cancelled
|
||||||
|
expect(result.action).toBe('cancel');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use resumed session for subsequent AI calls', async () => {
|
||||||
|
// Given: /resume → select session → send message → /cancel
|
||||||
|
setupRawStdin(toRawInputs(['/resume', 'hello world', '/cancel']));
|
||||||
|
mockSelectRecentSession.mockResolvedValue('resumed-session-xyz');
|
||||||
|
|
||||||
|
const { provider, capture } = createScenarioProvider([
|
||||||
|
{ content: 'AI response' },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const ctx: SessionContext = {
|
||||||
|
provider: provider as SessionContext['provider'],
|
||||||
|
providerType: 'mock' as SessionContext['providerType'],
|
||||||
|
model: undefined,
|
||||||
|
lang: 'en',
|
||||||
|
personaName: 'interactive',
|
||||||
|
sessionId: undefined,
|
||||||
|
};
|
||||||
|
|
||||||
|
// When
|
||||||
|
const result = await runConversationLoop('/test', ctx, defaultStrategy, undefined, undefined);
|
||||||
|
|
||||||
|
// Then: AI call should use the resumed session ID
|
||||||
|
expect(capture.sessionIds[0]).toBe('resumed-session-xyz');
|
||||||
|
expect(result.action).toBe('cancel');
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -85,7 +85,6 @@ describe('createIsolatedEnv', () => {
|
|||||||
|
|
||||||
expect(config.language).toBe('en');
|
expect(config.language).toBe('en');
|
||||||
expect(config.log_level).toBe('info');
|
expect(config.log_level).toBe('info');
|
||||||
expect(config.default_piece).toBe('default');
|
|
||||||
expect(config.notification_sound).toBe(false);
|
expect(config.notification_sound).toBe(false);
|
||||||
expect(config.notification_sound_events).toEqual({
|
expect(config.notification_sound_events).toEqual({
|
||||||
iteration_limit: false,
|
iteration_limit: false,
|
||||||
@ -173,7 +172,6 @@ describe('createIsolatedEnv', () => {
|
|||||||
[
|
[
|
||||||
'language: en',
|
'language: en',
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'default_piece: default',
|
|
||||||
'notification_sound: true',
|
'notification_sound: true',
|
||||||
'notification_sound_events: true',
|
'notification_sound_events: true',
|
||||||
].join('\n'),
|
].join('\n'),
|
||||||
|
|||||||
135
src/__tests__/engine-provider-options.test.ts
Normal file
135
src/__tests__/engine-provider-options.test.ts
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||||
|
import { rmSync } from 'node:fs';
|
||||||
|
|
||||||
|
vi.mock('../agents/runner.js', () => ({
|
||||||
|
runAgent: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../core/piece/evaluation/index.js', () => ({
|
||||||
|
detectMatchedRule: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../core/piece/phase-runner.js', () => ({
|
||||||
|
needsStatusJudgmentPhase: vi.fn(),
|
||||||
|
runReportPhase: vi.fn(),
|
||||||
|
runStatusJudgmentPhase: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
generateReportDir: vi.fn().mockReturnValue('test-report-dir'),
|
||||||
|
}));
|
||||||
|
|
||||||
|
import { PieceEngine } from '../core/piece/index.js';
|
||||||
|
import { runAgent } from '../agents/runner.js';
|
||||||
|
import {
|
||||||
|
applyDefaultMocks,
|
||||||
|
cleanupPieceEngine,
|
||||||
|
createTestTmpDir,
|
||||||
|
makeMovement,
|
||||||
|
makeResponse,
|
||||||
|
makeRule,
|
||||||
|
mockDetectMatchedRuleSequence,
|
||||||
|
mockRunAgentSequence,
|
||||||
|
} from './engine-test-helpers.js';
|
||||||
|
import type { PieceConfig } from '../core/models/index.js';
|
||||||
|
|
||||||
|
describe('PieceEngine provider_options resolution', () => {
|
||||||
|
let tmpDir: string;
|
||||||
|
let engine: PieceEngine | undefined;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.resetAllMocks();
|
||||||
|
applyDefaultMocks();
|
||||||
|
tmpDir = createTestTmpDir();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
if (engine) {
|
||||||
|
cleanupPieceEngine(engine);
|
||||||
|
engine = undefined;
|
||||||
|
}
|
||||||
|
if (tmpDir) {
|
||||||
|
rmSync(tmpDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should merge provider_options in order: global < project < movement', async () => {
|
||||||
|
const movement = makeMovement('implement', {
|
||||||
|
providerOptions: {
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
claude: { sandbox: { excludedCommands: ['./gradlew'] } },
|
||||||
|
},
|
||||||
|
rules: [makeRule('done', 'COMPLETE')],
|
||||||
|
});
|
||||||
|
|
||||||
|
const config: PieceConfig = {
|
||||||
|
name: 'provider-options-priority',
|
||||||
|
movements: [movement],
|
||||||
|
initialMovement: 'implement',
|
||||||
|
maxMovements: 1,
|
||||||
|
};
|
||||||
|
|
||||||
|
mockRunAgentSequence([
|
||||||
|
makeResponse({ persona: movement.persona, content: 'done' }),
|
||||||
|
]);
|
||||||
|
mockDetectMatchedRuleSequence([{ index: 0, method: 'phase1_tag' }]);
|
||||||
|
|
||||||
|
engine = new PieceEngine(config, tmpDir, 'test task', {
|
||||||
|
projectCwd: tmpDir,
|
||||||
|
provider: 'claude',
|
||||||
|
providerOptions: {
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
claude: { sandbox: { allowUnsandboxedCommands: false } },
|
||||||
|
opencode: { networkAccess: true },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await engine.run();
|
||||||
|
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
opencode: { networkAccess: true },
|
||||||
|
claude: {
|
||||||
|
sandbox: {
|
||||||
|
allowUnsandboxedCommands: false,
|
||||||
|
excludedCommands: ['./gradlew'],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should pass global provider_options when project and movement options are absent', async () => {
|
||||||
|
const movement = makeMovement('implement', {
|
||||||
|
rules: [makeRule('done', 'COMPLETE')],
|
||||||
|
});
|
||||||
|
|
||||||
|
const config: PieceConfig = {
|
||||||
|
name: 'provider-options-global-only',
|
||||||
|
movements: [movement],
|
||||||
|
initialMovement: 'implement',
|
||||||
|
maxMovements: 1,
|
||||||
|
};
|
||||||
|
|
||||||
|
mockRunAgentSequence([
|
||||||
|
makeResponse({ persona: movement.persona, content: 'done' }),
|
||||||
|
]);
|
||||||
|
mockDetectMatchedRuleSequence([{ index: 0, method: 'phase1_tag' }]);
|
||||||
|
|
||||||
|
engine = new PieceEngine(config, tmpDir, 'test task', {
|
||||||
|
projectCwd: tmpDir,
|
||||||
|
provider: 'claude',
|
||||||
|
providerOptions: {
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await engine.run();
|
||||||
|
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
158
src/__tests__/faceted-prompting/compose.test.ts
Normal file
158
src/__tests__/faceted-prompting/compose.test.ts
Normal file
@ -0,0 +1,158 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting compose module.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { compose } from '../../faceted-prompting/index.js';
|
||||||
|
import type { FacetSet, ComposeOptions } from '../../faceted-prompting/index.js';
|
||||||
|
|
||||||
|
const defaultOptions: ComposeOptions = { contextMaxChars: 2000 };
|
||||||
|
|
||||||
|
describe('compose', () => {
|
||||||
|
it('should place persona in systemPrompt', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
persona: { body: 'You are a helpful assistant.' },
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.systemPrompt).toBe('You are a helpful assistant.');
|
||||||
|
expect(result.userMessage).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should place instruction in userMessage', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
instruction: { body: 'Implement feature X.' },
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.systemPrompt).toBe('');
|
||||||
|
expect(result.userMessage).toBe('Implement feature X.');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should place policy in userMessage with conflict notice', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
policies: [{ body: 'Follow clean code principles.' }],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.systemPrompt).toBe('');
|
||||||
|
expect(result.userMessage).toContain('Follow clean code principles.');
|
||||||
|
expect(result.userMessage).toContain('If prompt content conflicts with source files');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should place knowledge in userMessage with conflict notice', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
knowledge: [{ body: 'Architecture documentation.' }],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.systemPrompt).toBe('');
|
||||||
|
expect(result.userMessage).toContain('Architecture documentation.');
|
||||||
|
expect(result.userMessage).toContain('If prompt content conflicts with source files');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should compose all facets in correct order: policy, knowledge, instruction', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
persona: { body: 'You are a coder.' },
|
||||||
|
policies: [{ body: 'POLICY' }],
|
||||||
|
knowledge: [{ body: 'KNOWLEDGE' }],
|
||||||
|
instruction: { body: 'INSTRUCTION' },
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.systemPrompt).toBe('You are a coder.');
|
||||||
|
|
||||||
|
const policyIdx = result.userMessage.indexOf('POLICY');
|
||||||
|
const knowledgeIdx = result.userMessage.indexOf('KNOWLEDGE');
|
||||||
|
const instructionIdx = result.userMessage.indexOf('INSTRUCTION');
|
||||||
|
|
||||||
|
expect(policyIdx).toBeLessThan(knowledgeIdx);
|
||||||
|
expect(knowledgeIdx).toBeLessThan(instructionIdx);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should join multiple policies with separator', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
policies: [
|
||||||
|
{ body: 'Policy A' },
|
||||||
|
{ body: 'Policy B' },
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.userMessage).toContain('Policy A');
|
||||||
|
expect(result.userMessage).toContain('---');
|
||||||
|
expect(result.userMessage).toContain('Policy B');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should join multiple knowledge items with separator', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
knowledge: [
|
||||||
|
{ body: 'Knowledge A' },
|
||||||
|
{ body: 'Knowledge B' },
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.userMessage).toContain('Knowledge A');
|
||||||
|
expect(result.userMessage).toContain('---');
|
||||||
|
expect(result.userMessage).toContain('Knowledge B');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should truncate policy content exceeding contextMaxChars', () => {
|
||||||
|
const longPolicy = 'x'.repeat(3000);
|
||||||
|
const facets: FacetSet = {
|
||||||
|
policies: [{ body: longPolicy, sourcePath: '/path/policy.md' }],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, { contextMaxChars: 2000 });
|
||||||
|
|
||||||
|
expect(result.userMessage).toContain('...TRUNCATED...');
|
||||||
|
expect(result.userMessage).toContain('Policy is authoritative');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should truncate knowledge content exceeding contextMaxChars', () => {
|
||||||
|
const longKnowledge = 'y'.repeat(3000);
|
||||||
|
const facets: FacetSet = {
|
||||||
|
knowledge: [{ body: longKnowledge, sourcePath: '/path/knowledge.md' }],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, { contextMaxChars: 2000 });
|
||||||
|
|
||||||
|
expect(result.userMessage).toContain('...TRUNCATED...');
|
||||||
|
expect(result.userMessage).toContain('Knowledge is truncated');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty facet set', () => {
|
||||||
|
const result = compose({}, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.systemPrompt).toBe('');
|
||||||
|
expect(result.userMessage).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include source path for single policy', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
policies: [{ body: 'Policy text', sourcePath: '/policies/coding.md' }],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.userMessage).toContain('Policy Source: /policies/coding.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include source path for single knowledge', () => {
|
||||||
|
const facets: FacetSet = {
|
||||||
|
knowledge: [{ body: 'Knowledge text', sourcePath: '/knowledge/arch.md' }],
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = compose(facets, defaultOptions);
|
||||||
|
|
||||||
|
expect(result.userMessage).toContain('Knowledge Source: /knowledge/arch.md');
|
||||||
|
});
|
||||||
|
});
|
||||||
174
src/__tests__/faceted-prompting/data-engine.test.ts
Normal file
174
src/__tests__/faceted-prompting/data-engine.test.ts
Normal file
@ -0,0 +1,174 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting DataEngine implementations.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { FileDataEngine, CompositeDataEngine } from '../../faceted-prompting/index.js';
|
||||||
|
import type { DataEngine, FacetKind } from '../../faceted-prompting/index.js';
|
||||||
|
|
||||||
|
import { existsSync, readFileSync, readdirSync } from 'node:fs';
|
||||||
|
|
||||||
|
vi.mock('node:fs', () => ({
|
||||||
|
existsSync: vi.fn(),
|
||||||
|
readFileSync: vi.fn(),
|
||||||
|
readdirSync: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
const mockExistsSync = vi.mocked(existsSync);
|
||||||
|
const mockReadFileSync = vi.mocked(readFileSync);
|
||||||
|
const mockReaddirSync = vi.mocked(readdirSync);
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.resetAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('FileDataEngine', () => {
|
||||||
|
const engine = new FileDataEngine('/root');
|
||||||
|
|
||||||
|
describe('resolve', () => {
|
||||||
|
it('should return FacetContent when file exists', async () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
mockReadFileSync.mockReturnValue('persona body');
|
||||||
|
|
||||||
|
const result = await engine.resolve('personas', 'coder');
|
||||||
|
expect(result).toEqual({
|
||||||
|
body: 'persona body',
|
||||||
|
sourcePath: '/root/personas/coder.md',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined when file does not exist', async () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
|
||||||
|
const result = await engine.resolve('policies', 'missing');
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should resolve correct directory for each facet kind', async () => {
|
||||||
|
const kinds: FacetKind[] = ['personas', 'policies', 'knowledge', 'instructions', 'output-contracts'];
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
mockReadFileSync.mockReturnValue('content');
|
||||||
|
|
||||||
|
for (const kind of kinds) {
|
||||||
|
const result = await engine.resolve(kind, 'test');
|
||||||
|
expect(result?.sourcePath).toBe(`/root/${kind}/test.md`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('list', () => {
|
||||||
|
it('should return facet keys from directory', async () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
mockReaddirSync.mockReturnValue(['coder.md', 'architect.md', 'readme.txt'] as unknown as ReturnType<typeof readdirSync>);
|
||||||
|
|
||||||
|
const result = await engine.list('personas');
|
||||||
|
expect(result).toEqual(['coder', 'architect']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array when directory does not exist', async () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
|
||||||
|
const result = await engine.list('policies');
|
||||||
|
expect(result).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should filter non-.md files', async () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
mockReaddirSync.mockReturnValue(['a.md', 'b.txt', 'c.md'] as unknown as ReturnType<typeof readdirSync>);
|
||||||
|
|
||||||
|
const result = await engine.list('knowledge');
|
||||||
|
expect(result).toEqual(['a', 'c']);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('CompositeDataEngine', () => {
|
||||||
|
it('should throw when constructed with empty engines array', () => {
|
||||||
|
expect(() => new CompositeDataEngine([])).toThrow(
|
||||||
|
'CompositeDataEngine requires at least one engine',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolve', () => {
|
||||||
|
it('should return result from first engine that resolves', async () => {
|
||||||
|
const engine1: DataEngine = {
|
||||||
|
resolve: vi.fn().mockResolvedValue(undefined),
|
||||||
|
list: vi.fn().mockResolvedValue([]),
|
||||||
|
};
|
||||||
|
const engine2: DataEngine = {
|
||||||
|
resolve: vi.fn().mockResolvedValue({ body: 'from engine2', sourcePath: '/e2/p.md' }),
|
||||||
|
list: vi.fn().mockResolvedValue([]),
|
||||||
|
};
|
||||||
|
|
||||||
|
const composite = new CompositeDataEngine([engine1, engine2]);
|
||||||
|
const result = await composite.resolve('personas', 'coder');
|
||||||
|
|
||||||
|
expect(result).toEqual({ body: 'from engine2', sourcePath: '/e2/p.md' });
|
||||||
|
expect(engine1.resolve).toHaveBeenCalledWith('personas', 'coder');
|
||||||
|
expect(engine2.resolve).toHaveBeenCalledWith('personas', 'coder');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return first match (first-wins)', async () => {
|
||||||
|
const engine1: DataEngine = {
|
||||||
|
resolve: vi.fn().mockResolvedValue({ body: 'from engine1' }),
|
||||||
|
list: vi.fn().mockResolvedValue([]),
|
||||||
|
};
|
||||||
|
const engine2: DataEngine = {
|
||||||
|
resolve: vi.fn().mockResolvedValue({ body: 'from engine2' }),
|
||||||
|
list: vi.fn().mockResolvedValue([]),
|
||||||
|
};
|
||||||
|
|
||||||
|
const composite = new CompositeDataEngine([engine1, engine2]);
|
||||||
|
const result = await composite.resolve('personas', 'coder');
|
||||||
|
|
||||||
|
expect(result?.body).toBe('from engine1');
|
||||||
|
expect(engine2.resolve).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined when no engine resolves', async () => {
|
||||||
|
const engine1: DataEngine = {
|
||||||
|
resolve: vi.fn().mockResolvedValue(undefined),
|
||||||
|
list: vi.fn().mockResolvedValue([]),
|
||||||
|
};
|
||||||
|
|
||||||
|
const composite = new CompositeDataEngine([engine1]);
|
||||||
|
const result = await composite.resolve('policies', 'missing');
|
||||||
|
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('list', () => {
|
||||||
|
it('should return deduplicated keys from all engines', async () => {
|
||||||
|
const engine1: DataEngine = {
|
||||||
|
resolve: vi.fn(),
|
||||||
|
list: vi.fn().mockResolvedValue(['a', 'b']),
|
||||||
|
};
|
||||||
|
const engine2: DataEngine = {
|
||||||
|
resolve: vi.fn(),
|
||||||
|
list: vi.fn().mockResolvedValue(['b', 'c']),
|
||||||
|
};
|
||||||
|
|
||||||
|
const composite = new CompositeDataEngine([engine1, engine2]);
|
||||||
|
const result = await composite.list('personas');
|
||||||
|
|
||||||
|
expect(result).toEqual(['a', 'b', 'c']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should preserve order with first-seen priority', async () => {
|
||||||
|
const engine1: DataEngine = {
|
||||||
|
resolve: vi.fn(),
|
||||||
|
list: vi.fn().mockResolvedValue(['x', 'y']),
|
||||||
|
};
|
||||||
|
const engine2: DataEngine = {
|
||||||
|
resolve: vi.fn(),
|
||||||
|
list: vi.fn().mockResolvedValue(['y', 'z']),
|
||||||
|
};
|
||||||
|
|
||||||
|
const composite = new CompositeDataEngine([engine1, engine2]);
|
||||||
|
const result = await composite.list('knowledge');
|
||||||
|
|
||||||
|
expect(result).toEqual(['x', 'y', 'z']);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
30
src/__tests__/faceted-prompting/escape.test.ts
Normal file
30
src/__tests__/faceted-prompting/escape.test.ts
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting escape module.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { escapeTemplateChars } from '../../faceted-prompting/index.js';
|
||||||
|
|
||||||
|
describe('escapeTemplateChars', () => {
|
||||||
|
it('should replace curly braces with full-width equivalents', () => {
|
||||||
|
expect(escapeTemplateChars('{hello}')).toBe('\uff5bhello\uff5d');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple braces', () => {
|
||||||
|
expect(escapeTemplateChars('{{nested}}')).toBe('\uff5b\uff5bnested\uff5d\uff5d');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return unchanged string when no braces', () => {
|
||||||
|
expect(escapeTemplateChars('no braces here')).toBe('no braces here');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty string', () => {
|
||||||
|
expect(escapeTemplateChars('')).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle braces in code snippets', () => {
|
||||||
|
const input = 'function foo() { return { a: 1 }; }';
|
||||||
|
const expected = 'function foo() \uff5b return \uff5b a: 1 \uff5d; \uff5d';
|
||||||
|
expect(escapeTemplateChars(input)).toBe(expected);
|
||||||
|
});
|
||||||
|
});
|
||||||
287
src/__tests__/faceted-prompting/resolve.test.ts
Normal file
287
src/__tests__/faceted-prompting/resolve.test.ts
Normal file
@ -0,0 +1,287 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting resolve module.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||||
|
import {
|
||||||
|
isResourcePath,
|
||||||
|
resolveFacetPath,
|
||||||
|
resolveFacetByName,
|
||||||
|
resolveResourcePath,
|
||||||
|
resolveResourceContent,
|
||||||
|
resolveRefToContent,
|
||||||
|
resolveRefList,
|
||||||
|
resolveSectionMap,
|
||||||
|
extractPersonaDisplayName,
|
||||||
|
resolvePersona,
|
||||||
|
} from '../../faceted-prompting/index.js';
|
||||||
|
|
||||||
|
import { existsSync, readFileSync } from 'node:fs';
|
||||||
|
import { homedir } from 'node:os';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
|
||||||
|
vi.mock('node:fs', () => ({
|
||||||
|
existsSync: vi.fn(),
|
||||||
|
readFileSync: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
const mockExistsSync = vi.mocked(existsSync);
|
||||||
|
const mockReadFileSync = vi.mocked(readFileSync);
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.resetAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('isResourcePath', () => {
|
||||||
|
it('should return true for relative path with ./', () => {
|
||||||
|
expect(isResourcePath('./file.md')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return true for parent-relative path', () => {
|
||||||
|
expect(isResourcePath('../file.md')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return true for absolute path', () => {
|
||||||
|
expect(isResourcePath('/absolute/path.md')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return true for home-relative path', () => {
|
||||||
|
expect(isResourcePath('~/file.md')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return true for .md extension', () => {
|
||||||
|
expect(isResourcePath('some-file.md')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for a plain facet name', () => {
|
||||||
|
expect(isResourcePath('coding')).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for a name with dots but not .md', () => {
|
||||||
|
expect(isResourcePath('my.config')).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveFacetPath', () => {
|
||||||
|
it('should return the first existing file path', () => {
|
||||||
|
mockExistsSync.mockImplementation((p) => p === '/dir1/coding.md');
|
||||||
|
|
||||||
|
const result = resolveFacetPath('coding', ['/dir1', '/dir2']);
|
||||||
|
expect(result).toBe('/dir1/coding.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should skip non-existing directories and find in later ones', () => {
|
||||||
|
mockExistsSync.mockImplementation((p) => p === '/dir2/coding.md');
|
||||||
|
|
||||||
|
const result = resolveFacetPath('coding', ['/dir1', '/dir2']);
|
||||||
|
expect(result).toBe('/dir2/coding.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined when not found in any directory', () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
|
||||||
|
const result = resolveFacetPath('missing', ['/dir1', '/dir2']);
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined for empty candidate list', () => {
|
||||||
|
const result = resolveFacetPath('anything', []);
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveFacetByName', () => {
|
||||||
|
it('should return file content when facet exists', () => {
|
||||||
|
mockExistsSync.mockImplementation((p) => p === '/dir/coder.md');
|
||||||
|
mockReadFileSync.mockReturnValue('You are a coder.');
|
||||||
|
|
||||||
|
const result = resolveFacetByName('coder', ['/dir']);
|
||||||
|
expect(result).toBe('You are a coder.');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined when facet does not exist', () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
|
||||||
|
const result = resolveFacetByName('missing', ['/dir']);
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveResourcePath', () => {
|
||||||
|
it('should resolve ./ relative to pieceDir', () => {
|
||||||
|
const result = resolveResourcePath('./policies/coding.md', '/project/pieces');
|
||||||
|
expect(result).toBe(join('/project/pieces', 'policies/coding.md'));
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should resolve ~ relative to homedir', () => {
|
||||||
|
const result = resolveResourcePath('~/policies/coding.md', '/project');
|
||||||
|
expect(result).toBe(join(homedir(), 'policies/coding.md'));
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return absolute path unchanged', () => {
|
||||||
|
const result = resolveResourcePath('/absolute/path.md', '/project');
|
||||||
|
expect(result).toBe('/absolute/path.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should resolve plain name relative to pieceDir', () => {
|
||||||
|
const result = resolveResourcePath('coding.md', '/project/pieces');
|
||||||
|
expect(result).toBe(join('/project/pieces', 'coding.md'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveResourceContent', () => {
|
||||||
|
it('should return undefined for null/undefined spec', () => {
|
||||||
|
expect(resolveResourceContent(undefined, '/dir')).toBeUndefined();
|
||||||
|
expect(resolveResourceContent(null as unknown as string | undefined, '/dir')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should read file content for .md spec when file exists', () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
mockReadFileSync.mockReturnValue('file content');
|
||||||
|
|
||||||
|
const result = resolveResourceContent('./policy.md', '/dir');
|
||||||
|
expect(result).toBe('file content');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return spec as-is for .md spec when file does not exist', () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
|
||||||
|
const result = resolveResourceContent('./policy.md', '/dir');
|
||||||
|
expect(result).toBe('./policy.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return spec as-is for non-.md spec', () => {
|
||||||
|
const result = resolveResourceContent('inline content', '/dir');
|
||||||
|
expect(result).toBe('inline content');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveRefToContent', () => {
|
||||||
|
it('should return mapped content when found in resolvedMap', () => {
|
||||||
|
const result = resolveRefToContent('coding', { coding: 'mapped content' }, '/dir');
|
||||||
|
expect(result).toBe('mapped content');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should resolve resource path when ref is a resource path', () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
mockReadFileSync.mockReturnValue('file content');
|
||||||
|
|
||||||
|
const result = resolveRefToContent('./policy.md', undefined, '/dir');
|
||||||
|
expect(result).toBe('file content');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should try facet resolution via candidateDirs when ref is a name', () => {
|
||||||
|
mockExistsSync.mockImplementation((p) => p === '/facets/coding.md');
|
||||||
|
mockReadFileSync.mockReturnValue('facet content');
|
||||||
|
|
||||||
|
const result = resolveRefToContent('coding', undefined, '/dir', ['/facets']);
|
||||||
|
expect(result).toBe('facet content');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fall back to resolveResourceContent when not found elsewhere', () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
|
||||||
|
const result = resolveRefToContent('inline text', undefined, '/dir');
|
||||||
|
expect(result).toBe('inline text');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveRefList', () => {
|
||||||
|
it('should return undefined for null/undefined refs', () => {
|
||||||
|
expect(resolveRefList(undefined, undefined, '/dir')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle single string ref', () => {
|
||||||
|
const result = resolveRefList('inline', { inline: 'content' }, '/dir');
|
||||||
|
expect(result).toEqual(['content']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle array of refs', () => {
|
||||||
|
const result = resolveRefList(
|
||||||
|
['a', 'b'],
|
||||||
|
{ a: 'content A', b: 'content B' },
|
||||||
|
'/dir',
|
||||||
|
);
|
||||||
|
expect(result).toEqual(['content A', 'content B']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined when no refs resolve', () => {
|
||||||
|
mockExistsSync.mockReturnValue(false);
|
||||||
|
const result = resolveRefList(['nonexistent.md'], undefined, '/dir');
|
||||||
|
// 'nonexistent.md' ends with .md, file doesn't exist, falls back to spec
|
||||||
|
// But the spec is 'nonexistent.md' which is treated as inline
|
||||||
|
expect(result).toEqual(['nonexistent.md']);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolveSectionMap', () => {
|
||||||
|
it('should return undefined for undefined input', () => {
|
||||||
|
expect(resolveSectionMap(undefined, '/dir')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should resolve each entry in the map', () => {
|
||||||
|
const result = resolveSectionMap(
|
||||||
|
{ key1: 'inline value', key2: 'another value' },
|
||||||
|
'/dir',
|
||||||
|
);
|
||||||
|
expect(result).toEqual({
|
||||||
|
key1: 'inline value',
|
||||||
|
key2: 'another value',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('extractPersonaDisplayName', () => {
|
||||||
|
it('should extract name from .md path', () => {
|
||||||
|
expect(extractPersonaDisplayName('coder.md')).toBe('coder');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should extract name from full path', () => {
|
||||||
|
expect(extractPersonaDisplayName('/path/to/architect.md')).toBe('architect');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return name unchanged if no .md extension', () => {
|
||||||
|
expect(extractPersonaDisplayName('coder')).toBe('coder');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resolvePersona', () => {
|
||||||
|
it('should return empty object for undefined persona', () => {
|
||||||
|
expect(resolvePersona(undefined, {}, '/dir')).toEqual({});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use section mapping when available', () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
|
||||||
|
const result = resolvePersona(
|
||||||
|
'coder',
|
||||||
|
{ personas: { coder: './personas/coder.md' } },
|
||||||
|
'/dir',
|
||||||
|
);
|
||||||
|
expect(result.personaSpec).toBe('./personas/coder.md');
|
||||||
|
expect(result.personaPath).toBeDefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should resolve path-based persona directly', () => {
|
||||||
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
|
||||||
|
const result = resolvePersona('./coder.md', {}, '/dir');
|
||||||
|
expect(result.personaSpec).toBe('./coder.md');
|
||||||
|
expect(result.personaPath).toBeDefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should try candidate directories for name-based persona', () => {
|
||||||
|
mockExistsSync.mockImplementation((p) => p === '/facets/coder.md');
|
||||||
|
|
||||||
|
const result = resolvePersona('coder', {}, '/dir', ['/facets']);
|
||||||
|
expect(result.personaSpec).toBe('coder');
|
||||||
|
expect(result.personaPath).toBe('/facets/coder.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fall back to pieceDir resolution when no candidateDirs match', () => {
|
||||||
|
mockExistsSync.mockImplementation((p) => p === join('/dir', 'coder'));
|
||||||
|
|
||||||
|
const result = resolvePersona('coder', {}, '/dir');
|
||||||
|
expect(result.personaSpec).toBe('coder');
|
||||||
|
});
|
||||||
|
});
|
||||||
108
src/__tests__/faceted-prompting/template.test.ts
Normal file
108
src/__tests__/faceted-prompting/template.test.ts
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting template engine.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { renderTemplate } from '../../faceted-prompting/index.js';
|
||||||
|
import {
|
||||||
|
processConditionals,
|
||||||
|
substituteVariables,
|
||||||
|
} from '../../faceted-prompting/template.js';
|
||||||
|
|
||||||
|
describe('processConditionals', () => {
|
||||||
|
it('should include truthy block content', () => {
|
||||||
|
const template = '{{#if showGreeting}}Hello!{{/if}}';
|
||||||
|
const result = processConditionals(template, { showGreeting: true });
|
||||||
|
expect(result).toBe('Hello!');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should exclude falsy block content', () => {
|
||||||
|
const template = '{{#if showGreeting}}Hello!{{/if}}';
|
||||||
|
const result = processConditionals(template, { showGreeting: false });
|
||||||
|
expect(result).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle else branch when truthy', () => {
|
||||||
|
const template = '{{#if isAdmin}}Admin panel{{else}}User panel{{/if}}';
|
||||||
|
const result = processConditionals(template, { isAdmin: true });
|
||||||
|
expect(result).toBe('Admin panel');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle else branch when falsy', () => {
|
||||||
|
const template = '{{#if isAdmin}}Admin panel{{else}}User panel{{/if}}';
|
||||||
|
const result = processConditionals(template, { isAdmin: false });
|
||||||
|
expect(result).toBe('User panel');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should treat non-empty string as truthy', () => {
|
||||||
|
const template = '{{#if name}}Name: provided{{/if}}';
|
||||||
|
const result = processConditionals(template, { name: 'Alice' });
|
||||||
|
expect(result).toBe('Name: provided');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should treat empty string as falsy', () => {
|
||||||
|
const template = '{{#if name}}Name: provided{{/if}}';
|
||||||
|
const result = processConditionals(template, { name: '' });
|
||||||
|
expect(result).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should treat undefined variable as falsy', () => {
|
||||||
|
const template = '{{#if missing}}exists{{else}}missing{{/if}}';
|
||||||
|
const result = processConditionals(template, {});
|
||||||
|
expect(result).toBe('missing');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiline content in blocks', () => {
|
||||||
|
const template = '{{#if hasContent}}line1\nline2\nline3{{/if}}';
|
||||||
|
const result = processConditionals(template, { hasContent: true });
|
||||||
|
expect(result).toBe('line1\nline2\nline3');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('substituteVariables', () => {
|
||||||
|
it('should replace variable with string value', () => {
|
||||||
|
const result = substituteVariables('Hello {{name}}!', { name: 'World' });
|
||||||
|
expect(result).toBe('Hello World!');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should replace true with string "true"', () => {
|
||||||
|
const result = substituteVariables('Value: {{flag}}', { flag: true });
|
||||||
|
expect(result).toBe('Value: true');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should replace false with empty string', () => {
|
||||||
|
const result = substituteVariables('Value: {{flag}}', { flag: false });
|
||||||
|
expect(result).toBe('Value: ');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should replace undefined variable with empty string', () => {
|
||||||
|
const result = substituteVariables('Value: {{missing}}', {});
|
||||||
|
expect(result).toBe('Value: ');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple variables', () => {
|
||||||
|
const result = substituteVariables('{{greeting}} {{name}}!', {
|
||||||
|
greeting: 'Hello',
|
||||||
|
name: 'World',
|
||||||
|
});
|
||||||
|
expect(result).toBe('Hello World!');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('renderTemplate', () => {
|
||||||
|
it('should process conditionals and then substitute variables', () => {
|
||||||
|
const template = '{{#if hasName}}Name: {{name}}{{else}}Anonymous{{/if}}';
|
||||||
|
const result = renderTemplate(template, { hasName: true, name: 'Alice' });
|
||||||
|
expect(result).toBe('Name: Alice');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle template with no conditionals', () => {
|
||||||
|
const result = renderTemplate('Hello {{name}}!', { name: 'World' });
|
||||||
|
expect(result).toBe('Hello World!');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle template with no variables', () => {
|
||||||
|
const result = renderTemplate('Static text', {});
|
||||||
|
expect(result).toBe('Static text');
|
||||||
|
});
|
||||||
|
});
|
||||||
100
src/__tests__/faceted-prompting/truncation.test.ts
Normal file
100
src/__tests__/faceted-prompting/truncation.test.ts
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting truncation module.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import {
|
||||||
|
trimContextContent,
|
||||||
|
renderConflictNotice,
|
||||||
|
prepareKnowledgeContent,
|
||||||
|
preparePolicyContent,
|
||||||
|
} from '../../faceted-prompting/index.js';
|
||||||
|
|
||||||
|
describe('trimContextContent', () => {
|
||||||
|
it('should return content unchanged when under limit', () => {
|
||||||
|
const result = trimContextContent('short content', 100);
|
||||||
|
expect(result.content).toBe('short content');
|
||||||
|
expect(result.truncated).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should truncate content exceeding limit', () => {
|
||||||
|
const longContent = 'a'.repeat(150);
|
||||||
|
const result = trimContextContent(longContent, 100);
|
||||||
|
expect(result.content).toBe('a'.repeat(100) + '\n...TRUNCATED...');
|
||||||
|
expect(result.truncated).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not truncate content at exact limit', () => {
|
||||||
|
const exactContent = 'b'.repeat(100);
|
||||||
|
const result = trimContextContent(exactContent, 100);
|
||||||
|
expect(result.content).toBe(exactContent);
|
||||||
|
expect(result.truncated).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('renderConflictNotice', () => {
|
||||||
|
it('should return the standard conflict notice', () => {
|
||||||
|
const notice = renderConflictNotice();
|
||||||
|
expect(notice).toBe('If prompt content conflicts with source files, source files take precedence.');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('prepareKnowledgeContent', () => {
|
||||||
|
it('should append conflict notice without sourcePath', () => {
|
||||||
|
const result = prepareKnowledgeContent('knowledge text', 2000);
|
||||||
|
expect(result).toContain('knowledge text');
|
||||||
|
expect(result).toContain('If prompt content conflicts with source files');
|
||||||
|
expect(result).not.toContain('Knowledge Source:');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append source path when provided', () => {
|
||||||
|
const result = prepareKnowledgeContent('knowledge text', 2000, '/path/to/knowledge.md');
|
||||||
|
expect(result).toContain('Knowledge Source: /path/to/knowledge.md');
|
||||||
|
expect(result).toContain('If prompt content conflicts with source files');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append truncation notice when truncated with sourcePath', () => {
|
||||||
|
const longContent = 'x'.repeat(3000);
|
||||||
|
const result = prepareKnowledgeContent(longContent, 2000, '/path/to/knowledge.md');
|
||||||
|
expect(result).toContain('...TRUNCATED...');
|
||||||
|
expect(result).toContain('Knowledge is truncated. You MUST consult the source files before making decisions.');
|
||||||
|
expect(result).toContain('Knowledge Source: /path/to/knowledge.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include truncation notice when truncated without sourcePath', () => {
|
||||||
|
const longContent = 'x'.repeat(3000);
|
||||||
|
const result = prepareKnowledgeContent(longContent, 2000);
|
||||||
|
expect(result).toContain('...TRUNCATED...');
|
||||||
|
expect(result).not.toContain('Knowledge is truncated');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('preparePolicyContent', () => {
|
||||||
|
it('should append conflict notice without sourcePath', () => {
|
||||||
|
const result = preparePolicyContent('policy text', 2000);
|
||||||
|
expect(result).toContain('policy text');
|
||||||
|
expect(result).toContain('If prompt content conflicts with source files');
|
||||||
|
expect(result).not.toContain('Policy Source:');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append source path when provided', () => {
|
||||||
|
const result = preparePolicyContent('policy text', 2000, '/path/to/policy.md');
|
||||||
|
expect(result).toContain('Policy Source: /path/to/policy.md');
|
||||||
|
expect(result).toContain('If prompt content conflicts with source files');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append authoritative notice when truncated with sourcePath', () => {
|
||||||
|
const longContent = 'y'.repeat(3000);
|
||||||
|
const result = preparePolicyContent(longContent, 2000, '/path/to/policy.md');
|
||||||
|
expect(result).toContain('...TRUNCATED...');
|
||||||
|
expect(result).toContain('Policy is authoritative. If truncated, you MUST read the full policy file and follow it strictly.');
|
||||||
|
expect(result).toContain('Policy Source: /path/to/policy.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include authoritative notice when truncated without sourcePath', () => {
|
||||||
|
const longContent = 'y'.repeat(3000);
|
||||||
|
const result = preparePolicyContent(longContent, 2000);
|
||||||
|
expect(result).toContain('...TRUNCATED...');
|
||||||
|
expect(result).not.toContain('Policy is authoritative');
|
||||||
|
});
|
||||||
|
});
|
||||||
87
src/__tests__/faceted-prompting/types.test.ts
Normal file
87
src/__tests__/faceted-prompting/types.test.ts
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for faceted-prompting type definitions.
|
||||||
|
*
|
||||||
|
* Verifies that types are correctly exported and usable.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import type {
|
||||||
|
FacetKind,
|
||||||
|
FacetContent,
|
||||||
|
FacetSet,
|
||||||
|
ComposedPrompt,
|
||||||
|
ComposeOptions,
|
||||||
|
} from '../../faceted-prompting/index.js';
|
||||||
|
|
||||||
|
describe('FacetKind type', () => {
|
||||||
|
it('should accept valid facet kinds', () => {
|
||||||
|
const kinds: FacetKind[] = [
|
||||||
|
'personas',
|
||||||
|
'policies',
|
||||||
|
'knowledge',
|
||||||
|
'instructions',
|
||||||
|
'output-contracts',
|
||||||
|
];
|
||||||
|
expect(kinds).toHaveLength(5);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('FacetContent interface', () => {
|
||||||
|
it('should accept body with sourcePath', () => {
|
||||||
|
const content: FacetContent = {
|
||||||
|
body: 'You are a helpful assistant.',
|
||||||
|
sourcePath: '/path/to/persona.md',
|
||||||
|
};
|
||||||
|
expect(content.body).toBe('You are a helpful assistant.');
|
||||||
|
expect(content.sourcePath).toBe('/path/to/persona.md');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should accept body without sourcePath', () => {
|
||||||
|
const content: FacetContent = {
|
||||||
|
body: 'Inline content',
|
||||||
|
};
|
||||||
|
expect(content.body).toBe('Inline content');
|
||||||
|
expect(content.sourcePath).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('FacetSet interface', () => {
|
||||||
|
it('should accept a complete facet set', () => {
|
||||||
|
const set: FacetSet = {
|
||||||
|
persona: { body: 'You are a coder.' },
|
||||||
|
policies: [{ body: 'Follow clean code.' }],
|
||||||
|
knowledge: [{ body: 'Architecture docs.' }],
|
||||||
|
instruction: { body: 'Implement the feature.' },
|
||||||
|
};
|
||||||
|
expect(set.persona?.body).toBe('You are a coder.');
|
||||||
|
expect(set.policies).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should accept a partial facet set', () => {
|
||||||
|
const set: FacetSet = {
|
||||||
|
instruction: { body: 'Do the task.' },
|
||||||
|
};
|
||||||
|
expect(set.persona).toBeUndefined();
|
||||||
|
expect(set.instruction?.body).toBe('Do the task.');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('ComposedPrompt interface', () => {
|
||||||
|
it('should hold systemPrompt and userMessage', () => {
|
||||||
|
const prompt: ComposedPrompt = {
|
||||||
|
systemPrompt: 'You are a coder.',
|
||||||
|
userMessage: 'Implement feature X.',
|
||||||
|
};
|
||||||
|
expect(prompt.systemPrompt).toBe('You are a coder.');
|
||||||
|
expect(prompt.userMessage).toBe('Implement feature X.');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('ComposeOptions interface', () => {
|
||||||
|
it('should hold contextMaxChars', () => {
|
||||||
|
const options: ComposeOptions = {
|
||||||
|
contextMaxChars: 2000,
|
||||||
|
};
|
||||||
|
expect(options.contextMaxChars).toBe(2000);
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -1,14 +1,64 @@
|
|||||||
/**
|
/**
|
||||||
* Tests for github/pr module
|
* Tests for github/pr module
|
||||||
*
|
*
|
||||||
* Tests buildPrBody formatting.
|
* Tests buildPrBody formatting and findExistingPr logic.
|
||||||
* createPullRequest/pushBranch call `gh`/`git` CLI, not unit-tested here.
|
* createPullRequest/pushBranch/commentOnPr call `gh`/`git` CLI, not unit-tested here.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { describe, it, expect } from 'vitest';
|
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||||
import { buildPrBody } from '../infra/github/pr.js';
|
|
||||||
|
const mockExecFileSync = vi.fn();
|
||||||
|
vi.mock('node:child_process', () => ({
|
||||||
|
execFileSync: (...args: unknown[]) => mockExecFileSync(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/github/issue.js', () => ({
|
||||||
|
checkGhCli: vi.fn().mockReturnValue({ available: true }),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
createLogger: () => ({
|
||||||
|
info: vi.fn(),
|
||||||
|
debug: vi.fn(),
|
||||||
|
error: vi.fn(),
|
||||||
|
}),
|
||||||
|
getErrorMessage: (e: unknown) => String(e),
|
||||||
|
}));
|
||||||
|
|
||||||
|
import { buildPrBody, findExistingPr } from '../infra/github/pr.js';
|
||||||
import type { GitHubIssue } from '../infra/github/types.js';
|
import type { GitHubIssue } from '../infra/github/types.js';
|
||||||
|
|
||||||
|
describe('findExistingPr', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('オープンな PR がある場合はその PR を返す', () => {
|
||||||
|
mockExecFileSync.mockReturnValue(JSON.stringify([{ number: 42, url: 'https://github.com/org/repo/pull/42' }]));
|
||||||
|
|
||||||
|
const result = findExistingPr('/project', 'task/fix-bug');
|
||||||
|
|
||||||
|
expect(result).toEqual({ number: 42, url: 'https://github.com/org/repo/pull/42' });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('PR がない場合は undefined を返す', () => {
|
||||||
|
mockExecFileSync.mockReturnValue(JSON.stringify([]));
|
||||||
|
|
||||||
|
const result = findExistingPr('/project', 'task/fix-bug');
|
||||||
|
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('gh CLI が失敗した場合は undefined を返す', () => {
|
||||||
|
mockExecFileSync.mockImplementation(() => { throw new Error('gh: command not found'); });
|
||||||
|
|
||||||
|
const result = findExistingPr('/project', 'task/fix-bug');
|
||||||
|
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
describe('buildPrBody', () => {
|
describe('buildPrBody', () => {
|
||||||
it('should build body with single issue and report', () => {
|
it('should build body with single issue and report', () => {
|
||||||
const issue: GitHubIssue = {
|
const issue: GitHubIssue = {
|
||||||
|
|||||||
@ -7,14 +7,35 @@ import { tmpdir } from 'node:os';
|
|||||||
import { dirname, join } from 'node:path';
|
import { dirname, join } from 'node:path';
|
||||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||||
|
|
||||||
const loadGlobalConfigMock = vi.hoisted(() => vi.fn());
|
const loadConfigMock = vi.hoisted(() => vi.fn());
|
||||||
|
|
||||||
vi.mock('../infra/config/paths.js', () => ({
|
vi.mock('../infra/config/paths.js', () => ({
|
||||||
getGlobalConfigDir: () => '/tmp/.takt',
|
getGlobalConfigDir: () => '/tmp/.takt',
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
vi.mock('../infra/config/loadConfig.js', () => ({
|
||||||
loadGlobalConfig: loadGlobalConfigMock,
|
loadConfig: loadConfigMock,
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/resolvePieceConfigValue.js', () => ({
|
||||||
|
resolvePieceConfigValue: (_projectDir: string, key: string) => {
|
||||||
|
const loaded = loadConfigMock() as Record<string, Record<string, unknown>>;
|
||||||
|
const global = loaded?.global ?? {};
|
||||||
|
const project = loaded?.project ?? {};
|
||||||
|
const merged: Record<string, unknown> = { ...global, ...project };
|
||||||
|
return merged[key];
|
||||||
|
},
|
||||||
|
resolvePieceConfigValues: (_projectDir: string, keys: readonly string[]) => {
|
||||||
|
const loaded = loadConfigMock() as Record<string, Record<string, unknown>>;
|
||||||
|
const global = loaded?.global ?? {};
|
||||||
|
const project = loaded?.project ?? {};
|
||||||
|
const merged: Record<string, unknown> = { ...global, ...project };
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
result[key] = merged[key];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
}));
|
}));
|
||||||
|
|
||||||
const { getPieceCategoriesPath, resetPieceCategories } = await import(
|
const { getPieceCategoriesPath, resetPieceCategories } = await import(
|
||||||
@ -28,17 +49,18 @@ function createTempCategoriesPath(): string {
|
|||||||
|
|
||||||
describe('getPieceCategoriesPath', () => {
|
describe('getPieceCategoriesPath', () => {
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
loadGlobalConfigMock.mockReset();
|
loadConfigMock.mockReset();
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should return configured path when pieceCategoriesFile is set', () => {
|
it('should return configured path when pieceCategoriesFile is set', () => {
|
||||||
// Given
|
// Given
|
||||||
loadGlobalConfigMock.mockReturnValue({
|
loadConfigMock.mockReturnValue({
|
||||||
pieceCategoriesFile: '/custom/piece-categories.yaml',
|
global: { pieceCategoriesFile: '/custom/piece-categories.yaml' },
|
||||||
|
project: {},
|
||||||
});
|
});
|
||||||
|
|
||||||
// When
|
// When
|
||||||
const path = getPieceCategoriesPath();
|
const path = getPieceCategoriesPath(process.cwd());
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
expect(path).toBe('/custom/piece-categories.yaml');
|
expect(path).toBe('/custom/piece-categories.yaml');
|
||||||
@ -46,10 +68,10 @@ describe('getPieceCategoriesPath', () => {
|
|||||||
|
|
||||||
it('should return default path when pieceCategoriesFile is not set', () => {
|
it('should return default path when pieceCategoriesFile is not set', () => {
|
||||||
// Given
|
// Given
|
||||||
loadGlobalConfigMock.mockReturnValue({});
|
loadConfigMock.mockReturnValue({ global: {}, project: {} });
|
||||||
|
|
||||||
// When
|
// When
|
||||||
const path = getPieceCategoriesPath();
|
const path = getPieceCategoriesPath(process.cwd());
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
expect(path).toBe('/tmp/.takt/preferences/piece-categories.yaml');
|
expect(path).toBe('/tmp/.takt/preferences/piece-categories.yaml');
|
||||||
@ -57,12 +79,12 @@ describe('getPieceCategoriesPath', () => {
|
|||||||
|
|
||||||
it('should rethrow when global config loading fails', () => {
|
it('should rethrow when global config loading fails', () => {
|
||||||
// Given
|
// Given
|
||||||
loadGlobalConfigMock.mockImplementation(() => {
|
loadConfigMock.mockImplementation(() => {
|
||||||
throw new Error('invalid global config');
|
throw new Error('invalid global config');
|
||||||
});
|
});
|
||||||
|
|
||||||
// When / Then
|
// When / Then
|
||||||
expect(() => getPieceCategoriesPath()).toThrow('invalid global config');
|
expect(() => getPieceCategoriesPath(process.cwd())).toThrow('invalid global config');
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -70,7 +92,7 @@ describe('resetPieceCategories', () => {
|
|||||||
const tempRoots: string[] = [];
|
const tempRoots: string[] = [];
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
loadGlobalConfigMock.mockReset();
|
loadConfigMock.mockReset();
|
||||||
});
|
});
|
||||||
|
|
||||||
afterEach(() => {
|
afterEach(() => {
|
||||||
@ -84,12 +106,13 @@ describe('resetPieceCategories', () => {
|
|||||||
// Given
|
// Given
|
||||||
const categoriesPath = createTempCategoriesPath();
|
const categoriesPath = createTempCategoriesPath();
|
||||||
tempRoots.push(dirname(dirname(categoriesPath)));
|
tempRoots.push(dirname(dirname(categoriesPath)));
|
||||||
loadGlobalConfigMock.mockReturnValue({
|
loadConfigMock.mockReturnValue({
|
||||||
pieceCategoriesFile: categoriesPath,
|
global: { pieceCategoriesFile: categoriesPath },
|
||||||
|
project: {},
|
||||||
});
|
});
|
||||||
|
|
||||||
// When
|
// When
|
||||||
resetPieceCategories();
|
resetPieceCategories(process.cwd());
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
expect(existsSync(dirname(categoriesPath))).toBe(true);
|
expect(existsSync(dirname(categoriesPath))).toBe(true);
|
||||||
@ -102,14 +125,15 @@ describe('resetPieceCategories', () => {
|
|||||||
const categoriesDir = dirname(categoriesPath);
|
const categoriesDir = dirname(categoriesPath);
|
||||||
const tempRoot = dirname(categoriesDir);
|
const tempRoot = dirname(categoriesDir);
|
||||||
tempRoots.push(tempRoot);
|
tempRoots.push(tempRoot);
|
||||||
loadGlobalConfigMock.mockReturnValue({
|
loadConfigMock.mockReturnValue({
|
||||||
pieceCategoriesFile: categoriesPath,
|
global: { pieceCategoriesFile: categoriesPath },
|
||||||
|
project: {},
|
||||||
});
|
});
|
||||||
mkdirSync(categoriesDir, { recursive: true });
|
mkdirSync(categoriesDir, { recursive: true });
|
||||||
writeFileSync(categoriesPath, 'piece_categories:\n old:\n - stale-piece\n', 'utf-8');
|
writeFileSync(categoriesPath, 'piece_categories:\n old:\n - stale-piece\n', 'utf-8');
|
||||||
|
|
||||||
// When
|
// When
|
||||||
resetPieceCategories();
|
resetPieceCategories(process.cwd());
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
expect(readFileSync(categoriesPath, 'utf-8')).toBe('piece_categories: {}\n');
|
expect(readFileSync(categoriesPath, 'utf-8')).toBe('piece_categories: {}\n');
|
||||||
|
|||||||
@ -39,7 +39,6 @@ describe('loadGlobalConfig', () => {
|
|||||||
const config = loadGlobalConfig();
|
const config = loadGlobalConfig();
|
||||||
|
|
||||||
expect(config.language).toBe('en');
|
expect(config.language).toBe('en');
|
||||||
expect(config.defaultPiece).toBe('default');
|
|
||||||
expect(config.logLevel).toBe('info');
|
expect(config.logLevel).toBe('info');
|
||||||
expect(config.provider).toBe('claude');
|
expect(config.provider).toBe('claude');
|
||||||
expect(config.model).toBeUndefined();
|
expect(config.model).toBeUndefined();
|
||||||
@ -79,6 +78,23 @@ describe('loadGlobalConfig', () => {
|
|||||||
expect(config.logLevel).toBe('debug');
|
expect(config.logLevel).toBe('debug');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should apply env override for nested provider_options key', () => {
|
||||||
|
const original = process.env.TAKT_PROVIDER_OPTIONS_CLAUDE_SANDBOX_ALLOW_UNSANDBOXED_COMMANDS;
|
||||||
|
try {
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CLAUDE_SANDBOX_ALLOW_UNSANDBOXED_COMMANDS = 'true';
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
|
||||||
|
const config = loadGlobalConfig();
|
||||||
|
expect(config.providerOptions?.claude?.sandbox?.allowUnsandboxedCommands).toBe(true);
|
||||||
|
} finally {
|
||||||
|
if (original === undefined) {
|
||||||
|
delete process.env.TAKT_PROVIDER_OPTIONS_CLAUDE_SANDBOX_ALLOW_UNSANDBOXED_COMMANDS;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CLAUDE_SANDBOX_ALLOW_UNSANDBOXED_COMMANDS = original;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
it('should load pipeline config from config.yaml', () => {
|
it('should load pipeline config from config.yaml', () => {
|
||||||
const taktDir = join(testHomeDir, '.takt');
|
const taktDir = join(testHomeDir, '.takt');
|
||||||
mkdirSync(taktDir, { recursive: true });
|
mkdirSync(taktDir, { recursive: true });
|
||||||
|
|||||||
@ -97,7 +97,6 @@ describe('GlobalConfig load/save with API keys', () => {
|
|||||||
it('should load config with API keys from YAML', () => {
|
it('should load config with API keys from YAML', () => {
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'anthropic_api_key: sk-ant-from-yaml',
|
'anthropic_api_key: sk-ant-from-yaml',
|
||||||
@ -113,7 +112,6 @@ describe('GlobalConfig load/save with API keys', () => {
|
|||||||
it('should load config without API keys', () => {
|
it('should load config without API keys', () => {
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
@ -128,7 +126,6 @@ describe('GlobalConfig load/save with API keys', () => {
|
|||||||
// Write initial config
|
// Write initial config
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
@ -147,7 +144,6 @@ describe('GlobalConfig load/save with API keys', () => {
|
|||||||
it('should not persist API keys when not set', () => {
|
it('should not persist API keys when not set', () => {
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
@ -183,7 +179,6 @@ describe('resolveAnthropicApiKey', () => {
|
|||||||
process.env['TAKT_ANTHROPIC_API_KEY'] = 'sk-ant-from-env';
|
process.env['TAKT_ANTHROPIC_API_KEY'] = 'sk-ant-from-env';
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'anthropic_api_key: sk-ant-from-yaml',
|
'anthropic_api_key: sk-ant-from-yaml',
|
||||||
@ -198,7 +193,6 @@ describe('resolveAnthropicApiKey', () => {
|
|||||||
delete process.env['TAKT_ANTHROPIC_API_KEY'];
|
delete process.env['TAKT_ANTHROPIC_API_KEY'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'anthropic_api_key: sk-ant-from-yaml',
|
'anthropic_api_key: sk-ant-from-yaml',
|
||||||
@ -213,7 +207,6 @@ describe('resolveAnthropicApiKey', () => {
|
|||||||
delete process.env['TAKT_ANTHROPIC_API_KEY'];
|
delete process.env['TAKT_ANTHROPIC_API_KEY'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
@ -254,7 +247,6 @@ describe('resolveOpenaiApiKey', () => {
|
|||||||
process.env['TAKT_OPENAI_API_KEY'] = 'sk-openai-from-env';
|
process.env['TAKT_OPENAI_API_KEY'] = 'sk-openai-from-env';
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'openai_api_key: sk-openai-from-yaml',
|
'openai_api_key: sk-openai-from-yaml',
|
||||||
@ -269,7 +261,6 @@ describe('resolveOpenaiApiKey', () => {
|
|||||||
delete process.env['TAKT_OPENAI_API_KEY'];
|
delete process.env['TAKT_OPENAI_API_KEY'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'openai_api_key: sk-openai-from-yaml',
|
'openai_api_key: sk-openai-from-yaml',
|
||||||
@ -284,7 +275,6 @@ describe('resolveOpenaiApiKey', () => {
|
|||||||
delete process.env['TAKT_OPENAI_API_KEY'];
|
delete process.env['TAKT_OPENAI_API_KEY'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
@ -318,7 +308,6 @@ describe('resolveCodexCliPath', () => {
|
|||||||
process.env['TAKT_CODEX_CLI_PATH'] = envCodexPath;
|
process.env['TAKT_CODEX_CLI_PATH'] = envCodexPath;
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: codex',
|
'provider: codex',
|
||||||
`codex_cli_path: ${configCodexPath}`,
|
`codex_cli_path: ${configCodexPath}`,
|
||||||
@ -334,7 +323,6 @@ describe('resolveCodexCliPath', () => {
|
|||||||
const configCodexPath = createExecutableFile('config-codex');
|
const configCodexPath = createExecutableFile('config-codex');
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: codex',
|
'provider: codex',
|
||||||
`codex_cli_path: ${configCodexPath}`,
|
`codex_cli_path: ${configCodexPath}`,
|
||||||
@ -349,7 +337,6 @@ describe('resolveCodexCliPath', () => {
|
|||||||
delete process.env['TAKT_CODEX_CLI_PATH'];
|
delete process.env['TAKT_CODEX_CLI_PATH'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: codex',
|
'provider: codex',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
@ -395,7 +382,6 @@ describe('resolveCodexCliPath', () => {
|
|||||||
delete process.env['TAKT_CODEX_CLI_PATH'];
|
delete process.env['TAKT_CODEX_CLI_PATH'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: codex',
|
'provider: codex',
|
||||||
`codex_cli_path: ${join(testDir, 'missing-codex-from-config')}`,
|
`codex_cli_path: ${join(testDir, 'missing-codex-from-config')}`,
|
||||||
@ -427,7 +413,6 @@ describe('resolveOpencodeApiKey', () => {
|
|||||||
process.env['TAKT_OPENCODE_API_KEY'] = 'sk-opencode-from-env';
|
process.env['TAKT_OPENCODE_API_KEY'] = 'sk-opencode-from-env';
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'opencode_api_key: sk-opencode-from-yaml',
|
'opencode_api_key: sk-opencode-from-yaml',
|
||||||
@ -442,7 +427,6 @@ describe('resolveOpencodeApiKey', () => {
|
|||||||
delete process.env['TAKT_OPENCODE_API_KEY'];
|
delete process.env['TAKT_OPENCODE_API_KEY'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
'opencode_api_key: sk-opencode-from-yaml',
|
'opencode_api_key: sk-opencode-from-yaml',
|
||||||
@ -457,7 +441,6 @@ describe('resolveOpencodeApiKey', () => {
|
|||||||
delete process.env['TAKT_OPENCODE_API_KEY'];
|
delete process.env['TAKT_OPENCODE_API_KEY'];
|
||||||
const yaml = [
|
const yaml = [
|
||||||
'language: en',
|
'language: en',
|
||||||
'default_piece: default',
|
|
||||||
'log_level: info',
|
'log_level: info',
|
||||||
'provider: claude',
|
'provider: claude',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
|
|||||||
@ -149,9 +149,10 @@ describe('runInstructMode', () => {
|
|||||||
expect(result.action).toBe('cancel');
|
expect(result.action).toBe('cancel');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use custom action selector without create_issue option', async () => {
|
it('should exclude execute from action selector options', async () => {
|
||||||
setupRawStdin(toRawInputs(['task', '/go']));
|
setupRawStdin(toRawInputs(['task', '/go']));
|
||||||
setupMockProvider(['response', 'Task summary.']);
|
setupMockProvider(['response', 'Task summary.']);
|
||||||
|
mockSelectOption.mockResolvedValue('save_task');
|
||||||
|
|
||||||
await runInstructMode('/project', 'branch context', 'feature-branch', 'my-task', 'Do something', '');
|
await runInstructMode('/project', 'branch context', 'feature-branch', 'my-task', 'Do something', '');
|
||||||
|
|
||||||
@ -161,7 +162,7 @@ describe('runInstructMode', () => {
|
|||||||
expect(selectCall).toBeDefined();
|
expect(selectCall).toBeDefined();
|
||||||
const options = selectCall![1] as Array<{ value: string }>;
|
const options = selectCall![1] as Array<{ value: string }>;
|
||||||
const values = options.map((o) => o.value);
|
const values = options.map((o) => o.value);
|
||||||
expect(values).toContain('execute');
|
expect(values).not.toContain('execute');
|
||||||
expect(values).toContain('save_task');
|
expect(values).toContain('save_task');
|
||||||
expect(values).toContain('continue');
|
expect(values).toContain('continue');
|
||||||
expect(values).not.toContain('create_issue');
|
expect(values).not.toContain('create_issue');
|
||||||
@ -215,4 +216,63 @@ describe('runInstructMode', () => {
|
|||||||
}),
|
}),
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should inject previousOrderContent into template variables when provided', async () => {
|
||||||
|
setupRawStdin(toRawInputs(['/cancel']));
|
||||||
|
setupMockProvider([]);
|
||||||
|
|
||||||
|
await runInstructMode('/project', 'branch context', 'feature-branch', 'my-task', 'Do something', '', undefined, undefined, '# Previous Order\nDo the thing');
|
||||||
|
|
||||||
|
expect(mockLoadTemplate).toHaveBeenCalledWith(
|
||||||
|
'score_instruct_system_prompt',
|
||||||
|
'en',
|
||||||
|
expect.objectContaining({
|
||||||
|
hasOrderContent: true,
|
||||||
|
orderContent: '# Previous Order\nDo the thing',
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set hasOrderContent=false when previousOrderContent is null', async () => {
|
||||||
|
setupRawStdin(toRawInputs(['/cancel']));
|
||||||
|
setupMockProvider([]);
|
||||||
|
|
||||||
|
await runInstructMode('/project', 'branch context', 'feature-branch', 'my-task', 'Do something', '', undefined, undefined, null);
|
||||||
|
|
||||||
|
expect(mockLoadTemplate).toHaveBeenCalledWith(
|
||||||
|
'score_instruct_system_prompt',
|
||||||
|
'en',
|
||||||
|
expect.objectContaining({
|
||||||
|
hasOrderContent: false,
|
||||||
|
orderContent: '',
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return execute with previous order content on /replay when previousOrderContent is set', async () => {
|
||||||
|
setupRawStdin(toRawInputs(['/replay']));
|
||||||
|
setupMockProvider([]);
|
||||||
|
|
||||||
|
const previousOrder = '# Previous Order\nDo the thing';
|
||||||
|
const result = await runInstructMode(
|
||||||
|
'/project', 'branch context', 'feature-branch', 'my-task', 'Do something', '',
|
||||||
|
undefined, undefined, previousOrder,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(result.action).toBe('execute');
|
||||||
|
expect(result.task).toBe(previousOrder);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should show error and continue when /replay is used without previousOrderContent', async () => {
|
||||||
|
setupRawStdin(toRawInputs(['/replay', '/cancel']));
|
||||||
|
setupMockProvider([]);
|
||||||
|
|
||||||
|
const result = await runInstructMode(
|
||||||
|
'/project', 'branch context', 'feature-branch', 'my-task', 'Do something', '',
|
||||||
|
undefined, undefined, null,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(result.action).toBe('cancel');
|
||||||
|
expect(mockInfo).toHaveBeenCalledWith('Mock label');
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@ -6,8 +6,10 @@ import { describe, expect, it } from 'vitest';
|
|||||||
|
|
||||||
import {
|
import {
|
||||||
buildSummaryPrompt,
|
buildSummaryPrompt,
|
||||||
|
buildSummaryActionOptions,
|
||||||
formatTaskHistorySummary,
|
formatTaskHistorySummary,
|
||||||
type PieceContext,
|
type PieceContext,
|
||||||
|
type SummaryActionLabels,
|
||||||
type TaskHistorySummaryItem,
|
type TaskHistorySummaryItem,
|
||||||
} from '../features/interactive/interactive.js';
|
} from '../features/interactive/interactive.js';
|
||||||
|
|
||||||
@ -100,3 +102,54 @@ describe('buildSummaryPrompt', () => {
|
|||||||
expect(summary).toContain('User: Improve parser');
|
expect(summary).toContain('User: Improve parser');
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe('buildSummaryActionOptions', () => {
|
||||||
|
const labels: SummaryActionLabels = {
|
||||||
|
execute: 'Execute now',
|
||||||
|
saveTask: 'Save as Task',
|
||||||
|
continue: 'Continue editing',
|
||||||
|
};
|
||||||
|
|
||||||
|
it('should include all base actions when no exclude is given', () => {
|
||||||
|
const options = buildSummaryActionOptions(labels);
|
||||||
|
const values = options.map((o) => o.value);
|
||||||
|
|
||||||
|
expect(values).toEqual(['execute', 'save_task', 'continue']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should exclude specified actions', () => {
|
||||||
|
const options = buildSummaryActionOptions(labels, [], ['execute']);
|
||||||
|
const values = options.map((o) => o.value);
|
||||||
|
|
||||||
|
expect(values).toEqual(['save_task', 'continue']);
|
||||||
|
expect(values).not.toContain('execute');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should exclude multiple actions', () => {
|
||||||
|
const options = buildSummaryActionOptions(labels, [], ['execute', 'continue']);
|
||||||
|
const values = options.map((o) => o.value);
|
||||||
|
|
||||||
|
expect(values).toEqual(['save_task']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle append and exclude together', () => {
|
||||||
|
const labelsWithIssue: SummaryActionLabels = {
|
||||||
|
...labels,
|
||||||
|
createIssue: 'Create Issue',
|
||||||
|
};
|
||||||
|
const options = buildSummaryActionOptions(labelsWithIssue, ['create_issue'], ['execute']);
|
||||||
|
const values = options.map((o) => o.value);
|
||||||
|
|
||||||
|
expect(values).toEqual(['save_task', 'continue', 'create_issue']);
|
||||||
|
expect(values).not.toContain('execute');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty exclude by default (backward compatible)', () => {
|
||||||
|
const options = buildSummaryActionOptions(labels, []);
|
||||||
|
const values = options.map((o) => o.value);
|
||||||
|
|
||||||
|
expect(values).toContain('execute');
|
||||||
|
expect(values).toContain('save_task');
|
||||||
|
expect(values).toContain('continue');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|||||||
203
src/__tests__/it-config-provider-options.test.ts
Normal file
203
src/__tests__/it-config-provider-options.test.ts
Normal file
@ -0,0 +1,203 @@
|
|||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||||
|
import { mkdirSync, rmSync, writeFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { randomUUID } from 'node:crypto';
|
||||||
|
|
||||||
|
vi.mock('../agents/runner.js', () => ({
|
||||||
|
runAgent: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../agents/ai-judge.js', async (importOriginal) => {
|
||||||
|
const original = await importOriginal<typeof import('../agents/ai-judge.js')>();
|
||||||
|
return {
|
||||||
|
...original,
|
||||||
|
callAiJudge: vi.fn().mockResolvedValue(-1),
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
vi.mock('../core/piece/phase-runner.js', () => ({
|
||||||
|
needsStatusJudgmentPhase: vi.fn().mockReturnValue(false),
|
||||||
|
runReportPhase: vi.fn().mockResolvedValue(undefined),
|
||||||
|
runStatusJudgmentPhase: vi.fn().mockResolvedValue({ tag: '', ruleIndex: 0, method: 'auto_select' }),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
generateReportDir: vi.fn().mockReturnValue('test-report-dir'),
|
||||||
|
}));
|
||||||
|
|
||||||
|
import { runAgent } from '../agents/runner.js';
|
||||||
|
import { executeTask } from '../features/tasks/execute/taskExecution.js';
|
||||||
|
import { invalidateGlobalConfigCache } from '../infra/config/index.js';
|
||||||
|
|
||||||
|
interface TestEnv {
|
||||||
|
projectDir: string;
|
||||||
|
globalDir: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createEnv(): TestEnv {
|
||||||
|
const root = join(tmpdir(), `takt-it-config-${randomUUID()}`);
|
||||||
|
const projectDir = join(root, 'project');
|
||||||
|
const globalDir = join(root, 'global');
|
||||||
|
|
||||||
|
mkdirSync(projectDir, { recursive: true });
|
||||||
|
mkdirSync(join(projectDir, '.takt', 'pieces', 'personas'), { recursive: true });
|
||||||
|
mkdirSync(globalDir, { recursive: true });
|
||||||
|
|
||||||
|
writeFileSync(
|
||||||
|
join(projectDir, '.takt', 'pieces', 'config-it.yaml'),
|
||||||
|
[
|
||||||
|
'name: config-it',
|
||||||
|
'description: config provider options integration test',
|
||||||
|
'max_movements: 3',
|
||||||
|
'initial_movement: plan',
|
||||||
|
'movements:',
|
||||||
|
' - name: plan',
|
||||||
|
' persona: ./personas/planner.md',
|
||||||
|
' instruction: "{task}"',
|
||||||
|
' rules:',
|
||||||
|
' - condition: done',
|
||||||
|
' next: COMPLETE',
|
||||||
|
].join('\n'),
|
||||||
|
'utf-8',
|
||||||
|
);
|
||||||
|
writeFileSync(join(projectDir, '.takt', 'pieces', 'personas', 'planner.md'), 'You are planner.', 'utf-8');
|
||||||
|
|
||||||
|
return { projectDir, globalDir };
|
||||||
|
}
|
||||||
|
|
||||||
|
function setGlobalConfig(globalDir: string, body: string): void {
|
||||||
|
writeFileSync(join(globalDir, 'config.yaml'), body, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
function setProjectConfig(projectDir: string, body: string): void {
|
||||||
|
writeFileSync(join(projectDir, '.takt', 'config.yaml'), body, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeDoneResponse() {
|
||||||
|
return {
|
||||||
|
persona: 'planner',
|
||||||
|
status: 'done',
|
||||||
|
content: '[PLAN:1]\ndone',
|
||||||
|
timestamp: new Date(),
|
||||||
|
sessionId: 'session-it',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
describe('IT: config provider_options reflection', () => {
|
||||||
|
let env: TestEnv;
|
||||||
|
let originalConfigDir: string | undefined;
|
||||||
|
let originalEnvCodex: string | undefined;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
env = createEnv();
|
||||||
|
originalConfigDir = process.env.TAKT_CONFIG_DIR;
|
||||||
|
originalEnvCodex = process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
|
||||||
|
process.env.TAKT_CONFIG_DIR = env.globalDir;
|
||||||
|
delete process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
|
||||||
|
vi.mocked(runAgent).mockResolvedValue(makeDoneResponse());
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
if (originalConfigDir === undefined) {
|
||||||
|
delete process.env.TAKT_CONFIG_DIR;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_CONFIG_DIR = originalConfigDir;
|
||||||
|
}
|
||||||
|
if (originalEnvCodex === undefined) {
|
||||||
|
delete process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS = originalEnvCodex;
|
||||||
|
}
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
rmSync(join(env.projectDir, '..'), { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('global provider_options should be passed to runAgent', async () => {
|
||||||
|
setGlobalConfig(
|
||||||
|
env.globalDir,
|
||||||
|
[
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: true',
|
||||||
|
].join('\n'),
|
||||||
|
);
|
||||||
|
|
||||||
|
const ok = await executeTask({
|
||||||
|
task: 'test task',
|
||||||
|
cwd: env.projectDir,
|
||||||
|
projectCwd: env.projectDir,
|
||||||
|
pieceIdentifier: 'config-it',
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(ok).toBe(true);
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('project provider_options should override global provider_options', async () => {
|
||||||
|
setGlobalConfig(
|
||||||
|
env.globalDir,
|
||||||
|
[
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: true',
|
||||||
|
].join('\n'),
|
||||||
|
);
|
||||||
|
setProjectConfig(
|
||||||
|
env.projectDir,
|
||||||
|
[
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: false',
|
||||||
|
].join('\n'),
|
||||||
|
);
|
||||||
|
|
||||||
|
const ok = await executeTask({
|
||||||
|
task: 'test task',
|
||||||
|
cwd: env.projectDir,
|
||||||
|
projectCwd: env.projectDir,
|
||||||
|
pieceIdentifier: 'config-it',
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(ok).toBe(true);
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('env provider_options should override yaml provider_options', async () => {
|
||||||
|
setGlobalConfig(
|
||||||
|
env.globalDir,
|
||||||
|
[
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: true',
|
||||||
|
].join('\n'),
|
||||||
|
);
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS = 'false';
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
|
||||||
|
const ok = await executeTask({
|
||||||
|
task: 'test task',
|
||||||
|
cwd: env.projectDir,
|
||||||
|
projectCwd: env.projectDir,
|
||||||
|
pieceIdentifier: 'config-it',
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(ok).toBe(true);
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
@ -118,6 +118,19 @@ vi.mock('../infra/config/index.js', () => ({
|
|||||||
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
||||||
updateWorktreeSession: vi.fn(),
|
updateWorktreeSession: vi.fn(),
|
||||||
loadGlobalConfig: mockLoadGlobalConfig,
|
loadGlobalConfig: mockLoadGlobalConfig,
|
||||||
|
loadConfig: vi.fn().mockImplementation(() => ({
|
||||||
|
global: mockLoadGlobalConfig(),
|
||||||
|
project: {},
|
||||||
|
})),
|
||||||
|
resolvePieceConfigValues: (_projectDir: string, keys: readonly string[]) => {
|
||||||
|
const global = mockLoadGlobalConfig() as Record<string, unknown>;
|
||||||
|
const config = { ...global, piece: 'default', provider: global.provider ?? 'claude', verbose: false };
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
result[key] = config[key];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
saveSessionState: vi.fn(),
|
saveSessionState: vi.fn(),
|
||||||
ensureDir: vi.fn(),
|
ensureDir: vi.fn(),
|
||||||
writeFileAtomic: vi.fn(),
|
writeFileAtomic: vi.fn(),
|
||||||
|
|||||||
@ -4,7 +4,7 @@
|
|||||||
* Tests the 3-tier piece resolution (project-local → user → builtin)
|
* Tests the 3-tier piece resolution (project-local → user → builtin)
|
||||||
* and YAML parsing including special rule syntax (ai(), all(), any()).
|
* and YAML parsing including special rule syntax (ai(), all(), any()).
|
||||||
*
|
*
|
||||||
* Mocked: globalConfig (for language/builtins)
|
* Mocked: loadConfig (for language/builtins)
|
||||||
* Not mocked: loadPiece, parsePiece, rule parsing
|
* Not mocked: loadPiece, parsePiece, rule parsing
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -18,9 +18,24 @@ const languageState = vi.hoisted(() => ({ value: 'en' as 'en' | 'ja' }));
|
|||||||
|
|
||||||
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
||||||
loadGlobalConfig: vi.fn().mockReturnValue({}),
|
loadGlobalConfig: vi.fn().mockReturnValue({}),
|
||||||
getLanguage: vi.fn(() => languageState.value),
|
}));
|
||||||
getDisabledBuiltins: vi.fn().mockReturnValue([]),
|
|
||||||
getBuiltinPiecesEnabled: vi.fn().mockReturnValue(true),
|
vi.mock('../infra/config/resolveConfigValue.js', () => ({
|
||||||
|
resolveConfigValue: vi.fn((_cwd: string, key: string) => {
|
||||||
|
if (key === 'language') return languageState.value;
|
||||||
|
if (key === 'enableBuiltinPieces') return true;
|
||||||
|
if (key === 'disabledBuiltins') return [];
|
||||||
|
return undefined;
|
||||||
|
}),
|
||||||
|
resolveConfigValues: vi.fn((_cwd: string, keys: readonly string[]) => {
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
if (key === 'language') result[key] = languageState.value;
|
||||||
|
if (key === 'enableBuiltinPieces') result[key] = true;
|
||||||
|
if (key === 'disabledBuiltins') result[key] = [];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// --- Imports (after mocks) ---
|
// --- Imports (after mocks) ---
|
||||||
@ -38,6 +53,7 @@ function createTestDir(): string {
|
|||||||
|
|
||||||
describe('Piece Loader IT: builtin piece loading', () => {
|
describe('Piece Loader IT: builtin piece loading', () => {
|
||||||
let testDir: string;
|
let testDir: string;
|
||||||
|
const builtinNames = listBuiltinPieceNames(process.cwd(), { includeDisabled: true });
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
testDir = createTestDir();
|
testDir = createTestDir();
|
||||||
@ -48,8 +64,6 @@ describe('Piece Loader IT: builtin piece loading', () => {
|
|||||||
rmSync(testDir, { recursive: true, force: true });
|
rmSync(testDir, { recursive: true, force: true });
|
||||||
});
|
});
|
||||||
|
|
||||||
const builtinNames = listBuiltinPieceNames({ includeDisabled: true });
|
|
||||||
|
|
||||||
for (const name of builtinNames) {
|
for (const name of builtinNames) {
|
||||||
it(`should load builtin piece: ${name}`, () => {
|
it(`should load builtin piece: ${name}`, () => {
|
||||||
const config = loadPiece(name, testDir);
|
const config = loadPiece(name, testDir);
|
||||||
@ -85,7 +99,7 @@ describe('Piece Loader IT: builtin piece loading', () => {
|
|||||||
it('should load e2e-test as a builtin piece in ja locale', () => {
|
it('should load e2e-test as a builtin piece in ja locale', () => {
|
||||||
languageState.value = 'ja';
|
languageState.value = 'ja';
|
||||||
|
|
||||||
const jaBuiltinNames = listBuiltinPieceNames({ includeDisabled: true });
|
const jaBuiltinNames = listBuiltinPieceNames(testDir, { includeDisabled: true });
|
||||||
expect(jaBuiltinNames).toContain('e2e-test');
|
expect(jaBuiltinNames).toContain('e2e-test');
|
||||||
|
|
||||||
const config = loadPiece('e2e-test', testDir);
|
const config = loadPiece('e2e-test', testDir);
|
||||||
|
|||||||
@ -57,6 +57,24 @@ vi.mock('../infra/config/project/projectConfig.js', () => ({
|
|||||||
loadProjectConfig: vi.fn().mockReturnValue({}),
|
loadProjectConfig: vi.fn().mockReturnValue({}),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/resolveConfigValue.js', () => ({
|
||||||
|
resolveConfigValue: vi.fn((_cwd: string, key: string) => {
|
||||||
|
if (key === 'language') return 'en';
|
||||||
|
if (key === 'enableBuiltinPieces') return true;
|
||||||
|
if (key === 'disabledBuiltins') return [];
|
||||||
|
return undefined;
|
||||||
|
}),
|
||||||
|
resolveConfigValues: vi.fn((_cwd: string, keys: readonly string[]) => {
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
if (key === 'language') result[key] = 'en';
|
||||||
|
if (key === 'enableBuiltinPieces') result[key] = true;
|
||||||
|
if (key === 'disabledBuiltins') result[key] = [];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}),
|
||||||
|
}));
|
||||||
|
|
||||||
// --- Imports (after mocks) ---
|
// --- Imports (after mocks) ---
|
||||||
|
|
||||||
import { PieceEngine } from '../core/piece/index.js';
|
import { PieceEngine } from '../core/piece/index.js';
|
||||||
|
|||||||
@ -109,7 +109,6 @@ vi.mock('../infra/config/paths.js', async (importOriginal) => {
|
|||||||
updatePersonaSession: vi.fn(),
|
updatePersonaSession: vi.fn(),
|
||||||
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
||||||
updateWorktreeSession: vi.fn(),
|
updateWorktreeSession: vi.fn(),
|
||||||
getCurrentPiece: vi.fn().mockReturnValue('default'),
|
|
||||||
getProjectConfigDir: vi.fn().mockImplementation((cwd: string) => join(cwd, '.takt')),
|
getProjectConfigDir: vi.fn().mockImplementation((cwd: string) => join(cwd, '.takt')),
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
@ -118,7 +117,11 @@ vi.mock('../infra/config/global/globalConfig.js', async (importOriginal) => {
|
|||||||
const original = await importOriginal<typeof import('../infra/config/global/globalConfig.js')>();
|
const original = await importOriginal<typeof import('../infra/config/global/globalConfig.js')>();
|
||||||
return {
|
return {
|
||||||
...original,
|
...original,
|
||||||
loadGlobalConfig: vi.fn().mockReturnValue({}),
|
loadGlobalConfig: vi.fn().mockReturnValue({
|
||||||
|
language: 'en',
|
||||||
|
enableBuiltinPieces: true,
|
||||||
|
disabledBuiltins: [],
|
||||||
|
}),
|
||||||
getLanguage: vi.fn().mockReturnValue('en'),
|
getLanguage: vi.fn().mockReturnValue('en'),
|
||||||
getDisabledBuiltins: vi.fn().mockReturnValue([]),
|
getDisabledBuiltins: vi.fn().mockReturnValue([]),
|
||||||
};
|
};
|
||||||
|
|||||||
@ -91,7 +91,6 @@ vi.mock('../infra/config/paths.js', async (importOriginal) => {
|
|||||||
updatePersonaSession: vi.fn(),
|
updatePersonaSession: vi.fn(),
|
||||||
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
||||||
updateWorktreeSession: vi.fn(),
|
updateWorktreeSession: vi.fn(),
|
||||||
getCurrentPiece: vi.fn().mockReturnValue('default'),
|
|
||||||
getProjectConfigDir: vi.fn().mockImplementation((cwd: string) => join(cwd, '.takt')),
|
getProjectConfigDir: vi.fn().mockImplementation((cwd: string) => join(cwd, '.takt')),
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
@ -100,7 +99,11 @@ vi.mock('../infra/config/global/globalConfig.js', async (importOriginal) => {
|
|||||||
const original = await importOriginal<typeof import('../infra/config/global/globalConfig.js')>();
|
const original = await importOriginal<typeof import('../infra/config/global/globalConfig.js')>();
|
||||||
return {
|
return {
|
||||||
...original,
|
...original,
|
||||||
loadGlobalConfig: vi.fn().mockReturnValue({}),
|
loadGlobalConfig: vi.fn().mockReturnValue({
|
||||||
|
language: 'en',
|
||||||
|
enableBuiltinPieces: true,
|
||||||
|
disabledBuiltins: [],
|
||||||
|
}),
|
||||||
getLanguage: vi.fn().mockReturnValue('en'),
|
getLanguage: vi.fn().mockReturnValue('en'),
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|||||||
@ -191,6 +191,7 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
const retryContext: RetryContext = {
|
const retryContext: RetryContext = {
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'implement-auth',
|
taskName: 'implement-auth',
|
||||||
|
taskContent: 'Implement authentication feature',
|
||||||
createdAt: '2026-02-15T10:00:00Z',
|
createdAt: '2026-02-15T10:00:00Z',
|
||||||
failedMovement: 'review',
|
failedMovement: 'review',
|
||||||
error: 'Timeout after 300s',
|
error: 'Timeout after 300s',
|
||||||
@ -205,9 +206,10 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
movementPreviews: [],
|
movementPreviews: [],
|
||||||
},
|
},
|
||||||
run: null,
|
run: null,
|
||||||
|
previousOrderContent: null,
|
||||||
};
|
};
|
||||||
|
|
||||||
const result = await runRetryMode(tmpDir, retryContext);
|
const result = await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
// Verify: system prompt contains failure information
|
// Verify: system prompt contains failure information
|
||||||
expect(capture.systemPrompts.length).toBeGreaterThan(0);
|
expect(capture.systemPrompts.length).toBeGreaterThan(0);
|
||||||
@ -252,6 +254,7 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
const retryContext: RetryContext = {
|
const retryContext: RetryContext = {
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'build-login',
|
taskName: 'build-login',
|
||||||
|
taskContent: 'Build login page with OAuth2',
|
||||||
createdAt: '2026-02-15T14:00:00Z',
|
createdAt: '2026-02-15T14:00:00Z',
|
||||||
failedMovement: 'implement',
|
failedMovement: 'implement',
|
||||||
error: 'CSS compilation failed',
|
error: 'CSS compilation failed',
|
||||||
@ -274,9 +277,10 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
movementLogs: formatted.runMovementLogs,
|
movementLogs: formatted.runMovementLogs,
|
||||||
reports: formatted.runReports,
|
reports: formatted.runReports,
|
||||||
},
|
},
|
||||||
|
previousOrderContent: null,
|
||||||
};
|
};
|
||||||
|
|
||||||
const result = await runRetryMode(tmpDir, retryContext);
|
const result = await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
// Verify: system prompt contains BOTH failure info and run session data
|
// Verify: system prompt contains BOTH failure info and run session data
|
||||||
const systemPrompt = capture.systemPrompts[0]!;
|
const systemPrompt = capture.systemPrompts[0]!;
|
||||||
@ -314,6 +318,7 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
const retryContext: RetryContext = {
|
const retryContext: RetryContext = {
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'fix-tests',
|
taskName: 'fix-tests',
|
||||||
|
taskContent: 'Fix failing test suite',
|
||||||
createdAt: '2026-02-15T16:00:00Z',
|
createdAt: '2026-02-15T16:00:00Z',
|
||||||
failedMovement: '',
|
failedMovement: '',
|
||||||
error: 'Test suite failed',
|
error: 'Test suite failed',
|
||||||
@ -328,9 +333,10 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
movementPreviews: [],
|
movementPreviews: [],
|
||||||
},
|
},
|
||||||
run: null,
|
run: null,
|
||||||
|
previousOrderContent: null,
|
||||||
};
|
};
|
||||||
|
|
||||||
await runRetryMode(tmpDir, retryContext);
|
await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
const systemPrompt = capture.systemPrompts[0]!;
|
const systemPrompt = capture.systemPrompts[0]!;
|
||||||
expect(systemPrompt).toContain('Existing Retry Note');
|
expect(systemPrompt).toContain('Existing Retry Note');
|
||||||
@ -348,6 +354,7 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
const retryContext: RetryContext = {
|
const retryContext: RetryContext = {
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'some-task',
|
taskName: 'some-task',
|
||||||
|
taskContent: 'Complete some task',
|
||||||
createdAt: '2026-02-15T12:00:00Z',
|
createdAt: '2026-02-15T12:00:00Z',
|
||||||
failedMovement: 'plan',
|
failedMovement: 'plan',
|
||||||
error: 'Unknown error',
|
error: 'Unknown error',
|
||||||
@ -362,9 +369,10 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
movementPreviews: [],
|
movementPreviews: [],
|
||||||
},
|
},
|
||||||
run: null,
|
run: null,
|
||||||
|
previousOrderContent: null,
|
||||||
};
|
};
|
||||||
|
|
||||||
const result = await runRetryMode(tmpDir, retryContext);
|
const result = await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
expect(result.action).toBe('cancel');
|
expect(result.action).toBe('cancel');
|
||||||
expect(result.task).toBe('');
|
expect(result.task).toBe('');
|
||||||
@ -385,6 +393,7 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
const retryContext: RetryContext = {
|
const retryContext: RetryContext = {
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'optimize-review',
|
taskName: 'optimize-review',
|
||||||
|
taskContent: 'Optimize the review step',
|
||||||
createdAt: '2026-02-15T18:00:00Z',
|
createdAt: '2026-02-15T18:00:00Z',
|
||||||
failedMovement: 'review',
|
failedMovement: 'review',
|
||||||
error: 'Timeout',
|
error: 'Timeout',
|
||||||
@ -399,9 +408,10 @@ describe('E2E: Retry mode with failure context injection', () => {
|
|||||||
movementPreviews: [],
|
movementPreviews: [],
|
||||||
},
|
},
|
||||||
run: null,
|
run: null,
|
||||||
|
previousOrderContent: null,
|
||||||
};
|
};
|
||||||
|
|
||||||
const result = await runRetryMode(tmpDir, retryContext);
|
const result = await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
expect(result.action).toBe('execute');
|
expect(result.action).toBe('execute');
|
||||||
expect(result.task).toBe('Increase review timeout to 600s and add retry logic.');
|
expect(result.task).toBe('Increase review timeout to 600s and add retry logic.');
|
||||||
|
|||||||
170
src/__tests__/it-run-config-provider-options.test.ts
Normal file
170
src/__tests__/it-run-config-provider-options.test.ts
Normal file
@ -0,0 +1,170 @@
|
|||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||||
|
import { mkdirSync, rmSync, writeFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { randomUUID } from 'node:crypto';
|
||||||
|
|
||||||
|
vi.mock('../agents/runner.js', () => ({
|
||||||
|
runAgent: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../agents/ai-judge.js', async (importOriginal) => {
|
||||||
|
const original = await importOriginal<typeof import('../agents/ai-judge.js')>();
|
||||||
|
return {
|
||||||
|
...original,
|
||||||
|
callAiJudge: vi.fn().mockResolvedValue(-1),
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
vi.mock('../core/piece/phase-runner.js', () => ({
|
||||||
|
needsStatusJudgmentPhase: vi.fn().mockReturnValue(false),
|
||||||
|
runReportPhase: vi.fn().mockResolvedValue(undefined),
|
||||||
|
runStatusJudgmentPhase: vi.fn().mockResolvedValue({ tag: '', ruleIndex: 0, method: 'auto_select' }),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
generateReportDir: vi.fn().mockReturnValue('test-report-dir'),
|
||||||
|
notifySuccess: vi.fn(),
|
||||||
|
notifyError: vi.fn(),
|
||||||
|
sendSlackNotification: vi.fn(),
|
||||||
|
getSlackWebhookUrl: vi.fn(() => undefined),
|
||||||
|
}));
|
||||||
|
|
||||||
|
import { runAllTasks } from '../features/tasks/index.js';
|
||||||
|
import { TaskRunner } from '../infra/task/index.js';
|
||||||
|
import { runAgent } from '../agents/runner.js';
|
||||||
|
import { invalidateGlobalConfigCache } from '../infra/config/index.js';
|
||||||
|
|
||||||
|
interface TestEnv {
|
||||||
|
root: string;
|
||||||
|
projectDir: string;
|
||||||
|
globalDir: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createEnv(): TestEnv {
|
||||||
|
const root = join(tmpdir(), `takt-it-run-config-${randomUUID()}`);
|
||||||
|
const projectDir = join(root, 'project');
|
||||||
|
const globalDir = join(root, 'global');
|
||||||
|
|
||||||
|
mkdirSync(join(projectDir, '.takt', 'pieces', 'personas'), { recursive: true });
|
||||||
|
mkdirSync(globalDir, { recursive: true });
|
||||||
|
|
||||||
|
writeFileSync(
|
||||||
|
join(projectDir, '.takt', 'pieces', 'run-config-it.yaml'),
|
||||||
|
[
|
||||||
|
'name: run-config-it',
|
||||||
|
'description: run config provider options integration test',
|
||||||
|
'max_movements: 3',
|
||||||
|
'initial_movement: plan',
|
||||||
|
'movements:',
|
||||||
|
' - name: plan',
|
||||||
|
' persona: ./personas/planner.md',
|
||||||
|
' instruction: "{task}"',
|
||||||
|
' rules:',
|
||||||
|
' - condition: done',
|
||||||
|
' next: COMPLETE',
|
||||||
|
].join('\n'),
|
||||||
|
'utf-8',
|
||||||
|
);
|
||||||
|
writeFileSync(join(projectDir, '.takt', 'pieces', 'personas', 'planner.md'), 'You are planner.', 'utf-8');
|
||||||
|
|
||||||
|
return { root, projectDir, globalDir };
|
||||||
|
}
|
||||||
|
|
||||||
|
function setGlobalConfig(globalDir: string, body: string): void {
|
||||||
|
writeFileSync(join(globalDir, 'config.yaml'), body, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
function setProjectConfig(projectDir: string, body: string): void {
|
||||||
|
writeFileSync(join(projectDir, '.takt', 'config.yaml'), body, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
function mockDoneResponse() {
|
||||||
|
return {
|
||||||
|
persona: 'planner',
|
||||||
|
status: 'done',
|
||||||
|
content: '[PLAN:1]\ndone',
|
||||||
|
timestamp: new Date(),
|
||||||
|
sessionId: 'session-it',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
describe('IT: runAllTasks provider_options reflection', () => {
|
||||||
|
let env: TestEnv;
|
||||||
|
let originalConfigDir: string | undefined;
|
||||||
|
let originalEnvCodex: string | undefined;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
env = createEnv();
|
||||||
|
originalConfigDir = process.env.TAKT_CONFIG_DIR;
|
||||||
|
originalEnvCodex = process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
process.env.TAKT_CONFIG_DIR = env.globalDir;
|
||||||
|
delete process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
|
||||||
|
vi.mocked(runAgent).mockResolvedValue(mockDoneResponse());
|
||||||
|
|
||||||
|
const runner = new TaskRunner(env.projectDir);
|
||||||
|
runner.addTask('test task');
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
if (originalConfigDir === undefined) {
|
||||||
|
delete process.env.TAKT_CONFIG_DIR;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_CONFIG_DIR = originalConfigDir;
|
||||||
|
}
|
||||||
|
if (originalEnvCodex === undefined) {
|
||||||
|
delete process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS;
|
||||||
|
} else {
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS = originalEnvCodex;
|
||||||
|
}
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
rmSync(env.root, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('project provider_options should override global in runAllTasks flow', async () => {
|
||||||
|
setGlobalConfig(env.globalDir, [
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: true',
|
||||||
|
].join('\n'));
|
||||||
|
setProjectConfig(env.projectDir, [
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: false',
|
||||||
|
].join('\n'));
|
||||||
|
|
||||||
|
await runAllTasks(env.projectDir, 'run-config-it');
|
||||||
|
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('env provider_options should override yaml in runAllTasks flow', async () => {
|
||||||
|
setGlobalConfig(env.globalDir, [
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: false',
|
||||||
|
].join('\n'));
|
||||||
|
setProjectConfig(env.projectDir, [
|
||||||
|
'provider_options:',
|
||||||
|
' codex:',
|
||||||
|
' network_access: false',
|
||||||
|
].join('\n'));
|
||||||
|
process.env.TAKT_PROVIDER_OPTIONS_CODEX_NETWORK_ACCESS = 'true';
|
||||||
|
invalidateGlobalConfigCache();
|
||||||
|
|
||||||
|
await runAllTasks(env.projectDir, 'run-config-it');
|
||||||
|
|
||||||
|
const options = vi.mocked(runAgent).mock.calls[0]?.[2];
|
||||||
|
expect(options?.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
@ -89,6 +89,18 @@ vi.mock('../infra/config/index.js', () => ({
|
|||||||
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
||||||
updateWorktreeSession: vi.fn(),
|
updateWorktreeSession: vi.fn(),
|
||||||
loadGlobalConfig: vi.fn().mockReturnValue({ provider: 'claude' }),
|
loadGlobalConfig: vi.fn().mockReturnValue({ provider: 'claude' }),
|
||||||
|
loadConfig: vi.fn().mockReturnValue({
|
||||||
|
global: { provider: 'claude' },
|
||||||
|
project: {},
|
||||||
|
}),
|
||||||
|
resolvePieceConfigValues: (_projectDir: string, keys: readonly string[]) => {
|
||||||
|
const config: Record<string, unknown> = { provider: 'claude', piece: 'default', verbose: false };
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
result[key] = config[key];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
saveSessionState: vi.fn(),
|
saveSessionState: vi.fn(),
|
||||||
ensureDir: vi.fn(),
|
ensureDir: vi.fn(),
|
||||||
writeFileAtomic: vi.fn(),
|
writeFileAtomic: vi.fn(),
|
||||||
|
|||||||
106
src/__tests__/loadPreviousOrderContent.test.ts
Normal file
106
src/__tests__/loadPreviousOrderContent.test.ts
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
/**
|
||||||
|
* Tests for loadPreviousOrderContent utility function.
|
||||||
|
*
|
||||||
|
* Verifies order.md loading from run directories,
|
||||||
|
* including happy path, missing slug, and missing file cases.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdirSync, writeFileSync, rmSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { loadPreviousOrderContent } from '../features/interactive/runSessionReader.js';
|
||||||
|
|
||||||
|
function createTmpDir(): string {
|
||||||
|
const dir = join(tmpdir(), `takt-order-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||||
|
mkdirSync(dir, { recursive: true });
|
||||||
|
return dir;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createRunWithOrder(cwd: string, slug: string, taskContent: string, orderContent: string): void {
|
||||||
|
const runDir = join(cwd, '.takt', 'runs', slug);
|
||||||
|
mkdirSync(join(runDir, 'context', 'task'), { recursive: true });
|
||||||
|
|
||||||
|
const meta = {
|
||||||
|
task: taskContent,
|
||||||
|
piece: 'default',
|
||||||
|
status: 'completed',
|
||||||
|
startTime: '2026-02-01T00:00:00.000Z',
|
||||||
|
logsDirectory: `.takt/runs/${slug}/logs`,
|
||||||
|
reportDirectory: `.takt/runs/${slug}/reports`,
|
||||||
|
runSlug: slug,
|
||||||
|
};
|
||||||
|
writeFileSync(join(runDir, 'meta.json'), JSON.stringify(meta), 'utf-8');
|
||||||
|
writeFileSync(join(runDir, 'context', 'task', 'order.md'), orderContent, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
function createRunWithoutOrder(cwd: string, slug: string, taskContent: string): void {
|
||||||
|
const runDir = join(cwd, '.takt', 'runs', slug);
|
||||||
|
mkdirSync(runDir, { recursive: true });
|
||||||
|
|
||||||
|
const meta = {
|
||||||
|
task: taskContent,
|
||||||
|
piece: 'default',
|
||||||
|
status: 'completed',
|
||||||
|
startTime: '2026-02-01T00:00:00.000Z',
|
||||||
|
logsDirectory: `.takt/runs/${slug}/logs`,
|
||||||
|
reportDirectory: `.takt/runs/${slug}/reports`,
|
||||||
|
runSlug: slug,
|
||||||
|
};
|
||||||
|
writeFileSync(join(runDir, 'meta.json'), JSON.stringify(meta), 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
describe('loadPreviousOrderContent', () => {
|
||||||
|
let tmpDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
tmpDir = createTmpDir();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
rmSync(tmpDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return order.md content when run and file exist', () => {
|
||||||
|
const taskContent = 'Implement feature X';
|
||||||
|
const orderContent = '# Task\n\nImplement feature X with tests.';
|
||||||
|
createRunWithOrder(tmpDir, 'run-feature-x', taskContent, orderContent);
|
||||||
|
|
||||||
|
const result = loadPreviousOrderContent(tmpDir, taskContent);
|
||||||
|
|
||||||
|
expect(result).toBe(orderContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when no matching run exists', () => {
|
||||||
|
const result = loadPreviousOrderContent(tmpDir, 'Non-existent task');
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when run exists but order.md is missing', () => {
|
||||||
|
const taskContent = 'Task without order';
|
||||||
|
createRunWithoutOrder(tmpDir, 'run-no-order', taskContent);
|
||||||
|
|
||||||
|
const result = loadPreviousOrderContent(tmpDir, taskContent);
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when .takt/runs directory does not exist', () => {
|
||||||
|
const emptyDir = join(tmpdir(), `takt-empty-${Date.now()}`);
|
||||||
|
mkdirSync(emptyDir, { recursive: true });
|
||||||
|
|
||||||
|
const result = loadPreviousOrderContent(emptyDir, 'any task');
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
rmSync(emptyDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should match the correct run among multiple runs', () => {
|
||||||
|
createRunWithOrder(tmpDir, 'run-a', 'Task A', '# Order A');
|
||||||
|
createRunWithOrder(tmpDir, 'run-b', 'Task B', '# Order B');
|
||||||
|
|
||||||
|
expect(loadPreviousOrderContent(tmpDir, 'Task A')).toBe('# Order A');
|
||||||
|
expect(loadPreviousOrderContent(tmpDir, 'Task B')).toBe('# Order B');
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -495,7 +495,6 @@ describe('GlobalConfigSchema', () => {
|
|||||||
const config = {};
|
const config = {};
|
||||||
const result = GlobalConfigSchema.parse(config);
|
const result = GlobalConfigSchema.parse(config);
|
||||||
|
|
||||||
expect(result.default_piece).toBe('default');
|
|
||||||
expect(result.log_level).toBe('info');
|
expect(result.log_level).toBe('info');
|
||||||
expect(result.provider).toBe('claude');
|
expect(result.provider).toBe('claude');
|
||||||
expect(result.observability).toBeUndefined();
|
expect(result.observability).toBeUndefined();
|
||||||
@ -503,7 +502,6 @@ describe('GlobalConfigSchema', () => {
|
|||||||
|
|
||||||
it('should accept valid config', () => {
|
it('should accept valid config', () => {
|
||||||
const config = {
|
const config = {
|
||||||
default_piece: 'custom',
|
|
||||||
log_level: 'debug' as const,
|
log_level: 'debug' as const,
|
||||||
observability: {
|
observability: {
|
||||||
provider_events: false,
|
provider_events: false,
|
||||||
|
|||||||
@ -1,11 +1,11 @@
|
|||||||
/**
|
/**
|
||||||
* Unit tests for task naming utilities
|
* Unit tests for task naming utilities
|
||||||
*
|
*
|
||||||
* Tests nowIso, firstLine, and sanitizeTaskName functions.
|
* Tests nowIso and firstLine functions.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { describe, it, expect, vi, afterEach } from 'vitest';
|
import { describe, it, expect, vi, afterEach } from 'vitest';
|
||||||
import { nowIso, firstLine, sanitizeTaskName } from '../infra/task/naming.js';
|
import { nowIso, firstLine } from '../infra/task/naming.js';
|
||||||
|
|
||||||
describe('nowIso', () => {
|
describe('nowIso', () => {
|
||||||
afterEach(() => {
|
afterEach(() => {
|
||||||
@ -54,34 +54,3 @@ describe('firstLine', () => {
|
|||||||
expect(firstLine(' \n ')).toBe('');
|
expect(firstLine(' \n ')).toBe('');
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('sanitizeTaskName', () => {
|
|
||||||
it('should lowercase the input', () => {
|
|
||||||
expect(sanitizeTaskName('Hello World')).toBe('hello-world');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should replace special characters with spaces then hyphens', () => {
|
|
||||||
expect(sanitizeTaskName('task@name#123')).toBe('task-name-123');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should collapse multiple hyphens', () => {
|
|
||||||
expect(sanitizeTaskName('a---b')).toBe('a-b');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should trim leading/trailing whitespace', () => {
|
|
||||||
expect(sanitizeTaskName(' hello ')).toBe('hello');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle typical task names', () => {
|
|
||||||
expect(sanitizeTaskName('Fix: login bug (#42)')).toBe('fix-login-bug-42');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should generate fallback name for empty result', () => {
|
|
||||||
const result = sanitizeTaskName('!@#$%');
|
|
||||||
expect(result).toMatch(/^task-\d+$/);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should preserve numbers and lowercase letters', () => {
|
|
||||||
expect(sanitizeTaskName('abc123def')).toBe('abc123def');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|||||||
@ -2,8 +2,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
|
|||||||
|
|
||||||
const {
|
const {
|
||||||
getProviderMock,
|
getProviderMock,
|
||||||
loadProjectConfigMock,
|
loadConfigMock,
|
||||||
loadGlobalConfigMock,
|
|
||||||
loadCustomAgentsMock,
|
loadCustomAgentsMock,
|
||||||
loadAgentPromptMock,
|
loadAgentPromptMock,
|
||||||
loadTemplateMock,
|
loadTemplateMock,
|
||||||
@ -15,8 +14,7 @@ const {
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
getProviderMock: vi.fn(() => ({ setup: providerSetup })),
|
getProviderMock: vi.fn(() => ({ setup: providerSetup })),
|
||||||
loadProjectConfigMock: vi.fn(),
|
loadConfigMock: vi.fn(),
|
||||||
loadGlobalConfigMock: vi.fn(),
|
|
||||||
loadCustomAgentsMock: vi.fn(),
|
loadCustomAgentsMock: vi.fn(),
|
||||||
loadAgentPromptMock: vi.fn(),
|
loadAgentPromptMock: vi.fn(),
|
||||||
loadTemplateMock: vi.fn(),
|
loadTemplateMock: vi.fn(),
|
||||||
@ -30,10 +28,21 @@ vi.mock('../infra/providers/index.js', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
loadProjectConfig: loadProjectConfigMock,
|
loadConfig: loadConfigMock,
|
||||||
loadGlobalConfig: loadGlobalConfigMock,
|
|
||||||
loadCustomAgents: loadCustomAgentsMock,
|
loadCustomAgents: loadCustomAgentsMock,
|
||||||
loadAgentPrompt: loadAgentPromptMock,
|
loadAgentPrompt: loadAgentPromptMock,
|
||||||
|
resolveConfigValues: (_projectDir: string, keys: readonly string[]) => {
|
||||||
|
const loaded = loadConfigMock() as Record<string, unknown>;
|
||||||
|
const global = (loaded.global ?? {}) as Record<string, unknown>;
|
||||||
|
const project = (loaded.project ?? {}) as Record<string, unknown>;
|
||||||
|
const provider = (project.provider ?? global.provider ?? 'claude') as string;
|
||||||
|
const config: Record<string, unknown> = { ...global, ...project, provider, piece: project.piece ?? 'default', verbose: false };
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
result[key] = config[key];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/prompts/index.js', () => ({
|
vi.mock('../shared/prompts/index.js', () => ({
|
||||||
@ -47,17 +56,18 @@ describe('option resolution order', () => {
|
|||||||
vi.clearAllMocks();
|
vi.clearAllMocks();
|
||||||
|
|
||||||
providerCallMock.mockResolvedValue({ content: 'ok' });
|
providerCallMock.mockResolvedValue({ content: 'ok' });
|
||||||
loadProjectConfigMock.mockReturnValue({});
|
loadConfigMock.mockReturnValue({ global: {}, project: {} });
|
||||||
loadGlobalConfigMock.mockReturnValue({});
|
|
||||||
loadCustomAgentsMock.mockReturnValue(new Map());
|
loadCustomAgentsMock.mockReturnValue(new Map());
|
||||||
loadAgentPromptMock.mockReturnValue('prompt');
|
loadAgentPromptMock.mockReturnValue('prompt');
|
||||||
loadTemplateMock.mockReturnValue('template');
|
loadTemplateMock.mockReturnValue('template');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should resolve provider in order: CLI > Local > Piece(step) > Global', async () => {
|
it('should resolve provider in order: CLI > Config(project??global) > stepProvider > default', async () => {
|
||||||
// Given
|
// Given
|
||||||
loadProjectConfigMock.mockReturnValue({ provider: 'opencode' });
|
loadConfigMock.mockReturnValue({
|
||||||
loadGlobalConfigMock.mockReturnValue({ provider: 'mock' });
|
project: { provider: 'opencode' },
|
||||||
|
global: { provider: 'mock' },
|
||||||
|
});
|
||||||
|
|
||||||
// When: CLI provider が指定される
|
// When: CLI provider が指定される
|
||||||
await runAgent(undefined, 'task', {
|
await runAgent(undefined, 'task', {
|
||||||
@ -69,7 +79,7 @@ describe('option resolution order', () => {
|
|||||||
// Then
|
// Then
|
||||||
expect(getProviderMock).toHaveBeenLastCalledWith('codex');
|
expect(getProviderMock).toHaveBeenLastCalledWith('codex');
|
||||||
|
|
||||||
// When: CLI 指定なし(Local が有効)
|
// When: CLI 指定なし(project provider が有効: resolveConfigValues は project.provider ?? global.provider を返す)
|
||||||
await runAgent(undefined, 'task', {
|
await runAgent(undefined, 'task', {
|
||||||
cwd: '/repo',
|
cwd: '/repo',
|
||||||
stepProvider: 'claude',
|
stepProvider: 'claude',
|
||||||
@ -78,17 +88,20 @@ describe('option resolution order', () => {
|
|||||||
// Then
|
// Then
|
||||||
expect(getProviderMock).toHaveBeenLastCalledWith('opencode');
|
expect(getProviderMock).toHaveBeenLastCalledWith('opencode');
|
||||||
|
|
||||||
// When: Local なし(Piece が有効)
|
// When: project なし → resolveConfigValues は global.provider を返す(フラットマージ)
|
||||||
loadProjectConfigMock.mockReturnValue({});
|
loadConfigMock.mockReturnValue({
|
||||||
|
project: {},
|
||||||
|
global: { provider: 'mock' },
|
||||||
|
});
|
||||||
await runAgent(undefined, 'task', {
|
await runAgent(undefined, 'task', {
|
||||||
cwd: '/repo',
|
cwd: '/repo',
|
||||||
stepProvider: 'claude',
|
stepProvider: 'claude',
|
||||||
});
|
});
|
||||||
|
|
||||||
// Then
|
// Then: resolveConfigValues returns 'mock' (global fallback), so stepProvider is not reached
|
||||||
expect(getProviderMock).toHaveBeenLastCalledWith('claude');
|
expect(getProviderMock).toHaveBeenLastCalledWith('mock');
|
||||||
|
|
||||||
// When: Piece なし(Global が有効)
|
// When: stepProvider もなし → 同様に global.provider
|
||||||
await runAgent(undefined, 'task', { cwd: '/repo' });
|
await runAgent(undefined, 'task', { cwd: '/repo' });
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
@ -97,8 +110,10 @@ describe('option resolution order', () => {
|
|||||||
|
|
||||||
it('should resolve model in order: CLI > Piece(step) > Global(matching provider)', async () => {
|
it('should resolve model in order: CLI > Piece(step) > Global(matching provider)', async () => {
|
||||||
// Given
|
// Given
|
||||||
loadProjectConfigMock.mockReturnValue({ provider: 'claude' });
|
loadConfigMock.mockReturnValue({
|
||||||
loadGlobalConfigMock.mockReturnValue({ provider: 'claude', model: 'global-model' });
|
project: { provider: 'claude' },
|
||||||
|
global: { provider: 'claude', model: 'global-model' },
|
||||||
|
});
|
||||||
|
|
||||||
// When: CLI model あり
|
// When: CLI model あり
|
||||||
await runAgent(undefined, 'task', {
|
await runAgent(undefined, 'task', {
|
||||||
@ -135,13 +150,16 @@ describe('option resolution order', () => {
|
|||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should ignore global model when global provider does not match resolved provider', async () => {
|
it('should ignore global model when resolved provider does not match config provider', async () => {
|
||||||
// Given
|
// Given: CLI provider overrides config provider, causing mismatch with config.model
|
||||||
loadProjectConfigMock.mockReturnValue({ provider: 'codex' });
|
loadConfigMock.mockReturnValue({
|
||||||
loadGlobalConfigMock.mockReturnValue({ provider: 'claude', model: 'global-model' });
|
project: {},
|
||||||
|
global: { provider: 'claude', model: 'global-model' },
|
||||||
|
});
|
||||||
|
|
||||||
// When
|
// When: CLI provider='codex' overrides config provider='claude'
|
||||||
await runAgent(undefined, 'task', { cwd: '/repo' });
|
// resolveModel compares config.provider ('claude') with resolvedProvider ('codex') → mismatch → model ignored
|
||||||
|
await runAgent(undefined, 'task', { cwd: '/repo', provider: 'codex' });
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
expect(providerCallMock).toHaveBeenLastCalledWith(
|
expect(providerCallMock).toHaveBeenLastCalledWith(
|
||||||
@ -160,17 +178,16 @@ describe('option resolution order', () => {
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
loadProjectConfigMock.mockReturnValue({
|
loadConfigMock.mockReturnValue({
|
||||||
|
project: {
|
||||||
provider: 'claude',
|
provider: 'claude',
|
||||||
provider_options: {
|
|
||||||
claude: { sandbox: { allow_unsandboxed_commands: true } },
|
|
||||||
},
|
},
|
||||||
});
|
global: {
|
||||||
loadGlobalConfigMock.mockReturnValue({
|
|
||||||
provider: 'claude',
|
provider: 'claude',
|
||||||
providerOptions: {
|
providerOptions: {
|
||||||
claude: { sandbox: { allowUnsandboxedCommands: true } },
|
claude: { sandbox: { allowUnsandboxedCommands: true } },
|
||||||
},
|
},
|
||||||
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
// When
|
// When
|
||||||
@ -187,8 +204,11 @@ describe('option resolution order', () => {
|
|||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use custom agent provider/model when higher-priority values are absent', async () => {
|
it('should use custom agent model and prompt when higher-priority values are absent', async () => {
|
||||||
// Given
|
// Given: custom agent with provider/model, but no CLI/config override
|
||||||
|
// Note: resolveConfigValues returns provider='claude' by default (loadConfig merges project ?? global ?? 'claude'),
|
||||||
|
// so agentConfig.provider is not reached in resolveProvider (config.provider is always truthy).
|
||||||
|
// However, custom agent model IS used because resolveModel checks agentConfig.model before config.
|
||||||
const customAgents = new Map([
|
const customAgents = new Map([
|
||||||
['custom', { name: 'custom', prompt: 'agent prompt', provider: 'opencode', model: 'agent-model' }],
|
['custom', { name: 'custom', prompt: 'agent prompt', provider: 'opencode', model: 'agent-model' }],
|
||||||
]);
|
]);
|
||||||
@ -197,12 +217,14 @@ describe('option resolution order', () => {
|
|||||||
// When
|
// When
|
||||||
await runAgent('custom', 'task', { cwd: '/repo' });
|
await runAgent('custom', 'task', { cwd: '/repo' });
|
||||||
|
|
||||||
// Then
|
// Then: provider falls back to config default ('claude'), not agentConfig.provider
|
||||||
expect(getProviderMock).toHaveBeenLastCalledWith('opencode');
|
expect(getProviderMock).toHaveBeenLastCalledWith('claude');
|
||||||
|
// Agent model is used (resolved before config.model in resolveModel)
|
||||||
expect(providerCallMock).toHaveBeenLastCalledWith(
|
expect(providerCallMock).toHaveBeenLastCalledWith(
|
||||||
'task',
|
'task',
|
||||||
expect.objectContaining({ model: 'agent-model' }),
|
expect.objectContaining({ model: 'agent-model' }),
|
||||||
);
|
);
|
||||||
|
// Agent prompt is still used
|
||||||
expect(providerSetupMock).toHaveBeenLastCalledWith(
|
expect(providerSetupMock).toHaveBeenLastCalledWith(
|
||||||
expect.objectContaining({ systemPrompt: 'prompt' }),
|
expect.objectContaining({ systemPrompt: 'prompt' }),
|
||||||
);
|
);
|
||||||
|
|||||||
@ -16,8 +16,8 @@ function createMovement(overrides: Partial<PieceMovement> = {}): PieceMovement {
|
|||||||
function createBuilder(step: PieceMovement, engineOverrides: Partial<PieceEngineOptions> = {}): OptionsBuilder {
|
function createBuilder(step: PieceMovement, engineOverrides: Partial<PieceEngineOptions> = {}): OptionsBuilder {
|
||||||
const engineOptions: PieceEngineOptions = {
|
const engineOptions: PieceEngineOptions = {
|
||||||
projectCwd: '/project',
|
projectCwd: '/project',
|
||||||
globalProvider: 'codex',
|
provider: 'codex',
|
||||||
globalProviderProfiles: {
|
providerProfiles: {
|
||||||
codex: {
|
codex: {
|
||||||
defaultPermissionMode: 'full',
|
defaultPermissionMode: 'full',
|
||||||
},
|
},
|
||||||
@ -60,15 +60,57 @@ describe('OptionsBuilder.buildBaseOptions', () => {
|
|||||||
it('uses default profile when provider_profiles are not provided', () => {
|
it('uses default profile when provider_profiles are not provided', () => {
|
||||||
const step = createMovement();
|
const step = createMovement();
|
||||||
const builder = createBuilder(step, {
|
const builder = createBuilder(step, {
|
||||||
globalProvider: undefined,
|
|
||||||
globalProviderProfiles: undefined,
|
|
||||||
projectProvider: undefined,
|
|
||||||
provider: undefined,
|
provider: undefined,
|
||||||
|
providerProfiles: undefined,
|
||||||
});
|
});
|
||||||
|
|
||||||
const options = builder.buildBaseOptions(step);
|
const options = builder.buildBaseOptions(step);
|
||||||
expect(options.permissionMode).toBe('edit');
|
expect(options.permissionMode).toBe('edit');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('merges provider options with precedence: global < project < movement', () => {
|
||||||
|
const step = createMovement({
|
||||||
|
providerOptions: {
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
claude: { sandbox: { excludedCommands: ['./gradlew'] } },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
const builder = createBuilder(step, {
|
||||||
|
providerOptions: {
|
||||||
|
codex: { networkAccess: true },
|
||||||
|
claude: { sandbox: { allowUnsandboxedCommands: true } },
|
||||||
|
opencode: { networkAccess: true },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const options = builder.buildBaseOptions(step);
|
||||||
|
|
||||||
|
expect(options.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
opencode: { networkAccess: true },
|
||||||
|
claude: {
|
||||||
|
sandbox: {
|
||||||
|
allowUnsandboxedCommands: true,
|
||||||
|
excludedCommands: ['./gradlew'],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('falls back to global/project provider options when movement has none', () => {
|
||||||
|
const step = createMovement();
|
||||||
|
const builder = createBuilder(step, {
|
||||||
|
providerOptions: {
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const options = builder.buildBaseOptions(step);
|
||||||
|
|
||||||
|
expect(options.providerOptions).toEqual({
|
||||||
|
codex: { networkAccess: false },
|
||||||
|
});
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('OptionsBuilder.buildResumeOptions', () => {
|
describe('OptionsBuilder.buildResumeOptions', () => {
|
||||||
|
|||||||
104
src/__tests__/orderReader.test.ts
Normal file
104
src/__tests__/orderReader.test.ts
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for orderReader: findPreviousOrderContent
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdirSync, writeFileSync, rmSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { findPreviousOrderContent } from '../features/interactive/orderReader.js';
|
||||||
|
|
||||||
|
const TEST_DIR = join(process.cwd(), 'tmp-test-order-reader');
|
||||||
|
|
||||||
|
function createRunWithOrder(slug: string, content: string): void {
|
||||||
|
const orderDir = join(TEST_DIR, '.takt', 'runs', slug, 'context', 'task');
|
||||||
|
mkdirSync(orderDir, { recursive: true });
|
||||||
|
writeFileSync(join(orderDir, 'order.md'), content, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
function createRunWithoutOrder(slug: string): void {
|
||||||
|
const runDir = join(TEST_DIR, '.takt', 'runs', slug);
|
||||||
|
mkdirSync(runDir, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mkdirSync(TEST_DIR, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('findPreviousOrderContent', () => {
|
||||||
|
it('should return order content when slug is specified and order.md exists', () => {
|
||||||
|
createRunWithOrder('20260218-run1', '# Task Order\nDo something');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, '20260218-run1');
|
||||||
|
|
||||||
|
expect(result).toBe('# Task Order\nDo something');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when slug is specified but order.md does not exist', () => {
|
||||||
|
createRunWithoutOrder('20260218-run1');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, '20260218-run1');
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when slug is specified but run directory does not exist', () => {
|
||||||
|
mkdirSync(join(TEST_DIR, '.takt', 'runs'), { recursive: true });
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, 'nonexistent-slug');
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null for empty order.md content', () => {
|
||||||
|
createRunWithOrder('20260218-run1', '');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, '20260218-run1');
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null for whitespace-only order.md content', () => {
|
||||||
|
createRunWithOrder('20260218-run1', ' \n ');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, '20260218-run1');
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should find order from latest run when slug is null', () => {
|
||||||
|
createRunWithOrder('20260218-run-a', 'First order');
|
||||||
|
createRunWithOrder('20260219-run-b', 'Second order');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, null);
|
||||||
|
|
||||||
|
expect(result).toBe('Second order');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should skip runs without order.md when searching latest', () => {
|
||||||
|
createRunWithOrder('20260218-run-a', 'First order');
|
||||||
|
createRunWithoutOrder('20260219-run-b');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, null);
|
||||||
|
|
||||||
|
expect(result).toBe('First order');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when no runs have order.md', () => {
|
||||||
|
createRunWithoutOrder('20260218-run-a');
|
||||||
|
createRunWithoutOrder('20260219-run-b');
|
||||||
|
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, null);
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return null when .takt/runs directory does not exist', () => {
|
||||||
|
const result = findPreviousOrderContent(TEST_DIR, null);
|
||||||
|
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -17,6 +17,24 @@ vi.mock('../infra/config/global/globalConfig.js', async (importOriginal) => {
|
|||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
vi.mock('../infra/config/resolveConfigValue.js', () => ({
|
||||||
|
resolveConfigValue: (_cwd: string, key: string) => {
|
||||||
|
if (key === 'language') return 'en';
|
||||||
|
if (key === 'enableBuiltinPieces') return false;
|
||||||
|
if (key === 'disabledBuiltins') return [];
|
||||||
|
return undefined;
|
||||||
|
},
|
||||||
|
resolveConfigValues: (_cwd: string, keys: readonly string[]) => {
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
if (key === 'language') result[key] = 'en';
|
||||||
|
if (key === 'enableBuiltinPieces') result[key] = false;
|
||||||
|
if (key === 'disabledBuiltins') result[key] = [];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
const { listPieces } = await import('../infra/config/loaders/pieceLoader.js');
|
const { listPieces } = await import('../infra/config/loaders/pieceLoader.js');
|
||||||
|
|
||||||
const SAMPLE_PIECE = `name: test-piece
|
const SAMPLE_PIECE = `name: test-piece
|
||||||
|
|||||||
@ -22,12 +22,28 @@ vi.mock('../infra/config/global/globalConfig.js', async (importOriginal) => {
|
|||||||
const original = await importOriginal() as Record<string, unknown>;
|
const original = await importOriginal() as Record<string, unknown>;
|
||||||
return {
|
return {
|
||||||
...original,
|
...original,
|
||||||
getLanguage: () => languageState.value,
|
loadGlobalConfig: () => ({}),
|
||||||
getBuiltinPiecesEnabled: () => true,
|
|
||||||
getDisabledBuiltins: () => [],
|
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
vi.mock('../infra/config/resolveConfigValue.js', () => ({
|
||||||
|
resolveConfigValue: (_cwd: string, key: string) => {
|
||||||
|
if (key === 'language') return languageState.value;
|
||||||
|
if (key === 'enableBuiltinPieces') return true;
|
||||||
|
if (key === 'disabledBuiltins') return [];
|
||||||
|
return undefined;
|
||||||
|
},
|
||||||
|
resolveConfigValues: (_cwd: string, keys: readonly string[]) => {
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
if (key === 'language') result[key] = languageState.value;
|
||||||
|
if (key === 'enableBuiltinPieces') result[key] = true;
|
||||||
|
if (key === 'disabledBuiltins') result[key] = [];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/resources/index.js', async (importOriginal) => {
|
vi.mock('../infra/resources/index.js', async (importOriginal) => {
|
||||||
const original = await importOriginal() as Record<string, unknown>;
|
const original = await importOriginal() as Record<string, unknown>;
|
||||||
return {
|
return {
|
||||||
@ -45,6 +61,7 @@ vi.mock('../infra/config/global/pieceCategories.js', async (importOriginal) => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
const {
|
const {
|
||||||
|
BUILTIN_CATEGORY_NAME,
|
||||||
getPieceCategories,
|
getPieceCategories,
|
||||||
loadDefaultCategories,
|
loadDefaultCategories,
|
||||||
buildCategorizedPieces,
|
buildCategorizedPieces,
|
||||||
@ -92,7 +109,7 @@ describe('piece category config loading', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should return null when builtin categories file is missing', () => {
|
it('should return null when builtin categories file is missing', () => {
|
||||||
const config = getPieceCategories();
|
const config = getPieceCategories(testDir);
|
||||||
expect(config).toBeNull();
|
expect(config).toBeNull();
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -104,7 +121,7 @@ piece_categories:
|
|||||||
- default
|
- default
|
||||||
`);
|
`);
|
||||||
|
|
||||||
const config = loadDefaultCategories();
|
const config = loadDefaultCategories(testDir);
|
||||||
expect(config).not.toBeNull();
|
expect(config).not.toBeNull();
|
||||||
expect(config!.pieceCategories).toEqual([
|
expect(config!.pieceCategories).toEqual([
|
||||||
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
@ -113,6 +130,7 @@ piece_categories:
|
|||||||
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
]);
|
]);
|
||||||
expect(config!.userPieceCategories).toEqual([]);
|
expect(config!.userPieceCategories).toEqual([]);
|
||||||
|
expect(config!.hasUserCategories).toBe(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use builtin categories when user overlay file is missing', () => {
|
it('should use builtin categories when user overlay file is missing', () => {
|
||||||
@ -125,17 +143,18 @@ show_others_category: true
|
|||||||
others_category_name: Others
|
others_category_name: Others
|
||||||
`);
|
`);
|
||||||
|
|
||||||
const config = getPieceCategories();
|
const config = getPieceCategories(testDir);
|
||||||
expect(config).not.toBeNull();
|
expect(config).not.toBeNull();
|
||||||
expect(config!.pieceCategories).toEqual([
|
expect(config!.pieceCategories).toEqual([
|
||||||
{ name: 'Main', pieces: ['default'], children: [] },
|
{ name: 'Main', pieces: ['default'], children: [] },
|
||||||
]);
|
]);
|
||||||
expect(config!.userPieceCategories).toEqual([]);
|
expect(config!.userPieceCategories).toEqual([]);
|
||||||
|
expect(config!.hasUserCategories).toBe(false);
|
||||||
expect(config!.showOthersCategory).toBe(true);
|
expect(config!.showOthersCategory).toBe(true);
|
||||||
expect(config!.othersCategoryName).toBe('Others');
|
expect(config!.othersCategoryName).toBe('Others');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should merge user overlay categories with builtin categories', () => {
|
it('should separate user categories from builtin categories with builtin wrapper', () => {
|
||||||
writeYaml(join(resourcesDir, 'piece-categories.yaml'), `
|
writeYaml(join(resourcesDir, 'piece-categories.yaml'), `
|
||||||
piece_categories:
|
piece_categories:
|
||||||
Main:
|
Main:
|
||||||
@ -165,18 +184,25 @@ show_others_category: false
|
|||||||
others_category_name: Unclassified
|
others_category_name: Unclassified
|
||||||
`);
|
`);
|
||||||
|
|
||||||
const config = getPieceCategories();
|
const config = getPieceCategories(testDir);
|
||||||
expect(config).not.toBeNull();
|
expect(config).not.toBeNull();
|
||||||
expect(config!.pieceCategories).toEqual([
|
expect(config!.pieceCategories).toEqual([
|
||||||
|
{ name: 'Main', pieces: ['custom'], children: [] },
|
||||||
|
{ name: 'My Team', pieces: ['team-flow'], children: [] },
|
||||||
|
{
|
||||||
|
name: BUILTIN_CATEGORY_NAME,
|
||||||
|
pieces: [],
|
||||||
|
children: [
|
||||||
{
|
{
|
||||||
name: 'Main',
|
name: 'Main',
|
||||||
pieces: ['custom'],
|
pieces: ['default', 'coding'],
|
||||||
children: [
|
children: [
|
||||||
{ name: 'Child', pieces: ['nested'], children: [] },
|
{ name: 'Child', pieces: ['nested'], children: [] },
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
{ name: 'Review', pieces: ['review-only', 'e2e-test'], children: [] },
|
{ name: 'Review', pieces: ['review-only', 'e2e-test'], children: [] },
|
||||||
{ name: 'My Team', pieces: ['team-flow'], children: [] },
|
],
|
||||||
|
},
|
||||||
]);
|
]);
|
||||||
expect(config!.builtinPieceCategories).toEqual([
|
expect(config!.builtinPieceCategories).toEqual([
|
||||||
{
|
{
|
||||||
@ -192,6 +218,7 @@ others_category_name: Unclassified
|
|||||||
{ name: 'Main', pieces: ['custom'], children: [] },
|
{ name: 'Main', pieces: ['custom'], children: [] },
|
||||||
{ name: 'My Team', pieces: ['team-flow'], children: [] },
|
{ name: 'My Team', pieces: ['team-flow'], children: [] },
|
||||||
]);
|
]);
|
||||||
|
expect(config!.hasUserCategories).toBe(true);
|
||||||
expect(config!.showOthersCategory).toBe(false);
|
expect(config!.showOthersCategory).toBe(false);
|
||||||
expect(config!.othersCategoryName).toBe('Unclassified');
|
expect(config!.othersCategoryName).toBe('Unclassified');
|
||||||
});
|
});
|
||||||
@ -207,7 +234,7 @@ piece_categories:
|
|||||||
- e2e-test
|
- e2e-test
|
||||||
`);
|
`);
|
||||||
|
|
||||||
const config = getPieceCategories();
|
const config = getPieceCategories(testDir);
|
||||||
expect(config).not.toBeNull();
|
expect(config).not.toBeNull();
|
||||||
expect(config!.pieceCategories).toEqual([
|
expect(config!.pieceCategories).toEqual([
|
||||||
{ name: 'レビュー', pieces: ['review-only', 'e2e-test'], children: [] },
|
{ name: 'レビュー', pieces: ['review-only', 'e2e-test'], children: [] },
|
||||||
@ -232,7 +259,7 @@ show_others_category: false
|
|||||||
others_category_name: Unclassified
|
others_category_name: Unclassified
|
||||||
`);
|
`);
|
||||||
|
|
||||||
const config = getPieceCategories();
|
const config = getPieceCategories(testDir);
|
||||||
expect(config).not.toBeNull();
|
expect(config).not.toBeNull();
|
||||||
expect(config!.pieceCategories).toEqual([
|
expect(config!.pieceCategories).toEqual([
|
||||||
{ name: 'Main', pieces: ['default'], children: [] },
|
{ name: 'Main', pieces: ['default'], children: [] },
|
||||||
@ -243,6 +270,7 @@ others_category_name: Unclassified
|
|||||||
{ name: 'Review', pieces: ['review-only'], children: [] },
|
{ name: 'Review', pieces: ['review-only'], children: [] },
|
||||||
]);
|
]);
|
||||||
expect(config!.userPieceCategories).toEqual([]);
|
expect(config!.userPieceCategories).toEqual([]);
|
||||||
|
expect(config!.hasUserCategories).toBe(false);
|
||||||
expect(config!.showOthersCategory).toBe(false);
|
expect(config!.showOthersCategory).toBe(false);
|
||||||
expect(config!.othersCategoryName).toBe('Unclassified');
|
expect(config!.othersCategoryName).toBe('Unclassified');
|
||||||
});
|
});
|
||||||
@ -274,11 +302,12 @@ describe('buildCategorizedPieces', () => {
|
|||||||
userPieceCategories: [
|
userPieceCategories: [
|
||||||
{ name: 'My Team', pieces: ['missing-user-piece'], children: [] },
|
{ name: 'My Team', pieces: ['missing-user-piece'], children: [] },
|
||||||
],
|
],
|
||||||
|
hasUserCategories: true,
|
||||||
showOthersCategory: true,
|
showOthersCategory: true,
|
||||||
othersCategoryName: 'Others',
|
othersCategoryName: 'Others',
|
||||||
};
|
};
|
||||||
|
|
||||||
const categorized = buildCategorizedPieces(allPieces, config);
|
const categorized = buildCategorizedPieces(allPieces, config, process.cwd());
|
||||||
expect(categorized.categories).toEqual([
|
expect(categorized.categories).toEqual([
|
||||||
{
|
{
|
||||||
name: 'Main',
|
name: 'Main',
|
||||||
@ -306,11 +335,12 @@ describe('buildCategorizedPieces', () => {
|
|||||||
{ name: 'Main', pieces: ['default'], children: [] },
|
{ name: 'Main', pieces: ['default'], children: [] },
|
||||||
],
|
],
|
||||||
userPieceCategories: [],
|
userPieceCategories: [],
|
||||||
|
hasUserCategories: false,
|
||||||
showOthersCategory: true,
|
showOthersCategory: true,
|
||||||
othersCategoryName: 'Others',
|
othersCategoryName: 'Others',
|
||||||
};
|
};
|
||||||
|
|
||||||
const categorized = buildCategorizedPieces(allPieces, config);
|
const categorized = buildCategorizedPieces(allPieces, config, process.cwd());
|
||||||
expect(categorized.categories).toEqual([
|
expect(categorized.categories).toEqual([
|
||||||
{ name: 'Main', pieces: ['default'], children: [] },
|
{ name: 'Main', pieces: ['default'], children: [] },
|
||||||
{ name: 'Others', pieces: ['extra'], children: [] },
|
{ name: 'Others', pieces: ['extra'], children: [] },
|
||||||
@ -330,13 +360,60 @@ describe('buildCategorizedPieces', () => {
|
|||||||
{ name: 'Main', pieces: ['default'], children: [] },
|
{ name: 'Main', pieces: ['default'], children: [] },
|
||||||
],
|
],
|
||||||
userPieceCategories: [],
|
userPieceCategories: [],
|
||||||
|
hasUserCategories: false,
|
||||||
showOthersCategory: false,
|
showOthersCategory: false,
|
||||||
othersCategoryName: 'Others',
|
othersCategoryName: 'Others',
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const categorized = buildCategorizedPieces(allPieces, config, process.cwd());
|
||||||
|
expect(categorized.categories).toEqual([
|
||||||
|
{ name: 'Main', pieces: ['default'], children: [] },
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should categorize pieces through builtin wrapper node', () => {
|
||||||
|
const allPieces = createPieceMap([
|
||||||
|
{ name: 'custom', source: 'user' },
|
||||||
|
{ name: 'default', source: 'builtin' },
|
||||||
|
{ name: 'review-only', source: 'builtin' },
|
||||||
|
{ name: 'extra', source: 'builtin' },
|
||||||
|
]);
|
||||||
|
const config = {
|
||||||
|
pieceCategories: [
|
||||||
|
{ name: 'My Team', pieces: ['custom'], children: [] },
|
||||||
|
{
|
||||||
|
name: BUILTIN_CATEGORY_NAME,
|
||||||
|
pieces: [],
|
||||||
|
children: [
|
||||||
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
|
{ name: 'Review', pieces: ['review-only'], children: [] },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
builtinPieceCategories: [
|
||||||
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
|
{ name: 'Review', pieces: ['review-only'], children: [] },
|
||||||
|
],
|
||||||
|
userPieceCategories: [
|
||||||
|
{ name: 'My Team', pieces: ['custom'], children: [] },
|
||||||
|
],
|
||||||
|
hasUserCategories: true,
|
||||||
|
showOthersCategory: true,
|
||||||
|
othersCategoryName: 'Others',
|
||||||
|
};
|
||||||
|
|
||||||
const categorized = buildCategorizedPieces(allPieces, config);
|
const categorized = buildCategorizedPieces(allPieces, config);
|
||||||
expect(categorized.categories).toEqual([
|
expect(categorized.categories).toEqual([
|
||||||
{ name: 'Main', pieces: ['default'], children: [] },
|
{ name: 'My Team', pieces: ['custom'], children: [] },
|
||||||
|
{
|
||||||
|
name: BUILTIN_CATEGORY_NAME,
|
||||||
|
pieces: [],
|
||||||
|
children: [
|
||||||
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
|
{ name: 'Review', pieces: ['review-only'], children: [] },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{ name: 'Others', pieces: ['extra'], children: [] },
|
||||||
]);
|
]);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@ -40,7 +40,7 @@ const configMock = vi.hoisted(() => ({
|
|||||||
getPieceCategories: vi.fn(),
|
getPieceCategories: vi.fn(),
|
||||||
buildCategorizedPieces: vi.fn(),
|
buildCategorizedPieces: vi.fn(),
|
||||||
getCurrentPiece: vi.fn(),
|
getCurrentPiece: vi.fn(),
|
||||||
findPieceCategories: vi.fn(() => []),
|
resolveConfigValue: vi.fn(),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => configMock);
|
vi.mock('../infra/config/index.js', () => configMock);
|
||||||
@ -242,6 +242,65 @@ describe('selectPieceFromCategorizedPieces', () => {
|
|||||||
// Should NOT contain the parent category again
|
// Should NOT contain the parent category again
|
||||||
expect(labels.some((l) => l.includes('Dev'))).toBe(false);
|
expect(labels.some((l) => l.includes('Dev'))).toBe(false);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should navigate into builtin wrapper category and select a piece', async () => {
|
||||||
|
const categorized: CategorizedPieces = {
|
||||||
|
categories: [
|
||||||
|
{ name: 'My Team', pieces: ['custom'], children: [] },
|
||||||
|
{
|
||||||
|
name: 'builtin',
|
||||||
|
pieces: [],
|
||||||
|
children: [
|
||||||
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
allPieces: createPieceMap([
|
||||||
|
{ name: 'custom', source: 'user' },
|
||||||
|
{ name: 'default', source: 'builtin' },
|
||||||
|
]),
|
||||||
|
missingPieces: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
// Select builtin category → Quick Start subcategory → piece
|
||||||
|
selectOptionMock
|
||||||
|
.mockResolvedValueOnce('__custom_category__:builtin')
|
||||||
|
.mockResolvedValueOnce('__category__:Quick Start')
|
||||||
|
.mockResolvedValueOnce('default');
|
||||||
|
|
||||||
|
const selected = await selectPieceFromCategorizedPieces(categorized, '');
|
||||||
|
expect(selected).toBe('default');
|
||||||
|
expect(selectOptionMock).toHaveBeenCalledTimes(3);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should show builtin wrapper as a folder in top-level options', async () => {
|
||||||
|
const categorized: CategorizedPieces = {
|
||||||
|
categories: [
|
||||||
|
{ name: 'My Team', pieces: ['custom'], children: [] },
|
||||||
|
{
|
||||||
|
name: 'builtin',
|
||||||
|
pieces: [],
|
||||||
|
children: [
|
||||||
|
{ name: 'Quick Start', pieces: ['default'], children: [] },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
allPieces: createPieceMap([
|
||||||
|
{ name: 'custom', source: 'user' },
|
||||||
|
{ name: 'default', source: 'builtin' },
|
||||||
|
]),
|
||||||
|
missingPieces: [],
|
||||||
|
};
|
||||||
|
|
||||||
|
selectOptionMock.mockResolvedValueOnce(null);
|
||||||
|
|
||||||
|
await selectPieceFromCategorizedPieces(categorized, '');
|
||||||
|
|
||||||
|
const firstCallOptions = selectOptionMock.mock.calls[0]![1] as { label: string; value: string }[];
|
||||||
|
const labels = firstCallOptions.map((o) => o.label);
|
||||||
|
expect(labels.some((l) => l.includes('My Team'))).toBe(true);
|
||||||
|
expect(labels.some((l) => l.includes('builtin'))).toBe(true);
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('selectPiece', () => {
|
describe('selectPiece', () => {
|
||||||
@ -258,13 +317,13 @@ describe('selectPiece', () => {
|
|||||||
configMock.loadAllPiecesWithSources.mockReset();
|
configMock.loadAllPiecesWithSources.mockReset();
|
||||||
configMock.getPieceCategories.mockReset();
|
configMock.getPieceCategories.mockReset();
|
||||||
configMock.buildCategorizedPieces.mockReset();
|
configMock.buildCategorizedPieces.mockReset();
|
||||||
configMock.getCurrentPiece.mockReset();
|
configMock.resolveConfigValue.mockReset();
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should return default piece when no pieces found and fallbackToDefault is true', async () => {
|
it('should return default piece when no pieces found and fallbackToDefault is true', async () => {
|
||||||
configMock.getPieceCategories.mockReturnValue(null);
|
configMock.getPieceCategories.mockReturnValue(null);
|
||||||
configMock.listPieces.mockReturnValue([]);
|
configMock.listPieces.mockReturnValue([]);
|
||||||
configMock.getCurrentPiece.mockReturnValue('default');
|
configMock.resolveConfigValue.mockReturnValue('default');
|
||||||
|
|
||||||
const result = await selectPiece('/cwd');
|
const result = await selectPiece('/cwd');
|
||||||
|
|
||||||
@ -274,7 +333,7 @@ describe('selectPiece', () => {
|
|||||||
it('should return null when no pieces found and fallbackToDefault is false', async () => {
|
it('should return null when no pieces found and fallbackToDefault is false', async () => {
|
||||||
configMock.getPieceCategories.mockReturnValue(null);
|
configMock.getPieceCategories.mockReturnValue(null);
|
||||||
configMock.listPieces.mockReturnValue([]);
|
configMock.listPieces.mockReturnValue([]);
|
||||||
configMock.getCurrentPiece.mockReturnValue('default');
|
configMock.resolveConfigValue.mockReturnValue('default');
|
||||||
|
|
||||||
const result = await selectPiece('/cwd', { fallbackToDefault: false });
|
const result = await selectPiece('/cwd', { fallbackToDefault: false });
|
||||||
|
|
||||||
@ -287,7 +346,7 @@ describe('selectPiece', () => {
|
|||||||
configMock.listPieceEntries.mockReturnValue([
|
configMock.listPieceEntries.mockReturnValue([
|
||||||
{ name: 'only-piece', path: '/tmp/only-piece.yaml', source: 'user' },
|
{ name: 'only-piece', path: '/tmp/only-piece.yaml', source: 'user' },
|
||||||
]);
|
]);
|
||||||
configMock.getCurrentPiece.mockReturnValue('only-piece');
|
configMock.resolveConfigValue.mockReturnValue('only-piece');
|
||||||
selectOptionMock.mockResolvedValueOnce('only-piece');
|
selectOptionMock.mockResolvedValueOnce('only-piece');
|
||||||
|
|
||||||
const result = await selectPiece('/cwd');
|
const result = await selectPiece('/cwd');
|
||||||
@ -307,7 +366,7 @@ describe('selectPiece', () => {
|
|||||||
configMock.getPieceCategories.mockReturnValue({ categories: ['Dev'] });
|
configMock.getPieceCategories.mockReturnValue({ categories: ['Dev'] });
|
||||||
configMock.loadAllPiecesWithSources.mockReturnValue(pieceMap);
|
configMock.loadAllPiecesWithSources.mockReturnValue(pieceMap);
|
||||||
configMock.buildCategorizedPieces.mockReturnValue(categorized);
|
configMock.buildCategorizedPieces.mockReturnValue(categorized);
|
||||||
configMock.getCurrentPiece.mockReturnValue('my-piece');
|
configMock.resolveConfigValue.mockReturnValue('my-piece');
|
||||||
|
|
||||||
selectOptionMock.mockResolvedValueOnce('__current__');
|
selectOptionMock.mockResolvedValueOnce('__current__');
|
||||||
|
|
||||||
@ -321,7 +380,7 @@ describe('selectPiece', () => {
|
|||||||
configMock.getPieceCategories.mockReturnValue(null);
|
configMock.getPieceCategories.mockReturnValue(null);
|
||||||
configMock.listPieces.mockReturnValue(['piece-a', 'piece-b']);
|
configMock.listPieces.mockReturnValue(['piece-a', 'piece-b']);
|
||||||
configMock.listPieceEntries.mockReturnValue(entries);
|
configMock.listPieceEntries.mockReturnValue(entries);
|
||||||
configMock.getCurrentPiece.mockReturnValue('piece-a');
|
configMock.resolveConfigValue.mockReturnValue('piece-a');
|
||||||
|
|
||||||
selectOptionMock
|
selectOptionMock
|
||||||
.mockResolvedValueOnce('custom')
|
.mockResolvedValueOnce('custom')
|
||||||
|
|||||||
@ -90,7 +90,15 @@ vi.mock('../infra/config/index.js', () => ({
|
|||||||
updatePersonaSession: vi.fn(),
|
updatePersonaSession: vi.fn(),
|
||||||
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
loadWorktreeSessions: vi.fn().mockReturnValue({}),
|
||||||
updateWorktreeSession: vi.fn(),
|
updateWorktreeSession: vi.fn(),
|
||||||
loadGlobalConfig: vi.fn().mockReturnValue({ provider: 'claude' }),
|
resolvePieceConfigValues: vi.fn().mockReturnValue({
|
||||||
|
notificationSound: true,
|
||||||
|
notificationSoundEvents: {},
|
||||||
|
provider: 'claude',
|
||||||
|
runtime: undefined,
|
||||||
|
preventSleep: false,
|
||||||
|
model: undefined,
|
||||||
|
observability: undefined,
|
||||||
|
}),
|
||||||
saveSessionState: vi.fn(),
|
saveSessionState: vi.fn(),
|
||||||
ensureDir: vi.fn(),
|
ensureDir: vi.fn(),
|
||||||
writeFileAtomic: vi.fn(),
|
writeFileAtomic: vi.fn(),
|
||||||
|
|||||||
@ -59,7 +59,15 @@ vi.mock('../infra/config/index.js', () => ({
|
|||||||
updatePersonaSession: vi.fn(),
|
updatePersonaSession: vi.fn(),
|
||||||
loadWorktreeSessions: mockLoadWorktreeSessions,
|
loadWorktreeSessions: mockLoadWorktreeSessions,
|
||||||
updateWorktreeSession: vi.fn(),
|
updateWorktreeSession: vi.fn(),
|
||||||
loadGlobalConfig: vi.fn().mockReturnValue({ provider: 'claude' }),
|
resolvePieceConfigValues: vi.fn().mockReturnValue({
|
||||||
|
notificationSound: true,
|
||||||
|
notificationSoundEvents: {},
|
||||||
|
provider: 'claude',
|
||||||
|
runtime: undefined,
|
||||||
|
preventSleep: false,
|
||||||
|
model: undefined,
|
||||||
|
observability: undefined,
|
||||||
|
}),
|
||||||
saveSessionState: vi.fn(),
|
saveSessionState: vi.fn(),
|
||||||
ensureDir: vi.fn(),
|
ensureDir: vi.fn(),
|
||||||
writeFileAtomic: vi.fn(),
|
writeFileAtomic: vi.fn(),
|
||||||
|
|||||||
116
src/__tests__/postExecution.test.ts
Normal file
116
src/__tests__/postExecution.test.ts
Normal file
@ -0,0 +1,116 @@
|
|||||||
|
/**
|
||||||
|
* Tests for postExecution.ts
|
||||||
|
*
|
||||||
|
* Verifies branching logic: existing PR → comment, no PR → create.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||||
|
|
||||||
|
const { mockAutoCommitAndPush, mockPushBranch, mockFindExistingPr, mockCommentOnPr, mockCreatePullRequest, mockBuildPrBody } =
|
||||||
|
vi.hoisted(() => ({
|
||||||
|
mockAutoCommitAndPush: vi.fn(),
|
||||||
|
mockPushBranch: vi.fn(),
|
||||||
|
mockFindExistingPr: vi.fn(),
|
||||||
|
mockCommentOnPr: vi.fn(),
|
||||||
|
mockCreatePullRequest: vi.fn(),
|
||||||
|
mockBuildPrBody: vi.fn(() => 'pr-body'),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/task/index.js', () => ({
|
||||||
|
autoCommitAndPush: (...args: unknown[]) => mockAutoCommitAndPush(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/github/index.js', () => ({
|
||||||
|
pushBranch: (...args: unknown[]) => mockPushBranch(...args),
|
||||||
|
findExistingPr: (...args: unknown[]) => mockFindExistingPr(...args),
|
||||||
|
commentOnPr: (...args: unknown[]) => mockCommentOnPr(...args),
|
||||||
|
createPullRequest: (...args: unknown[]) => mockCreatePullRequest(...args),
|
||||||
|
buildPrBody: (...args: unknown[]) => mockBuildPrBody(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
|
resolvePieceConfigValue: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/prompt/index.js', () => ({
|
||||||
|
confirm: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/ui/index.js', () => ({
|
||||||
|
info: vi.fn(),
|
||||||
|
error: vi.fn(),
|
||||||
|
success: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
createLogger: () => ({
|
||||||
|
info: vi.fn(),
|
||||||
|
debug: vi.fn(),
|
||||||
|
error: vi.fn(),
|
||||||
|
}),
|
||||||
|
}));
|
||||||
|
|
||||||
|
import { postExecutionFlow } from '../features/tasks/execute/postExecution.js';
|
||||||
|
|
||||||
|
const baseOptions = {
|
||||||
|
execCwd: '/clone',
|
||||||
|
projectCwd: '/project',
|
||||||
|
task: 'Fix the bug',
|
||||||
|
branch: 'task/fix-the-bug',
|
||||||
|
baseBranch: 'main',
|
||||||
|
shouldCreatePr: true,
|
||||||
|
pieceIdentifier: 'default',
|
||||||
|
};
|
||||||
|
|
||||||
|
describe('postExecutionFlow', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
mockAutoCommitAndPush.mockReturnValue({ success: true, commitHash: 'abc123' });
|
||||||
|
mockPushBranch.mockReturnValue(undefined);
|
||||||
|
mockCommentOnPr.mockReturnValue({ success: true });
|
||||||
|
mockCreatePullRequest.mockReturnValue({ success: true, url: 'https://github.com/org/repo/pull/1' });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('既存PRがない場合は createPullRequest を呼ぶ', async () => {
|
||||||
|
mockFindExistingPr.mockReturnValue(undefined);
|
||||||
|
|
||||||
|
await postExecutionFlow(baseOptions);
|
||||||
|
|
||||||
|
expect(mockCreatePullRequest).toHaveBeenCalledTimes(1);
|
||||||
|
expect(mockCommentOnPr).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('既存PRがある場合は commentOnPr を呼び createPullRequest は呼ばない', async () => {
|
||||||
|
mockFindExistingPr.mockReturnValue({ number: 42, url: 'https://github.com/org/repo/pull/42' });
|
||||||
|
|
||||||
|
await postExecutionFlow(baseOptions);
|
||||||
|
|
||||||
|
expect(mockCommentOnPr).toHaveBeenCalledWith('/project', 42, 'pr-body');
|
||||||
|
expect(mockCreatePullRequest).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('shouldCreatePr が false の場合は PR 関連処理をスキップする', async () => {
|
||||||
|
await postExecutionFlow({ ...baseOptions, shouldCreatePr: false });
|
||||||
|
|
||||||
|
expect(mockFindExistingPr).not.toHaveBeenCalled();
|
||||||
|
expect(mockCommentOnPr).not.toHaveBeenCalled();
|
||||||
|
expect(mockCreatePullRequest).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('commit がない場合は PR 関連処理をスキップする', async () => {
|
||||||
|
mockAutoCommitAndPush.mockReturnValue({ success: true, commitHash: undefined });
|
||||||
|
|
||||||
|
await postExecutionFlow(baseOptions);
|
||||||
|
|
||||||
|
expect(mockFindExistingPr).not.toHaveBeenCalled();
|
||||||
|
expect(mockCreatePullRequest).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('branch がない場合は PR 関連処理をスキップする', async () => {
|
||||||
|
await postExecutionFlow({ ...baseOptions, branch: undefined });
|
||||||
|
|
||||||
|
expect(mockFindExistingPr).not.toHaveBeenCalled();
|
||||||
|
expect(mockCreatePullRequest).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -2,9 +2,10 @@
|
|||||||
* Tests for prompt module (cursor-based interactive menu)
|
* Tests for prompt module (cursor-based interactive menu)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||||
import { Readable } from 'node:stream';
|
import { Readable } from 'node:stream';
|
||||||
import chalk from 'chalk';
|
import chalk from 'chalk';
|
||||||
|
import { setupRawStdin, restoreStdin } from './helpers/stdinSimulator.js';
|
||||||
import type { SelectOptionItem, KeyInputResult } from '../shared/prompt/index.js';
|
import type { SelectOptionItem, KeyInputResult } from '../shared/prompt/index.js';
|
||||||
import {
|
import {
|
||||||
renderMenu,
|
renderMenu,
|
||||||
@ -331,6 +332,74 @@ describe('prompt', () => {
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe('selectOptionWithDefault (stdin E2E)', () => {
|
||||||
|
afterEach(() => {
|
||||||
|
restoreStdin();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should place cursor on default value and confirm it with Enter', async () => {
|
||||||
|
// Enter key only — confirms whatever the cursor is on
|
||||||
|
setupRawStdin(['\r']);
|
||||||
|
|
||||||
|
const { selectOptionWithDefault } = await import('../shared/prompt/index.js');
|
||||||
|
const options = [
|
||||||
|
{ label: 'plan', value: 'plan' },
|
||||||
|
{ label: 'implement', value: 'implement' },
|
||||||
|
{ label: 'review', value: 'review' },
|
||||||
|
];
|
||||||
|
|
||||||
|
const result = await selectOptionWithDefault('Start from:', options, 'review');
|
||||||
|
|
||||||
|
// If cursor starts at 'review' (index 2), Enter should select it
|
||||||
|
expect(result).toBe('review');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should place cursor on first item when default is the first option', async () => {
|
||||||
|
setupRawStdin(['\r']);
|
||||||
|
|
||||||
|
const { selectOptionWithDefault } = await import('../shared/prompt/index.js');
|
||||||
|
const options = [
|
||||||
|
{ label: 'plan', value: 'plan' },
|
||||||
|
{ label: 'implement', value: 'implement' },
|
||||||
|
{ label: 'review', value: 'review' },
|
||||||
|
];
|
||||||
|
|
||||||
|
const result = await selectOptionWithDefault('Start from:', options, 'plan');
|
||||||
|
|
||||||
|
expect(result).toBe('plan');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should place cursor on middle item when default is the middle option', async () => {
|
||||||
|
setupRawStdin(['\r']);
|
||||||
|
|
||||||
|
const { selectOptionWithDefault } = await import('../shared/prompt/index.js');
|
||||||
|
const options = [
|
||||||
|
{ label: 'plan', value: 'plan' },
|
||||||
|
{ label: 'implement', value: 'implement' },
|
||||||
|
{ label: 'review', value: 'review' },
|
||||||
|
];
|
||||||
|
|
||||||
|
const result = await selectOptionWithDefault('Start from:', options, 'implement');
|
||||||
|
|
||||||
|
expect(result).toBe('implement');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fall back to first item when default value does not exist', async () => {
|
||||||
|
setupRawStdin(['\r']);
|
||||||
|
|
||||||
|
const { selectOptionWithDefault } = await import('../shared/prompt/index.js');
|
||||||
|
const options = [
|
||||||
|
{ label: 'plan', value: 'plan' },
|
||||||
|
{ label: 'implement', value: 'implement' },
|
||||||
|
];
|
||||||
|
|
||||||
|
const result = await selectOptionWithDefault('Start from:', options, 'nonexistent');
|
||||||
|
|
||||||
|
// defaultValue not found → falls back to index 0
|
||||||
|
expect(result).toBe('plan');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
describe('isFullWidth', () => {
|
describe('isFullWidth', () => {
|
||||||
it('should return true for CJK ideographs', () => {
|
it('should return true for CJK ideographs', () => {
|
||||||
expect(isFullWidth('漢'.codePointAt(0)!)).toBe(true);
|
expect(isFullWidth('漢'.codePointAt(0)!)).toBe(true);
|
||||||
|
|||||||
@ -37,12 +37,13 @@ describe('generateReportDir', () => {
|
|||||||
vi.useRealTimers();
|
vi.useRealTimers();
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should preserve Japanese characters in summary', () => {
|
it('should strip CJK characters from summary', () => {
|
||||||
vi.useFakeTimers();
|
vi.useFakeTimers();
|
||||||
vi.setSystemTime(new Date('2025-06-01T12:00:00.000Z'));
|
vi.setSystemTime(new Date('2025-06-01T12:00:00.000Z'));
|
||||||
|
|
||||||
const result = generateReportDir('タスク指示書の実装');
|
const result = generateReportDir('タスク指示書の実装');
|
||||||
expect(result).toContain('タスク指示書の実装');
|
// CJK characters are removed by slugify, leaving empty → falls back to 'task'
|
||||||
|
expect(result).toBe('20250601-120000-task');
|
||||||
|
|
||||||
vi.useRealTimers();
|
vi.useRealTimers();
|
||||||
});
|
});
|
||||||
@ -53,7 +54,7 @@ describe('generateReportDir', () => {
|
|||||||
|
|
||||||
const result = generateReportDir('Fix: bug (#42)');
|
const result = generateReportDir('Fix: bug (#42)');
|
||||||
const slug = result.replace(/^20250101-000000-/, '');
|
const slug = result.replace(/^20250101-000000-/, '');
|
||||||
expect(slug).not.toMatch(/[^a-z0-9\u3040-\u309f\u30a0-\u30ff\u4e00-\u9faf-]/);
|
expect(slug).not.toMatch(/[^a-z0-9-]/);
|
||||||
|
|
||||||
vi.useRealTimers();
|
vi.useRealTimers();
|
||||||
});
|
});
|
||||||
|
|||||||
54
src/__tests__/reset-global-config.test.ts
Normal file
54
src/__tests__/reset-global-config.test.ts
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdtempSync, mkdirSync, readFileSync, writeFileSync, existsSync, rmSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import { resetGlobalConfigToTemplate } from '../infra/config/global/resetConfig.js';
|
||||||
|
|
||||||
|
describe('resetGlobalConfigToTemplate', () => {
|
||||||
|
const originalEnv = process.env;
|
||||||
|
let testRoot: string;
|
||||||
|
let taktDir: string;
|
||||||
|
let configPath: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
testRoot = mkdtempSync(join(tmpdir(), 'takt-reset-config-'));
|
||||||
|
taktDir = join(testRoot, '.takt');
|
||||||
|
mkdirSync(taktDir, { recursive: true });
|
||||||
|
configPath = join(taktDir, 'config.yaml');
|
||||||
|
process.env = { ...originalEnv, TAKT_CONFIG_DIR: taktDir };
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
process.env = originalEnv;
|
||||||
|
rmSync(testRoot, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should backup existing config and replace with language-matched template', () => {
|
||||||
|
writeFileSync(configPath, ['language: ja', 'provider: mock'].join('\n'), 'utf-8');
|
||||||
|
|
||||||
|
const result = resetGlobalConfigToTemplate(new Date('2026-02-19T12:00:00Z'));
|
||||||
|
|
||||||
|
expect(result.language).toBe('ja');
|
||||||
|
expect(result.backupPath).toBeDefined();
|
||||||
|
expect(existsSync(result.backupPath!)).toBe(true);
|
||||||
|
expect(readFileSync(result.backupPath!, 'utf-8')).toContain('provider: mock');
|
||||||
|
|
||||||
|
const newConfig = readFileSync(configPath, 'utf-8');
|
||||||
|
expect(newConfig).toContain('language: ja');
|
||||||
|
expect(newConfig).toContain('branch_name_strategy: ai');
|
||||||
|
expect(newConfig).toContain('concurrency: 2');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create config from default language template when config does not exist', () => {
|
||||||
|
rmSync(configPath, { force: true });
|
||||||
|
|
||||||
|
const result = resetGlobalConfigToTemplate(new Date('2026-02-19T12:00:00Z'));
|
||||||
|
|
||||||
|
expect(result.backupPath).toBeUndefined();
|
||||||
|
expect(result.language).toBe('en');
|
||||||
|
expect(existsSync(configPath)).toBe(true);
|
||||||
|
const newConfig = readFileSync(configPath, 'utf-8');
|
||||||
|
expect(newConfig).toContain('language: en');
|
||||||
|
expect(newConfig).toContain('branch_name_strategy: ai');
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -31,13 +31,14 @@ describe('resetCategoriesToDefault', () => {
|
|||||||
|
|
||||||
it('should reset user category overlay and show updated message', async () => {
|
it('should reset user category overlay and show updated message', async () => {
|
||||||
// Given
|
// Given
|
||||||
|
const cwd = '/tmp/test-cwd';
|
||||||
|
|
||||||
// When
|
// When
|
||||||
await resetCategoriesToDefault();
|
await resetCategoriesToDefault(cwd);
|
||||||
|
|
||||||
// Then
|
// Then
|
||||||
expect(mockHeader).toHaveBeenCalledWith('Reset Categories');
|
expect(mockHeader).toHaveBeenCalledWith('Reset Categories');
|
||||||
expect(mockResetPieceCategories).toHaveBeenCalledTimes(1);
|
expect(mockResetPieceCategories).toHaveBeenCalledWith(cwd);
|
||||||
expect(mockSuccess).toHaveBeenCalledWith('User category overlay reset.');
|
expect(mockSuccess).toHaveBeenCalledWith('User category overlay reset.');
|
||||||
expect(mockInfo).toHaveBeenCalledWith(' /tmp/user-piece-categories.yaml');
|
expect(mockInfo).toHaveBeenCalledWith(' /tmp/user-piece-categories.yaml');
|
||||||
});
|
});
|
||||||
|
|||||||
86
src/__tests__/resolveTask.test.ts
Normal file
86
src/__tests__/resolveTask.test.ts
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
/**
|
||||||
|
* Tests for task execution resolution.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, afterEach } from 'vitest';
|
||||||
|
import * as fs from 'node:fs';
|
||||||
|
import * as os from 'node:os';
|
||||||
|
import * as path from 'node:path';
|
||||||
|
import type { TaskInfo } from '../infra/task/index.js';
|
||||||
|
import { resolveTaskExecution } from '../features/tasks/execute/resolveTask.js';
|
||||||
|
|
||||||
|
const tempRoots = new Set<string>();
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
for (const root of tempRoots) {
|
||||||
|
fs.rmSync(root, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
tempRoots.clear();
|
||||||
|
});
|
||||||
|
|
||||||
|
function createTempProjectDir(): string {
|
||||||
|
const root = fs.mkdtempSync(path.join(os.tmpdir(), 'takt-resolve-task-test-'));
|
||||||
|
tempRoots.add(root);
|
||||||
|
return root;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createTask(overrides: Partial<TaskInfo>): TaskInfo {
|
||||||
|
return {
|
||||||
|
filePath: '/tasks/task.yaml',
|
||||||
|
name: 'task-name',
|
||||||
|
content: 'Run task',
|
||||||
|
createdAt: '2026-01-01T00:00:00.000Z',
|
||||||
|
status: 'pending',
|
||||||
|
data: { task: 'Run task' },
|
||||||
|
...overrides,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
describe('resolveTaskExecution', () => {
|
||||||
|
it('should return defaults when task data is null', async () => {
|
||||||
|
const root = createTempProjectDir();
|
||||||
|
const task = createTask({ data: null });
|
||||||
|
|
||||||
|
const result = await resolveTaskExecution(task, root, 'default');
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
execCwd: root,
|
||||||
|
execPiece: 'default',
|
||||||
|
isWorktree: false,
|
||||||
|
autoPr: false,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should generate report context and copy issue-bearing task spec', async () => {
|
||||||
|
const root = createTempProjectDir();
|
||||||
|
const taskDir = '.takt/tasks/issue-task-123';
|
||||||
|
const sourceTaskDir = path.join(root, taskDir);
|
||||||
|
const sourceOrderPath = path.join(sourceTaskDir, 'order.md');
|
||||||
|
fs.mkdirSync(sourceTaskDir, { recursive: true });
|
||||||
|
fs.writeFileSync(sourceOrderPath, '# task instruction');
|
||||||
|
|
||||||
|
const task = createTask({
|
||||||
|
taskDir,
|
||||||
|
data: {
|
||||||
|
task: 'Run issue task',
|
||||||
|
issue: 12345,
|
||||||
|
auto_pr: true,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const result = await resolveTaskExecution(task, root, 'default');
|
||||||
|
const expectedReportOrderPath = path.join(root, '.takt', 'runs', 'issue-task-123', 'context', 'task', 'order.md');
|
||||||
|
|
||||||
|
expect(result).toMatchObject({
|
||||||
|
execCwd: root,
|
||||||
|
execPiece: 'default',
|
||||||
|
isWorktree: false,
|
||||||
|
autoPr: true,
|
||||||
|
reportDirName: 'issue-task-123',
|
||||||
|
issueNumber: 12345,
|
||||||
|
taskPrompt: expect.stringContaining('Primary spec: `.takt/runs/issue-task-123/context/task/order.md`'),
|
||||||
|
});
|
||||||
|
expect(fs.existsSync(expectedReportOrderPath)).toBe(true);
|
||||||
|
expect(fs.readFileSync(expectedReportOrderPath, 'utf-8')).toBe('# task instruction');
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -9,6 +9,7 @@ function createRetryContext(overrides?: Partial<RetryContext>): RetryContext {
|
|||||||
return {
|
return {
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'my-task',
|
taskName: 'my-task',
|
||||||
|
taskContent: 'Do something',
|
||||||
createdAt: '2026-02-15T10:00:00Z',
|
createdAt: '2026-02-15T10:00:00Z',
|
||||||
failedMovement: 'review',
|
failedMovement: 'review',
|
||||||
error: 'Timeout',
|
error: 'Timeout',
|
||||||
@ -23,6 +24,7 @@ function createRetryContext(overrides?: Partial<RetryContext>): RetryContext {
|
|||||||
movementPreviews: [],
|
movementPreviews: [],
|
||||||
},
|
},
|
||||||
run: null,
|
run: null,
|
||||||
|
previousOrderContent: null,
|
||||||
...overrides,
|
...overrides,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@ -44,6 +46,7 @@ describe('buildRetryTemplateVars', () => {
|
|||||||
const ctx = createRetryContext({
|
const ctx = createRetryContext({
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'task',
|
taskName: 'task',
|
||||||
|
taskContent: 'Do something',
|
||||||
createdAt: '2026-01-01T00:00:00Z',
|
createdAt: '2026-01-01T00:00:00Z',
|
||||||
failedMovement: '',
|
failedMovement: '',
|
||||||
error: 'Error',
|
error: 'Error',
|
||||||
@ -129,10 +132,27 @@ describe('buildRetryTemplateVars', () => {
|
|||||||
expect(vars.movementDetails).toContain('Architect');
|
expect(vars.movementDetails).toContain('Architect');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should set hasOrderContent=false and empty orderContent when previousOrderContent is null (via ctx)', () => {
|
||||||
|
const ctx = createRetryContext({ previousOrderContent: null });
|
||||||
|
const vars = buildRetryTemplateVars(ctx, 'en');
|
||||||
|
|
||||||
|
expect(vars.hasOrderContent).toBe(false);
|
||||||
|
expect(vars.orderContent).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set hasOrderContent=true and populate orderContent when provided via parameter', () => {
|
||||||
|
const ctx = createRetryContext();
|
||||||
|
const vars = buildRetryTemplateVars(ctx, 'en', '# Order content');
|
||||||
|
|
||||||
|
expect(vars.hasOrderContent).toBe(true);
|
||||||
|
expect(vars.orderContent).toBe('# Order content');
|
||||||
|
});
|
||||||
|
|
||||||
it('should include retryNote when present', () => {
|
it('should include retryNote when present', () => {
|
||||||
const ctx = createRetryContext({
|
const ctx = createRetryContext({
|
||||||
failure: {
|
failure: {
|
||||||
taskName: 'task',
|
taskName: 'task',
|
||||||
|
taskContent: 'Do something',
|
||||||
createdAt: '2026-01-01T00:00:00Z',
|
createdAt: '2026-01-01T00:00:00Z',
|
||||||
failedMovement: '',
|
failedMovement: '',
|
||||||
error: 'Error',
|
error: 'Error',
|
||||||
@ -144,4 +164,28 @@ describe('buildRetryTemplateVars', () => {
|
|||||||
|
|
||||||
expect(vars.retryNote).toBe('Added more specific error handling');
|
expect(vars.retryNote).toBe('Added more specific error handling');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should set hasOrderContent=false when previousOrderContent is null', () => {
|
||||||
|
const ctx = createRetryContext();
|
||||||
|
const vars = buildRetryTemplateVars(ctx, 'en', null);
|
||||||
|
|
||||||
|
expect(vars.hasOrderContent).toBe(false);
|
||||||
|
expect(vars.orderContent).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set hasOrderContent=true and populate orderContent when provided', () => {
|
||||||
|
const ctx = createRetryContext();
|
||||||
|
const vars = buildRetryTemplateVars(ctx, 'en', '# Previous Order\nDo the thing');
|
||||||
|
|
||||||
|
expect(vars.hasOrderContent).toBe(true);
|
||||||
|
expect(vars.orderContent).toBe('# Previous Order\nDo the thing');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should default hasOrderContent to false when previousOrderContent is omitted', () => {
|
||||||
|
const ctx = createRetryContext();
|
||||||
|
const vars = buildRetryTemplateVars(ctx, 'en');
|
||||||
|
|
||||||
|
expect(vars.hasOrderContent).toBe(false);
|
||||||
|
expect(vars.orderContent).toBe('');
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
198
src/__tests__/retrySlashCommand.test.ts
Normal file
198
src/__tests__/retrySlashCommand.test.ts
Normal file
@ -0,0 +1,198 @@
|
|||||||
|
/**
|
||||||
|
* Tests for /retry slash command in the conversation loop.
|
||||||
|
*
|
||||||
|
* Verifies:
|
||||||
|
* - /retry with previousOrderContent returns execute action with order content
|
||||||
|
* - /retry without previousOrderContent shows error and continues loop
|
||||||
|
* - /retry in retry mode with order.md context in system prompt
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { mkdirSync, rmSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { tmpdir } from 'node:os';
|
||||||
|
import {
|
||||||
|
setupRawStdin,
|
||||||
|
restoreStdin,
|
||||||
|
toRawInputs,
|
||||||
|
createMockProvider,
|
||||||
|
type MockProviderCapture,
|
||||||
|
} from './helpers/stdinSimulator.js';
|
||||||
|
|
||||||
|
// --- Mocks (infrastructure only) ---
|
||||||
|
|
||||||
|
vi.mock('../infra/fs/session.js', () => ({
|
||||||
|
loadNdjsonLog: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/global/globalConfig.js', () => ({
|
||||||
|
loadGlobalConfig: vi.fn(() => ({ provider: 'mock', language: 'en' })),
|
||||||
|
getBuiltinPiecesEnabled: vi.fn().mockReturnValue(true),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/providers/index.js', () => ({
|
||||||
|
getProvider: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
createLogger: () => ({
|
||||||
|
info: vi.fn(),
|
||||||
|
debug: vi.fn(),
|
||||||
|
error: vi.fn(),
|
||||||
|
}),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/context.js', () => ({
|
||||||
|
isQuietMode: vi.fn(() => false),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../infra/config/paths.js', async (importOriginal) => ({
|
||||||
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
|
loadPersonaSessions: vi.fn(() => ({})),
|
||||||
|
updatePersonaSession: vi.fn(),
|
||||||
|
getProjectConfigDir: vi.fn(() => '/tmp'),
|
||||||
|
loadSessionState: vi.fn(() => null),
|
||||||
|
clearSessionState: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/ui/index.js', () => ({
|
||||||
|
info: vi.fn(),
|
||||||
|
error: vi.fn(),
|
||||||
|
blankLine: vi.fn(),
|
||||||
|
StreamDisplay: vi.fn().mockImplementation(() => ({
|
||||||
|
createHandler: vi.fn(() => vi.fn()),
|
||||||
|
flush: vi.fn(),
|
||||||
|
})),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/prompt/index.js', () => ({
|
||||||
|
selectOption: vi.fn().mockResolvedValue('execute'),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../shared/i18n/index.js', () => ({
|
||||||
|
getLabel: vi.fn((_key: string, _lang: string) => 'Mock label'),
|
||||||
|
getLabelObject: vi.fn(() => ({
|
||||||
|
intro: 'Retry intro',
|
||||||
|
resume: 'Resume',
|
||||||
|
noConversation: 'No conversation',
|
||||||
|
summarizeFailed: 'Summarize failed',
|
||||||
|
continuePrompt: 'Continue?',
|
||||||
|
proposed: 'Proposed:',
|
||||||
|
actionPrompt: 'What next?',
|
||||||
|
playNoTask: 'No task',
|
||||||
|
cancelled: 'Cancelled',
|
||||||
|
retryNoOrder: 'No previous order found.',
|
||||||
|
actions: { execute: 'Execute', saveTask: 'Save', continue: 'Continue' },
|
||||||
|
})),
|
||||||
|
}));
|
||||||
|
|
||||||
|
// --- Imports (after mocks) ---
|
||||||
|
|
||||||
|
import { getProvider } from '../infra/providers/index.js';
|
||||||
|
import { runRetryMode, type RetryContext } from '../features/interactive/retryMode.js';
|
||||||
|
import { info } from '../shared/ui/index.js';
|
||||||
|
|
||||||
|
const mockGetProvider = vi.mocked(getProvider);
|
||||||
|
const mockInfo = vi.mocked(info);
|
||||||
|
|
||||||
|
function createTmpDir(): string {
|
||||||
|
const dir = join(tmpdir(), `takt-retry-cmd-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||||
|
mkdirSync(dir, { recursive: true });
|
||||||
|
return dir;
|
||||||
|
}
|
||||||
|
|
||||||
|
function setupProvider(responses: string[]): MockProviderCapture {
|
||||||
|
const { provider, capture } = createMockProvider(responses);
|
||||||
|
mockGetProvider.mockReturnValue(provider);
|
||||||
|
return capture;
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildRetryContext(overrides?: Partial<RetryContext>): RetryContext {
|
||||||
|
return {
|
||||||
|
failure: {
|
||||||
|
taskName: 'test-task',
|
||||||
|
taskContent: 'Test task content',
|
||||||
|
createdAt: '2026-02-15T10:00:00Z',
|
||||||
|
failedMovement: 'implement',
|
||||||
|
error: 'Some error',
|
||||||
|
lastMessage: '',
|
||||||
|
retryNote: '',
|
||||||
|
},
|
||||||
|
branchName: 'takt/test-task',
|
||||||
|
pieceContext: {
|
||||||
|
name: 'default',
|
||||||
|
description: '',
|
||||||
|
pieceStructure: '',
|
||||||
|
movementPreviews: [],
|
||||||
|
},
|
||||||
|
run: null,
|
||||||
|
previousOrderContent: null,
|
||||||
|
...overrides,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Tests ---
|
||||||
|
|
||||||
|
describe('/retry slash command', () => {
|
||||||
|
let tmpDir: string;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
tmpDir = createTmpDir();
|
||||||
|
vi.clearAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
restoreStdin();
|
||||||
|
rmSync(tmpDir, { recursive: true, force: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should execute with previous order content when /retry is used', async () => {
|
||||||
|
const orderContent = '# Task Order\n\nImplement feature X with tests.';
|
||||||
|
setupRawStdin(toRawInputs(['/retry']));
|
||||||
|
setupProvider([]);
|
||||||
|
|
||||||
|
const retryContext = buildRetryContext({ previousOrderContent: orderContent });
|
||||||
|
const result = await runRetryMode(tmpDir, retryContext, orderContent);
|
||||||
|
|
||||||
|
expect(result.action).toBe('execute');
|
||||||
|
expect(result.task).toBe(orderContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should show error and continue when /retry is used without order', async () => {
|
||||||
|
setupRawStdin(toRawInputs(['/retry', '/cancel']));
|
||||||
|
setupProvider([]);
|
||||||
|
|
||||||
|
const retryContext = buildRetryContext({ previousOrderContent: null });
|
||||||
|
const result = await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
|
expect(mockInfo).toHaveBeenCalledWith('No previous order found.');
|
||||||
|
expect(result.action).toBe('cancel');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should inject order.md content into retry system prompt', async () => {
|
||||||
|
const orderContent = '# Build login page\n\nWith OAuth2 support.';
|
||||||
|
setupRawStdin(toRawInputs(['check the order', '/cancel']));
|
||||||
|
const capture = setupProvider(['I see the order content.']);
|
||||||
|
|
||||||
|
const retryContext = buildRetryContext({ previousOrderContent: orderContent });
|
||||||
|
await runRetryMode(tmpDir, retryContext, orderContent);
|
||||||
|
|
||||||
|
expect(capture.systemPrompts.length).toBeGreaterThan(0);
|
||||||
|
const systemPrompt = capture.systemPrompts[0]!;
|
||||||
|
expect(systemPrompt).toContain('Previous Order');
|
||||||
|
expect(systemPrompt).toContain(orderContent);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include order section when no order content', async () => {
|
||||||
|
setupRawStdin(toRawInputs(['check the order', '/cancel']));
|
||||||
|
const capture = setupProvider(['No order found.']);
|
||||||
|
|
||||||
|
const retryContext = buildRetryContext({ previousOrderContent: null });
|
||||||
|
await runRetryMode(tmpDir, retryContext, null);
|
||||||
|
|
||||||
|
expect(capture.systemPrompts.length).toBeGreaterThan(0);
|
||||||
|
const systemPrompt = capture.systemPrompts[0]!;
|
||||||
|
expect(systemPrompt).not.toContain('Previous Order');
|
||||||
|
});
|
||||||
|
});
|
||||||
@ -5,25 +5,44 @@
|
|||||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||||
import type { TaskInfo } from '../infra/task/index.js';
|
import type { TaskInfo } from '../infra/task/index.js';
|
||||||
|
|
||||||
// Mock dependencies before importing the module under test
|
const { mockLoadConfigRaw } = vi.hoisted(() => ({
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
mockLoadConfigRaw: vi.fn(() => ({
|
||||||
loadPieceByIdentifier: vi.fn(),
|
|
||||||
isPiecePath: vi.fn(() => false),
|
|
||||||
loadGlobalConfig: vi.fn(() => ({
|
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
concurrency: 1,
|
concurrency: 1,
|
||||||
taskPollIntervalMs: 500,
|
taskPollIntervalMs: 500,
|
||||||
})),
|
})),
|
||||||
loadProjectConfig: vi.fn(() => ({
|
|
||||||
piece: 'default',
|
|
||||||
permissionMode: 'default',
|
|
||||||
})),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
import { loadGlobalConfig } from '../infra/config/index.js';
|
// Mock dependencies before importing the module under test
|
||||||
const mockLoadGlobalConfig = vi.mocked(loadGlobalConfig);
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
|
loadPieceByIdentifier: vi.fn(),
|
||||||
|
isPiecePath: vi.fn(() => false),
|
||||||
|
loadConfig: (...args: unknown[]) => {
|
||||||
|
const raw = mockLoadConfigRaw(...args) as Record<string, unknown>;
|
||||||
|
if ('global' in raw && 'project' in raw) {
|
||||||
|
return raw;
|
||||||
|
}
|
||||||
|
return {
|
||||||
|
global: raw,
|
||||||
|
project: { piece: 'default' },
|
||||||
|
};
|
||||||
|
},
|
||||||
|
resolvePieceConfigValues: (_projectDir: string, keys: readonly string[]) => {
|
||||||
|
const raw = mockLoadConfigRaw() as Record<string, unknown>;
|
||||||
|
const config = ('global' in raw && 'project' in raw)
|
||||||
|
? { ...raw.global as Record<string, unknown>, ...raw.project as Record<string, unknown> }
|
||||||
|
: { ...raw, piece: 'default', provider: 'claude', verbose: false };
|
||||||
|
const result: Record<string, unknown> = {};
|
||||||
|
for (const key of keys) {
|
||||||
|
result[key] = config[key];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
|
const mockLoadConfig = mockLoadConfigRaw;
|
||||||
|
|
||||||
const {
|
const {
|
||||||
mockClaimNextTasks,
|
mockClaimNextTasks,
|
||||||
@ -167,7 +186,7 @@ beforeEach(() => {
|
|||||||
describe('runAllTasks concurrency', () => {
|
describe('runAllTasks concurrency', () => {
|
||||||
describe('sequential execution (concurrency=1)', () => {
|
describe('sequential execution (concurrency=1)', () => {
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -210,7 +229,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
|
|
||||||
describe('parallel execution (concurrency>1)', () => {
|
describe('parallel execution (concurrency>1)', () => {
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -288,7 +307,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
describe('default concurrency', () => {
|
describe('default concurrency', () => {
|
||||||
it('should default to sequential when concurrency is not set', async () => {
|
it('should default to sequential when concurrency is not set', async () => {
|
||||||
// Given: Config without explicit concurrency (defaults to 1)
|
// Given: Config without explicit concurrency (defaults to 1)
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -324,7 +343,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
};
|
};
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -371,7 +390,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
|
|
||||||
it('should fill slots immediately when a task completes (no batch waiting)', async () => {
|
it('should fill slots immediately when a task completes (no batch waiting)', async () => {
|
||||||
// Given: 3 tasks, concurrency=2, task1 finishes quickly, task2 takes longer
|
// Given: 3 tasks, concurrency=2, task1 finishes quickly, task2 takes longer
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -413,7 +432,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
|
|
||||||
it('should count partial failures correctly', async () => {
|
it('should count partial failures correctly', async () => {
|
||||||
// Given: 3 tasks, 1 fails, 2 succeed
|
// Given: 3 tasks, 1 fails, 2 succeed
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -495,7 +514,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
|
|
||||||
it('should pass abortSignal but not taskPrefix in sequential mode', async () => {
|
it('should pass abortSignal but not taskPrefix in sequential mode', async () => {
|
||||||
// Given: Sequential mode
|
// Given: Sequential mode
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -525,7 +544,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should only notify once at run completion when multiple tasks succeed', async () => {
|
it('should only notify once at run completion when multiple tasks succeed', async () => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -550,7 +569,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should not notify run completion when runComplete is explicitly false', async () => {
|
it('should not notify run completion when runComplete is explicitly false', async () => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -572,7 +591,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should notify run completion by default when notification_sound_events is not set', async () => {
|
it('should notify run completion by default when notification_sound_events is not set', async () => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -594,7 +613,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should notify run abort by default when notification_sound_events is not set', async () => {
|
it('should notify run abort by default when notification_sound_events is not set', async () => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -617,7 +636,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should not notify run abort when runAbort is explicitly false', async () => {
|
it('should not notify run abort when runAbort is explicitly false', async () => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -640,7 +659,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should notify run abort and rethrow when worker pool throws', async () => {
|
it('should notify run abort and rethrow when worker pool throws', async () => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
@ -675,7 +694,7 @@ describe('runAllTasks concurrency', () => {
|
|||||||
};
|
};
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
mockLoadConfig.mockReturnValue({
|
||||||
language: 'en',
|
language: 'en',
|
||||||
defaultPiece: 'default',
|
defaultPiece: 'default',
|
||||||
logLevel: 'info',
|
logLevel: 'info',
|
||||||
|
|||||||
@ -4,6 +4,18 @@ import * as path from 'node:path';
|
|||||||
import { tmpdir } from 'node:os';
|
import { tmpdir } from 'node:os';
|
||||||
import { parse as parseYaml } from 'yaml';
|
import { parse as parseYaml } from 'yaml';
|
||||||
|
|
||||||
|
vi.mock('../infra/task/summarize.js', () => ({
|
||||||
|
summarizeTaskName: vi.fn().mockImplementation((content: string) => {
|
||||||
|
const slug = content.split('\n')[0]!
|
||||||
|
.toLowerCase()
|
||||||
|
.replace(/[^a-z0-9]+/g, '-')
|
||||||
|
.replace(/^-+|-+$/g, '')
|
||||||
|
.slice(0, 30)
|
||||||
|
.replace(/-+$/, '');
|
||||||
|
return Promise.resolve(slug || 'task');
|
||||||
|
}),
|
||||||
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/ui/index.js', () => ({
|
vi.mock('../shared/ui/index.js', () => ({
|
||||||
success: vi.fn(),
|
success: vi.fn(),
|
||||||
info: vi.fn(),
|
info: vi.fn(),
|
||||||
@ -66,6 +78,8 @@ describe('saveTaskFile', () => {
|
|||||||
expect(tasks).toHaveLength(1);
|
expect(tasks).toHaveLength(1);
|
||||||
expect(tasks[0]?.content).toBeUndefined();
|
expect(tasks[0]?.content).toBeUndefined();
|
||||||
expect(tasks[0]?.task_dir).toBeTypeOf('string');
|
expect(tasks[0]?.task_dir).toBeTypeOf('string');
|
||||||
|
expect(tasks[0]?.slug).toBeTypeOf('string');
|
||||||
|
expect(tasks[0]?.summary).toBe('Implement feature X');
|
||||||
const taskDir = path.join(testDir, String(tasks[0]?.task_dir));
|
const taskDir = path.join(testDir, String(tasks[0]?.task_dir));
|
||||||
expect(fs.existsSync(path.join(taskDir, 'order.md'))).toBe(true);
|
expect(fs.existsSync(path.join(taskDir, 'order.md'))).toBe(true);
|
||||||
expect(fs.readFileSync(path.join(taskDir, 'order.md'), 'utf-8')).toContain('Implement feature X');
|
expect(fs.readFileSync(path.join(taskDir, 'order.md'), 'utf-8')).toContain('Implement feature X');
|
||||||
|
|||||||
@ -9,6 +9,7 @@ const {
|
|||||||
mockCompleteTask,
|
mockCompleteTask,
|
||||||
mockFailTask,
|
mockFailTask,
|
||||||
mockExecuteTask,
|
mockExecuteTask,
|
||||||
|
mockResolvePieceConfigValue,
|
||||||
} = vi.hoisted(() => ({
|
} = vi.hoisted(() => ({
|
||||||
mockAddTask: vi.fn(() => ({
|
mockAddTask: vi.fn(() => ({
|
||||||
name: 'test-task',
|
name: 'test-task',
|
||||||
@ -21,6 +22,7 @@ const {
|
|||||||
mockCompleteTask: vi.fn(),
|
mockCompleteTask: vi.fn(),
|
||||||
mockFailTask: vi.fn(),
|
mockFailTask: vi.fn(),
|
||||||
mockExecuteTask: vi.fn(),
|
mockExecuteTask: vi.fn(),
|
||||||
|
mockResolvePieceConfigValue: vi.fn((_: string, key: string) => (key === 'autoPr' ? undefined : 'default')),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/prompt/index.js', () => ({
|
vi.mock('../shared/prompt/index.js', () => ({
|
||||||
@ -28,11 +30,10 @@ vi.mock('../shared/prompt/index.js', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
getCurrentPiece: vi.fn(),
|
resolvePieceConfigValue: (...args: unknown[]) => mockResolvePieceConfigValue(...args),
|
||||||
listPieces: vi.fn(() => ['default']),
|
listPieces: vi.fn(() => ['default']),
|
||||||
listPieceEntries: vi.fn(() => []),
|
listPieceEntries: vi.fn(() => []),
|
||||||
isPiecePath: vi.fn(() => false),
|
isPiecePath: vi.fn(() => false),
|
||||||
loadGlobalConfig: vi.fn(() => ({})),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/task/index.js', () => ({
|
vi.mock('../infra/task/index.js', () => ({
|
||||||
@ -102,7 +103,7 @@ beforeEach(() => {
|
|||||||
|
|
||||||
describe('resolveAutoPr default in selectAndExecuteTask', () => {
|
describe('resolveAutoPr default in selectAndExecuteTask', () => {
|
||||||
it('should call auto-PR confirm with default true when no CLI option or config', async () => {
|
it('should call auto-PR confirm with default true when no CLI option or config', async () => {
|
||||||
// Given: worktree is enabled via override, no autoPr option, no global config autoPr
|
// Given: worktree is enabled via override, no autoPr option, no config autoPr
|
||||||
mockConfirm.mockResolvedValue(true);
|
mockConfirm.mockResolvedValue(true);
|
||||||
mockSummarizeTaskName.mockResolvedValue('test-task');
|
mockSummarizeTaskName.mockResolvedValue('test-task');
|
||||||
mockCreateSharedClone.mockReturnValue({
|
mockCreateSharedClone.mockReturnValue({
|
||||||
@ -121,10 +122,7 @@ describe('resolveAutoPr default in selectAndExecuteTask', () => {
|
|||||||
createWorktree: true,
|
createWorktree: true,
|
||||||
});
|
});
|
||||||
|
|
||||||
// Then: the 'Create pull request?' confirm is called with default true
|
const autoPrCall = mockConfirm.mock.calls.find((call) => call[0] === 'Create pull request?');
|
||||||
const autoPrCall = mockConfirm.mock.calls.find(
|
|
||||||
(call) => call[0] === 'Create pull request?',
|
|
||||||
);
|
|
||||||
expect(autoPrCall).toBeDefined();
|
expect(autoPrCall).toBeDefined();
|
||||||
expect(autoPrCall![1]).toBe(true);
|
expect(autoPrCall![1]).toBe(true);
|
||||||
});
|
});
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
/**
|
/**
|
||||||
* Unit tests for slugify utility
|
* Unit tests for slugify utility
|
||||||
*
|
*
|
||||||
* Tests URL/filename-safe slug generation with CJK support.
|
* Tests URL/filename-safe slug generation (a-z 0-9 hyphen, max 30 chars).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { describe, it, expect } from 'vitest';
|
import { describe, it, expect } from 'vitest';
|
||||||
@ -25,17 +25,17 @@ describe('slugify', () => {
|
|||||||
expect(slugify(' hello ')).toBe('hello');
|
expect(slugify(' hello ')).toBe('hello');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should truncate to 50 characters', () => {
|
it('should truncate to 30 characters', () => {
|
||||||
const long = 'a'.repeat(100);
|
const long = 'a'.repeat(100);
|
||||||
expect(slugify(long).length).toBeLessThanOrEqual(50);
|
expect(slugify(long).length).toBeLessThanOrEqual(30);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should preserve CJK characters', () => {
|
it('should strip CJK characters', () => {
|
||||||
expect(slugify('タスク指示書')).toBe('タスク指示書');
|
expect(slugify('タスク指示書')).toBe('');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should handle mixed ASCII and CJK', () => {
|
it('should handle mixed ASCII and CJK', () => {
|
||||||
expect(slugify('Add タスク Feature')).toBe('add-タスク-feature');
|
expect(slugify('Add タスク Feature')).toBe('add-feature');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should handle numbers', () => {
|
it('should handle numbers', () => {
|
||||||
@ -43,11 +43,18 @@ describe('slugify', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
it('should handle empty result after stripping', () => {
|
it('should handle empty result after stripping', () => {
|
||||||
// All special characters → becomes empty string
|
|
||||||
expect(slugify('!@#$%')).toBe('');
|
expect(slugify('!@#$%')).toBe('');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should handle typical GitHub issue titles', () => {
|
it('should handle typical GitHub issue titles', () => {
|
||||||
expect(slugify('Fix: login not working (#42)')).toBe('fix-login-not-working-42');
|
expect(slugify('Fix: login not working (#42)')).toBe('fix-login-not-working-42');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should strip trailing hyphen after truncation', () => {
|
||||||
|
// 30 chars of slug that ends with a hyphen after slice
|
||||||
|
const input = 'abcdefghijklmnopqrstuvwxyz-abc-xyz';
|
||||||
|
const result = slugify(input);
|
||||||
|
expect(result.length).toBeLessThanOrEqual(30);
|
||||||
|
expect(result).not.toMatch(/-$/);
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@ -6,7 +6,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
|
|||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
loadPiece: vi.fn(() => null),
|
loadPiece: vi.fn(() => null),
|
||||||
getCurrentPiece: vi.fn(() => 'default'),
|
resolveConfigValue: vi.fn(() => 'default'),
|
||||||
setCurrentPiece: vi.fn(),
|
setCurrentPiece: vi.fn(),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
@ -20,11 +20,11 @@ vi.mock('../shared/ui/index.js', () => ({
|
|||||||
error: vi.fn(),
|
error: vi.fn(),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
import { getCurrentPiece, loadPiece, setCurrentPiece } from '../infra/config/index.js';
|
import { resolveConfigValue, loadPiece, setCurrentPiece } from '../infra/config/index.js';
|
||||||
import { selectPiece } from '../features/pieceSelection/index.js';
|
import { selectPiece } from '../features/pieceSelection/index.js';
|
||||||
import { switchPiece } from '../features/config/switchPiece.js';
|
import { switchPiece } from '../features/config/switchPiece.js';
|
||||||
|
|
||||||
const mockGetCurrentPiece = vi.mocked(getCurrentPiece);
|
const mockResolveConfigValue = vi.mocked(resolveConfigValue);
|
||||||
const mockLoadPiece = vi.mocked(loadPiece);
|
const mockLoadPiece = vi.mocked(loadPiece);
|
||||||
const mockSetCurrentPiece = vi.mocked(setCurrentPiece);
|
const mockSetCurrentPiece = vi.mocked(setCurrentPiece);
|
||||||
const mockSelectPiece = vi.mocked(selectPiece);
|
const mockSelectPiece = vi.mocked(selectPiece);
|
||||||
@ -32,6 +32,7 @@ const mockSelectPiece = vi.mocked(selectPiece);
|
|||||||
describe('switchPiece', () => {
|
describe('switchPiece', () => {
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
vi.clearAllMocks();
|
vi.clearAllMocks();
|
||||||
|
mockResolveConfigValue.mockReturnValue('default');
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should call selectPiece with fallbackToDefault: false', async () => {
|
it('should call selectPiece with fallbackToDefault: false', async () => {
|
||||||
|
|||||||
@ -15,6 +15,16 @@ describe('TaskPrefixWriter', () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
describe('constructor', () => {
|
describe('constructor', () => {
|
||||||
|
it('should use issue number when provided', () => {
|
||||||
|
const writer = new TaskPrefixWriter({ taskName: 'my-task', colorIndex: 0, issue: 123, writeFn });
|
||||||
|
|
||||||
|
writer.writeLine('Issue task');
|
||||||
|
|
||||||
|
expect(output).toHaveLength(1);
|
||||||
|
expect(output[0]).toContain('[#123]');
|
||||||
|
expect(output[0]).not.toContain('[my-t]');
|
||||||
|
});
|
||||||
|
|
||||||
it('should cycle colors for different colorIndex values', () => {
|
it('should cycle colors for different colorIndex values', () => {
|
||||||
const writer0 = new TaskPrefixWriter({ taskName: 'task-a', colorIndex: 0, writeFn });
|
const writer0 = new TaskPrefixWriter({ taskName: 'task-a', colorIndex: 0, writeFn });
|
||||||
const writer4 = new TaskPrefixWriter({ taskName: 'task-a', colorIndex: 4, writeFn });
|
const writer4 = new TaskPrefixWriter({ taskName: 'task-a', colorIndex: 4, writeFn });
|
||||||
@ -27,6 +37,16 @@ describe('TaskPrefixWriter', () => {
|
|||||||
expect(output[1]).toContain('\x1b[36m');
|
expect(output[1]).toContain('\x1b[36m');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should use display label when provided', () => {
|
||||||
|
const writer = new TaskPrefixWriter({ taskName: 'my-task', colorIndex: 0, displayLabel: '#12345', writeFn });
|
||||||
|
|
||||||
|
writer.writeLine('Hello World');
|
||||||
|
|
||||||
|
expect(output).toHaveLength(1);
|
||||||
|
expect(output[0]).toContain('[#12345]');
|
||||||
|
expect(output[0]).not.toContain('[my-t]');
|
||||||
|
});
|
||||||
|
|
||||||
it('should assign correct colors in order', () => {
|
it('should assign correct colors in order', () => {
|
||||||
const writers = [0, 1, 2, 3].map(
|
const writers = [0, 1, 2, 3].map(
|
||||||
(i) => new TaskPrefixWriter({ taskName: `t${i}`, colorIndex: i, writeFn }),
|
(i) => new TaskPrefixWriter({ taskName: `t${i}`, colorIndex: i, writeFn }),
|
||||||
|
|||||||
@ -1,65 +1,54 @@
|
|||||||
/**
|
/**
|
||||||
* Tests for resolveTaskExecution
|
* Tests for execute task option propagation.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import * as fs from 'node:fs';
|
|
||||||
import * as os from 'node:os';
|
|
||||||
import * as path from 'node:path';
|
|
||||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||||
|
import type { TaskInfo } from '../infra/task/index.js';
|
||||||
|
|
||||||
|
const { mockResolveTaskExecution, mockExecutePiece, mockLoadPieceByIdentifier, mockResolvePieceConfigValues, mockBuildTaskResult, mockPersistTaskResult, mockPersistTaskError, mockPostExecutionFlow } =
|
||||||
|
vi.hoisted(() => ({
|
||||||
|
mockResolveTaskExecution: vi.fn(),
|
||||||
|
mockExecutePiece: vi.fn(),
|
||||||
|
mockLoadPieceByIdentifier: vi.fn(),
|
||||||
|
mockResolvePieceConfigValues: vi.fn(),
|
||||||
|
mockBuildTaskResult: vi.fn(),
|
||||||
|
mockPersistTaskResult: vi.fn(),
|
||||||
|
mockPersistTaskError: vi.fn(),
|
||||||
|
mockPostExecutionFlow: vi.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../features/tasks/execute/resolveTask.js', () => ({
|
||||||
|
resolveTaskExecution: (...args: unknown[]) => mockResolveTaskExecution(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../features/tasks/execute/pieceExecution.js', () => ({
|
||||||
|
executePiece: (...args: unknown[]) => mockExecutePiece(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../features/tasks/execute/taskResultHandler.js', () => ({
|
||||||
|
buildTaskResult: (...args: unknown[]) => mockBuildTaskResult(...args),
|
||||||
|
persistTaskResult: (...args: unknown[]) => mockPersistTaskResult(...args),
|
||||||
|
persistTaskError: (...args: unknown[]) => mockPersistTaskError(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
|
vi.mock('../features/tasks/execute/postExecution.js', () => ({
|
||||||
|
postExecutionFlow: (...args: unknown[]) => mockPostExecutionFlow(...args),
|
||||||
|
}));
|
||||||
|
|
||||||
// Mock dependencies before importing the module under test
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
loadPieceByIdentifier: vi.fn(),
|
loadPieceByIdentifier: (...args: unknown[]) => mockLoadPieceByIdentifier(...args),
|
||||||
isPiecePath: vi.fn(() => false),
|
isPiecePath: () => false,
|
||||||
loadGlobalConfig: vi.fn(() => ({})),
|
resolvePieceConfigValues: (...args: unknown[]) => mockResolvePieceConfigValues(...args),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
import { loadGlobalConfig } from '../infra/config/index.js';
|
vi.mock('../shared/ui/index.js', () => ({
|
||||||
const mockLoadGlobalConfig = vi.mocked(loadGlobalConfig);
|
|
||||||
|
|
||||||
vi.mock('../infra/task/index.js', async (importOriginal) => ({
|
|
||||||
...(await importOriginal<Record<string, unknown>>()),
|
|
||||||
TaskRunner: vi.fn(),
|
|
||||||
}));
|
|
||||||
|
|
||||||
vi.mock('../infra/task/clone.js', async (importOriginal) => ({
|
|
||||||
...(await importOriginal<Record<string, unknown>>()),
|
|
||||||
createSharedClone: vi.fn(),
|
|
||||||
removeClone: vi.fn(),
|
|
||||||
}));
|
|
||||||
|
|
||||||
vi.mock('../infra/task/git.js', async (importOriginal) => ({
|
|
||||||
...(await importOriginal<Record<string, unknown>>()),
|
|
||||||
getCurrentBranch: vi.fn(() => 'main'),
|
|
||||||
}));
|
|
||||||
|
|
||||||
vi.mock('../infra/task/autoCommit.js', async (importOriginal) => ({
|
|
||||||
...(await importOriginal<Record<string, unknown>>()),
|
|
||||||
autoCommitAndPush: vi.fn(),
|
|
||||||
}));
|
|
||||||
|
|
||||||
vi.mock('../infra/task/summarize.js', async (importOriginal) => ({
|
|
||||||
...(await importOriginal<Record<string, unknown>>()),
|
|
||||||
summarizeTaskName: vi.fn(),
|
|
||||||
}));
|
|
||||||
|
|
||||||
vi.mock('../shared/ui/index.js', () => {
|
|
||||||
const info = vi.fn();
|
|
||||||
return {
|
|
||||||
header: vi.fn(),
|
header: vi.fn(),
|
||||||
info,
|
info: vi.fn(),
|
||||||
error: vi.fn(),
|
error: vi.fn(),
|
||||||
success: vi.fn(),
|
|
||||||
status: vi.fn(),
|
status: vi.fn(),
|
||||||
|
success: vi.fn(),
|
||||||
blankLine: vi.fn(),
|
blankLine: vi.fn(),
|
||||||
withProgress: vi.fn(async (start, done, operation) => {
|
}));
|
||||||
info(start);
|
|
||||||
const result = await operation();
|
|
||||||
info(typeof done === 'function' ? done(result) : done);
|
|
||||||
return result;
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
||||||
...(await importOriginal<Record<string, unknown>>()),
|
...(await importOriginal<Record<string, unknown>>()),
|
||||||
@ -68,560 +57,89 @@ vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
|||||||
debug: vi.fn(),
|
debug: vi.fn(),
|
||||||
error: vi.fn(),
|
error: vi.fn(),
|
||||||
}),
|
}),
|
||||||
getErrorMessage: vi.fn((e) => e.message),
|
getErrorMessage: vi.fn((error: unknown) => String(error)),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../features/tasks/execute/pieceExecution.js', () => ({
|
vi.mock('../shared/i18n/index.js', () => ({
|
||||||
executePiece: vi.fn(),
|
getLabel: vi.fn((key: string) => key),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/context.js', () => ({
|
import { executeAndCompleteTask } from '../features/tasks/execute/taskExecution.js';
|
||||||
isQuietMode: vi.fn(() => false),
|
|
||||||
}));
|
|
||||||
|
|
||||||
vi.mock('../shared/constants.js', () => ({
|
const createTask = (name: string): TaskInfo => ({
|
||||||
DEFAULT_PIECE_NAME: 'default',
|
name,
|
||||||
DEFAULT_LANGUAGE: 'en',
|
content: `Task: ${name}`,
|
||||||
}));
|
filePath: `/tasks/${name}.yaml`,
|
||||||
|
createdAt: '2026-02-16T00:00:00.000Z',
|
||||||
import { createSharedClone } from '../infra/task/clone.js';
|
status: 'pending',
|
||||||
import { getCurrentBranch } from '../infra/task/git.js';
|
data: { task: `Task: ${name}` },
|
||||||
import { summarizeTaskName } from '../infra/task/summarize.js';
|
|
||||||
import { info } from '../shared/ui/index.js';
|
|
||||||
import { resolveTaskExecution } from '../features/tasks/index.js';
|
|
||||||
import type { TaskInfo } from '../infra/task/index.js';
|
|
||||||
|
|
||||||
const mockCreateSharedClone = vi.mocked(createSharedClone);
|
|
||||||
const mockGetCurrentBranch = vi.mocked(getCurrentBranch);
|
|
||||||
const mockSummarizeTaskName = vi.mocked(summarizeTaskName);
|
|
||||||
const mockInfo = vi.mocked(info);
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
vi.clearAllMocks();
|
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('resolveTaskExecution', () => {
|
describe('executeAndCompleteTask', () => {
|
||||||
it('should return defaults when task has no data', async () => {
|
beforeEach(() => {
|
||||||
// Given: Task without structured data
|
vi.clearAllMocks();
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'simple-task',
|
|
||||||
content: 'Simple task content',
|
|
||||||
filePath: '/tasks/simple-task.yaml',
|
|
||||||
createdAt: '2026-02-09T00:00:00.000Z',
|
|
||||||
status: 'pending',
|
|
||||||
data: null,
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
mockLoadPieceByIdentifier.mockReturnValue({
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
name: 'default',
|
||||||
|
movements: [],
|
||||||
// Then
|
});
|
||||||
expect(result).toEqual({
|
mockResolvePieceConfigValues.mockReturnValue({
|
||||||
|
language: 'en',
|
||||||
|
provider: 'claude',
|
||||||
|
model: undefined,
|
||||||
|
personaProviders: {},
|
||||||
|
providerProfiles: {},
|
||||||
|
providerOptions: {
|
||||||
|
claude: { sandbox: { allowUnsandboxedCommands: true } },
|
||||||
|
},
|
||||||
|
notificationSound: true,
|
||||||
|
notificationSoundEvents: {},
|
||||||
|
concurrency: 1,
|
||||||
|
taskPollIntervalMs: 500,
|
||||||
|
});
|
||||||
|
mockBuildTaskResult.mockReturnValue({ success: true });
|
||||||
|
mockResolveTaskExecution.mockResolvedValue({
|
||||||
execCwd: '/project',
|
execCwd: '/project',
|
||||||
execPiece: 'default',
|
execPiece: 'default',
|
||||||
isWorktree: false,
|
isWorktree: false,
|
||||||
autoPr: false,
|
autoPr: false,
|
||||||
});
|
taskPrompt: undefined,
|
||||||
expect(mockSummarizeTaskName).not.toHaveBeenCalled();
|
reportDirName: undefined,
|
||||||
expect(mockCreateSharedClone).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return defaults when data has no worktree option', async () => {
|
|
||||||
// Given: Task with data but no worktree
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-data',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.isWorktree).toBe(false);
|
|
||||||
expect(mockSummarizeTaskName).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should create shared clone with AI-summarized slug when worktree option is true', async () => {
|
|
||||||
// Given: Task with worktree option
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'japanese-task',
|
|
||||||
content: '認証機能を追加する',
|
|
||||||
filePath: '/tasks/japanese-task.yaml',
|
|
||||||
data: {
|
|
||||||
task: '認証機能を追加する',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockSummarizeTaskName.mockResolvedValue('add-auth');
|
|
||||||
mockCreateSharedClone.mockReturnValue({
|
|
||||||
path: '/project/../20260128T0504-add-auth',
|
|
||||||
branch: 'takt/20260128T0504-add-auth',
|
|
||||||
});
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(mockSummarizeTaskName).toHaveBeenCalledWith('認証機能を追加する', { cwd: '/project' });
|
|
||||||
expect(mockCreateSharedClone).toHaveBeenCalledWith('/project', {
|
|
||||||
worktree: true,
|
|
||||||
branch: undefined,
|
branch: undefined,
|
||||||
taskSlug: 'add-auth',
|
worktreePath: undefined,
|
||||||
});
|
baseBranch: undefined,
|
||||||
expect(mockGetCurrentBranch).toHaveBeenCalledWith('/project');
|
startMovement: undefined,
|
||||||
expect(result).toEqual({
|
retryNote: undefined,
|
||||||
execCwd: '/project/../20260128T0504-add-auth',
|
issueNumber: undefined,
|
||||||
execPiece: 'default',
|
|
||||||
isWorktree: true,
|
|
||||||
autoPr: false,
|
|
||||||
branch: 'takt/20260128T0504-add-auth',
|
|
||||||
worktreePath: '/project/../20260128T0504-add-auth',
|
|
||||||
baseBranch: 'main',
|
|
||||||
});
|
});
|
||||||
|
mockExecutePiece.mockResolvedValue({ success: true });
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should display generating message before AI call', async () => {
|
it('should pass taskDisplayLabel from parallel options into executePiece', async () => {
|
||||||
// Given: Task with worktree
|
// Given: Parallel execution passes an issue-style taskDisplayLabel.
|
||||||
const task: TaskInfo = {
|
const task = createTask('task-with-issue');
|
||||||
name: 'test-task',
|
const taskDisplayLabel = '#12345';
|
||||||
content: 'Test task',
|
const abortController = new AbortController();
|
||||||
filePath: '/tasks/test.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Test task',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockSummarizeTaskName.mockResolvedValue('test-task');
|
|
||||||
mockCreateSharedClone.mockReturnValue({
|
|
||||||
path: '/project/../test-task',
|
|
||||||
branch: 'takt/test-task',
|
|
||||||
});
|
|
||||||
|
|
||||||
// When
|
// When
|
||||||
await resolveTaskExecution(task, '/project', 'default');
|
await executeAndCompleteTask(task, {} as never, '/project', 'default', undefined, {
|
||||||
|
abortSignal: abortController.signal,
|
||||||
// Then
|
taskPrefix: taskDisplayLabel,
|
||||||
expect(mockInfo).toHaveBeenCalledWith('Generating branch name...');
|
taskColorIndex: 0,
|
||||||
expect(mockInfo).toHaveBeenCalledWith('Branch name generated: test-task');
|
taskDisplayLabel,
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use task content (not name) for AI summarization', async () => {
|
// Then: executePiece receives the propagated display label.
|
||||||
// Given: Task where name differs from content
|
expect(mockExecutePiece).toHaveBeenCalledTimes(1);
|
||||||
const task: TaskInfo = {
|
const pieceExecutionOptions = mockExecutePiece.mock.calls[0]?.[3] as {
|
||||||
name: 'old-file-name',
|
taskDisplayLabel?: string;
|
||||||
content: 'New feature implementation details',
|
taskPrefix?: string;
|
||||||
filePath: '/tasks/old-file-name.yaml',
|
providerOptions?: unknown;
|
||||||
data: {
|
|
||||||
task: 'New feature implementation details',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
expect(pieceExecutionOptions?.taskDisplayLabel).toBe(taskDisplayLabel);
|
||||||
mockSummarizeTaskName.mockResolvedValue('new-feature');
|
expect(pieceExecutionOptions?.taskPrefix).toBe(taskDisplayLabel);
|
||||||
mockCreateSharedClone.mockReturnValue({
|
expect(pieceExecutionOptions?.providerOptions).toEqual({
|
||||||
path: '/project/../new-feature',
|
claude: { sandbox: { allowUnsandboxedCommands: true } },
|
||||||
branch: 'takt/new-feature',
|
|
||||||
});
|
});
|
||||||
|
|
||||||
// When
|
|
||||||
await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then: Should use content, not file name
|
|
||||||
expect(mockSummarizeTaskName).toHaveBeenCalledWith('New feature implementation details', { cwd: '/project' });
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use piece override from task data', async () => {
|
|
||||||
// Given: Task with piece override
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-piece',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
piece: 'custom-piece',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.execPiece).toBe('custom-piece');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should pass branch option to createSharedClone when specified', async () => {
|
|
||||||
// Given: Task with custom branch
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-branch',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
worktree: true,
|
|
||||||
branch: 'feature/custom-branch',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockSummarizeTaskName.mockResolvedValue('custom-task');
|
|
||||||
mockCreateSharedClone.mockReturnValue({
|
|
||||||
path: '/project/../custom-task',
|
|
||||||
branch: 'feature/custom-branch',
|
|
||||||
});
|
|
||||||
|
|
||||||
// When
|
|
||||||
await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(mockCreateSharedClone).toHaveBeenCalledWith('/project', {
|
|
||||||
worktree: true,
|
|
||||||
branch: 'feature/custom-branch',
|
|
||||||
taskSlug: 'custom-task',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should display clone creation info', async () => {
|
|
||||||
// Given: Task with worktree
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'info-task',
|
|
||||||
content: 'Info task',
|
|
||||||
filePath: '/tasks/info.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Info task',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockSummarizeTaskName.mockResolvedValue('info-task');
|
|
||||||
mockCreateSharedClone.mockReturnValue({
|
|
||||||
path: '/project/../20260128-info-task',
|
|
||||||
branch: 'takt/20260128-info-task',
|
|
||||||
});
|
|
||||||
|
|
||||||
// When
|
|
||||||
await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(mockInfo).toHaveBeenCalledWith(
|
|
||||||
'Clone created: /project/../20260128-info-task (branch: takt/20260128-info-task)'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return autoPr from task YAML when specified', async () => {
|
|
||||||
// Given: Task with auto_pr option
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-auto-pr',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
auto_pr: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.autoPr).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return autoPr: false from task YAML when specified as false', async () => {
|
|
||||||
// Given: Task with auto_pr: false
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-no-auto-pr',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
auto_pr: false,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.autoPr).toBe(false);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should fall back to global config autoPr when task YAML does not specify', async () => {
|
|
||||||
// Given: Task without auto_pr, global config has autoPr
|
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
|
||||||
language: 'en',
|
|
||||||
defaultPiece: 'default',
|
|
||||||
logLevel: 'info',
|
|
||||||
autoPr: true,
|
|
||||||
});
|
|
||||||
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-no-auto-pr-setting',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.autoPr).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return false autoPr when neither task nor config specifies', async () => {
|
|
||||||
// Given: Neither task nor config has autoPr
|
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
|
||||||
language: 'en',
|
|
||||||
defaultPiece: 'default',
|
|
||||||
logLevel: 'info',
|
|
||||||
});
|
|
||||||
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-default',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.autoPr).toBe(false);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should prioritize task YAML auto_pr over global config', async () => {
|
|
||||||
// Given: Task has auto_pr: false, global config has autoPr: true
|
|
||||||
mockLoadGlobalConfig.mockReturnValue({
|
|
||||||
language: 'en',
|
|
||||||
defaultPiece: 'default',
|
|
||||||
logLevel: 'info',
|
|
||||||
autoPr: true,
|
|
||||||
});
|
|
||||||
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-override',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
auto_pr: false,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.autoPr).toBe(false);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should capture baseBranch from getCurrentBranch when worktree is used', async () => {
|
|
||||||
// Given: Task with worktree, on 'develop' branch
|
|
||||||
mockGetCurrentBranch.mockReturnValue('develop');
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-on-develop',
|
|
||||||
content: 'Task on develop branch',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task on develop branch',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockSummarizeTaskName.mockResolvedValue('task-develop');
|
|
||||||
mockCreateSharedClone.mockReturnValue({
|
|
||||||
path: '/project/../task-develop',
|
|
||||||
branch: 'takt/task-develop',
|
|
||||||
});
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(mockGetCurrentBranch).toHaveBeenCalledWith('/project');
|
|
||||||
expect(result.baseBranch).toBe('develop');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not set baseBranch when worktree is not used', async () => {
|
|
||||||
// Given: Task without worktree
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-no-worktree',
|
|
||||||
content: 'Task without worktree',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task without worktree',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(mockGetCurrentBranch).not.toHaveBeenCalled();
|
|
||||||
expect(result.baseBranch).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return issueNumber from task data when specified', async () => {
|
|
||||||
// Given: Task with issue number
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-issue',
|
|
||||||
content: 'Fix authentication bug',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Fix authentication bug',
|
|
||||||
issue: 131,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.issueNumber).toBe(131);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return undefined issueNumber when task data has no issue', async () => {
|
|
||||||
// Given: Task without issue
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-no-issue',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// When
|
|
||||||
const result = await resolveTaskExecution(task, '/project', 'default');
|
|
||||||
|
|
||||||
// Then
|
|
||||||
expect(result.issueNumber).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not start clone creation when abortSignal is already aborted', async () => {
|
|
||||||
// Given: Worktree task with pre-aborted signal
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'aborted-before-clone',
|
|
||||||
content: 'Task content',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const controller = new AbortController();
|
|
||||||
controller.abort();
|
|
||||||
|
|
||||||
// When / Then
|
|
||||||
await expect(resolveTaskExecution(task, '/project', 'default', controller.signal)).rejects.toThrow('Task execution aborted');
|
|
||||||
expect(mockSummarizeTaskName).not.toHaveBeenCalled();
|
|
||||||
expect(mockCreateSharedClone).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should stage task_dir spec into run context and return reportDirName', async () => {
|
|
||||||
const tmpRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'takt-taskdir-normal-'));
|
|
||||||
const projectDir = path.join(tmpRoot, 'project');
|
|
||||||
fs.mkdirSync(path.join(projectDir, '.takt', 'tasks', '20260201-015714-foptng'), { recursive: true });
|
|
||||||
const sourceOrder = path.join(projectDir, '.takt', 'tasks', '20260201-015714-foptng', 'order.md');
|
|
||||||
fs.writeFileSync(sourceOrder, '# normal task spec\n', 'utf-8');
|
|
||||||
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-dir',
|
|
||||||
content: 'Task content',
|
|
||||||
taskDir: '.takt/tasks/20260201-015714-foptng',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const result = await resolveTaskExecution(task, projectDir, 'default');
|
|
||||||
|
|
||||||
expect(result.reportDirName).toBe('20260201-015714-foptng');
|
|
||||||
expect(result.execCwd).toBe(projectDir);
|
|
||||||
const stagedOrder = path.join(projectDir, '.takt', 'runs', '20260201-015714-foptng', 'context', 'task', 'order.md');
|
|
||||||
expect(fs.existsSync(stagedOrder)).toBe(true);
|
|
||||||
expect(fs.readFileSync(stagedOrder, 'utf-8')).toContain('normal task spec');
|
|
||||||
expect(result.taskPrompt).toContain('Primary spec: `.takt/runs/20260201-015714-foptng/context/task/order.md`.');
|
|
||||||
expect(result.taskPrompt).not.toContain(projectDir);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw when taskDir format is invalid', async () => {
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-invalid-dir',
|
|
||||||
content: 'Task content',
|
|
||||||
taskDir: '.takt/reports/20260201-015714-foptng',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
await expect(resolveTaskExecution(task, '/project', 'default')).rejects.toThrow(
|
|
||||||
'Invalid task_dir format: .takt/reports/20260201-015714-foptng',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw when taskDir contains parent directory segment', async () => {
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-parent-dir',
|
|
||||||
content: 'Task content',
|
|
||||||
taskDir: '.takt/tasks/..',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
await expect(resolveTaskExecution(task, '/project', 'default')).rejects.toThrow(
|
|
||||||
'Invalid task_dir format: .takt/tasks/..',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should stage task_dir spec into worktree run context and return run-scoped task prompt', async () => {
|
|
||||||
const tmpRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'takt-taskdir-'));
|
|
||||||
const projectDir = path.join(tmpRoot, 'project');
|
|
||||||
const cloneDir = path.join(tmpRoot, 'clone');
|
|
||||||
fs.mkdirSync(path.join(projectDir, '.takt', 'tasks', '20260201-015714-foptng'), { recursive: true });
|
|
||||||
fs.mkdirSync(cloneDir, { recursive: true });
|
|
||||||
const sourceOrder = path.join(projectDir, '.takt', 'tasks', '20260201-015714-foptng', 'order.md');
|
|
||||||
fs.writeFileSync(sourceOrder, '# webhook task\n', 'utf-8');
|
|
||||||
|
|
||||||
const task: TaskInfo = {
|
|
||||||
name: 'task-with-taskdir-worktree',
|
|
||||||
content: 'Task content',
|
|
||||||
taskDir: '.takt/tasks/20260201-015714-foptng',
|
|
||||||
filePath: '/tasks/task.yaml',
|
|
||||||
data: {
|
|
||||||
task: 'Task content',
|
|
||||||
worktree: true,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockSummarizeTaskName.mockResolvedValue('webhook-task');
|
|
||||||
mockCreateSharedClone.mockReturnValue({
|
|
||||||
path: cloneDir,
|
|
||||||
branch: 'takt/webhook-task',
|
|
||||||
});
|
|
||||||
|
|
||||||
const result = await resolveTaskExecution(task, projectDir, 'default');
|
|
||||||
|
|
||||||
const stagedOrder = path.join(cloneDir, '.takt', 'runs', '20260201-015714-foptng', 'context', 'task', 'order.md');
|
|
||||||
expect(fs.existsSync(stagedOrder)).toBe(true);
|
|
||||||
expect(fs.readFileSync(stagedOrder, 'utf-8')).toContain('webhook task');
|
|
||||||
|
|
||||||
expect(result.taskPrompt).toContain('Implement using only the files in `.takt/runs/20260201-015714-foptng/context/task`.');
|
|
||||||
expect(result.taskPrompt).toContain('Primary spec: `.takt/runs/20260201-015714-foptng/context/task/order.md`.');
|
|
||||||
expect(result.taskPrompt).not.toContain(projectDir);
|
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@ -48,7 +48,7 @@ vi.mock('../infra/task/index.js', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
loadGlobalConfig: vi.fn(() => ({ interactivePreviewMovements: 3, language: 'en' })),
|
resolvePieceConfigValues: vi.fn(() => ({ interactivePreviewMovements: 3, language: 'en' })),
|
||||||
getPieceDescription: vi.fn(() => ({
|
getPieceDescription: vi.fn(() => ({
|
||||||
name: 'default',
|
name: 'default',
|
||||||
description: 'desc',
|
description: 'desc',
|
||||||
@ -82,6 +82,8 @@ vi.mock('../features/interactive/index.js', () => ({
|
|||||||
listRecentRuns: (...args: unknown[]) => mockListRecentRuns(...args),
|
listRecentRuns: (...args: unknown[]) => mockListRecentRuns(...args),
|
||||||
selectRun: (...args: unknown[]) => mockSelectRun(...args),
|
selectRun: (...args: unknown[]) => mockSelectRun(...args),
|
||||||
loadRunSessionContext: (...args: unknown[]) => mockLoadRunSessionContext(...args),
|
loadRunSessionContext: (...args: unknown[]) => mockLoadRunSessionContext(...args),
|
||||||
|
findRunForTask: vi.fn(() => null),
|
||||||
|
findPreviousOrderContent: vi.fn(() => null),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../features/tasks/execute/taskExecution.js', () => ({
|
vi.mock('../features/tasks/execute/taskExecution.js', () => ({
|
||||||
@ -191,6 +193,7 @@ describe('instructBranch direct execution flow', () => {
|
|||||||
'',
|
'',
|
||||||
expect.anything(),
|
expect.anything(),
|
||||||
undefined,
|
undefined,
|
||||||
|
null,
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -227,6 +230,7 @@ describe('instructBranch direct execution flow', () => {
|
|||||||
'',
|
'',
|
||||||
expect.anything(),
|
expect.anything(),
|
||||||
runContext,
|
runContext,
|
||||||
|
null,
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@ -3,8 +3,8 @@ import { describe, it, expect, vi, beforeEach } from 'vitest';
|
|||||||
const {
|
const {
|
||||||
mockExistsSync,
|
mockExistsSync,
|
||||||
mockSelectPiece,
|
mockSelectPiece,
|
||||||
mockSelectOption,
|
mockSelectOptionWithDefault,
|
||||||
mockLoadGlobalConfig,
|
mockResolvePieceConfigValue,
|
||||||
mockLoadPieceByIdentifier,
|
mockLoadPieceByIdentifier,
|
||||||
mockGetPieceDescription,
|
mockGetPieceDescription,
|
||||||
mockRunRetryMode,
|
mockRunRetryMode,
|
||||||
@ -15,8 +15,8 @@ const {
|
|||||||
} = vi.hoisted(() => ({
|
} = vi.hoisted(() => ({
|
||||||
mockExistsSync: vi.fn(() => true),
|
mockExistsSync: vi.fn(() => true),
|
||||||
mockSelectPiece: vi.fn(),
|
mockSelectPiece: vi.fn(),
|
||||||
mockSelectOption: vi.fn(),
|
mockSelectOptionWithDefault: vi.fn(),
|
||||||
mockLoadGlobalConfig: vi.fn(),
|
mockResolvePieceConfigValue: vi.fn(),
|
||||||
mockLoadPieceByIdentifier: vi.fn(),
|
mockLoadPieceByIdentifier: vi.fn(),
|
||||||
mockGetPieceDescription: vi.fn(() => ({
|
mockGetPieceDescription: vi.fn(() => ({
|
||||||
name: 'default',
|
name: 'default',
|
||||||
@ -41,7 +41,7 @@ vi.mock('../features/pieceSelection/index.js', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/prompt/index.js', () => ({
|
vi.mock('../shared/prompt/index.js', () => ({
|
||||||
selectOption: (...args: unknown[]) => mockSelectOption(...args),
|
selectOptionWithDefault: (...args: unknown[]) => mockSelectOptionWithDefault(...args),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../shared/ui/index.js', () => ({
|
vi.mock('../shared/ui/index.js', () => ({
|
||||||
@ -60,7 +60,7 @@ vi.mock('../shared/utils/index.js', async (importOriginal) => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
loadGlobalConfig: (...args: unknown[]) => mockLoadGlobalConfig(...args),
|
resolvePieceConfigValue: (...args: unknown[]) => mockResolvePieceConfigValue(...args),
|
||||||
loadPieceByIdentifier: (...args: unknown[]) => mockLoadPieceByIdentifier(...args),
|
loadPieceByIdentifier: (...args: unknown[]) => mockLoadPieceByIdentifier(...args),
|
||||||
getPieceDescription: (...args: unknown[]) => mockGetPieceDescription(...args),
|
getPieceDescription: (...args: unknown[]) => mockGetPieceDescription(...args),
|
||||||
}));
|
}));
|
||||||
@ -73,6 +73,7 @@ vi.mock('../features/interactive/index.js', () => ({
|
|||||||
runTask: '', runPiece: '', runStatus: '', runMovementLogs: '', runReports: '',
|
runTask: '', runPiece: '', runStatus: '', runMovementLogs: '', runReports: '',
|
||||||
})),
|
})),
|
||||||
runRetryMode: (...args: unknown[]) => mockRunRetryMode(...args),
|
runRetryMode: (...args: unknown[]) => mockRunRetryMode(...args),
|
||||||
|
findPreviousOrderContent: vi.fn(() => null),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/task/index.js', () => ({
|
vi.mock('../infra/task/index.js', () => ({
|
||||||
@ -126,9 +127,9 @@ beforeEach(() => {
|
|||||||
mockExistsSync.mockReturnValue(true);
|
mockExistsSync.mockReturnValue(true);
|
||||||
|
|
||||||
mockSelectPiece.mockResolvedValue('default');
|
mockSelectPiece.mockResolvedValue('default');
|
||||||
mockLoadGlobalConfig.mockReturnValue({ defaultPiece: 'default' });
|
mockResolvePieceConfigValue.mockReturnValue(3);
|
||||||
mockLoadPieceByIdentifier.mockReturnValue(defaultPieceConfig);
|
mockLoadPieceByIdentifier.mockReturnValue(defaultPieceConfig);
|
||||||
mockSelectOption.mockResolvedValue('plan');
|
mockSelectOptionWithDefault.mockResolvedValue('plan');
|
||||||
mockRunRetryMode.mockResolvedValue({ action: 'execute', task: '追加指示A' });
|
mockRunRetryMode.mockResolvedValue({ action: 'execute', task: '追加指示A' });
|
||||||
mockStartReExecution.mockReturnValue({
|
mockStartReExecution.mockReturnValue({
|
||||||
name: 'my-task',
|
name: 'my-task',
|
||||||
@ -151,14 +152,31 @@ describe('retryFailedTask', () => {
|
|||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
failure: expect.objectContaining({ taskName: 'my-task', taskContent: 'Do something' }),
|
failure: expect.objectContaining({ taskName: 'my-task', taskContent: 'Do something' }),
|
||||||
}),
|
}),
|
||||||
|
null,
|
||||||
);
|
);
|
||||||
expect(mockStartReExecution).toHaveBeenCalledWith('my-task', ['failed'], undefined, '追加指示A');
|
expect(mockStartReExecution).toHaveBeenCalledWith('my-task', ['failed'], undefined, '追加指示A');
|
||||||
expect(mockExecuteAndCompleteTask).toHaveBeenCalled();
|
expect(mockExecuteAndCompleteTask).toHaveBeenCalled();
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('should pass failed movement as default to selectOptionWithDefault', async () => {
|
||||||
|
const task = makeFailedTask(); // failure.movement = 'review'
|
||||||
|
|
||||||
|
await retryFailedTask(task, '/project');
|
||||||
|
|
||||||
|
expect(mockSelectOptionWithDefault).toHaveBeenCalledWith(
|
||||||
|
'Start from movement:',
|
||||||
|
expect.arrayContaining([
|
||||||
|
expect.objectContaining({ value: 'plan' }),
|
||||||
|
expect.objectContaining({ value: 'implement' }),
|
||||||
|
expect.objectContaining({ value: 'review' }),
|
||||||
|
]),
|
||||||
|
'review',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
it('should pass non-initial movement as startMovement', async () => {
|
it('should pass non-initial movement as startMovement', async () => {
|
||||||
const task = makeFailedTask();
|
const task = makeFailedTask();
|
||||||
mockSelectOption.mockResolvedValue('implement');
|
mockSelectOptionWithDefault.mockResolvedValue('implement');
|
||||||
|
|
||||||
await retryFailedTask(task, '/project');
|
await retryFailedTask(task, '/project');
|
||||||
|
|
||||||
|
|||||||
@ -1,39 +1,50 @@
|
|||||||
import { describe, expect, it } from 'vitest';
|
import { describe, expect, it } from 'vitest';
|
||||||
import { formatTaskStatusLabel } from '../features/tasks/list/taskStatusLabel.js';
|
import { formatTaskStatusLabel, formatShortDate } from '../features/tasks/list/taskStatusLabel.js';
|
||||||
import type { TaskListItem } from '../infra/task/types.js';
|
import type { TaskListItem } from '../infra/task/types.js';
|
||||||
|
|
||||||
|
function makeTask(overrides: Partial<TaskListItem>): TaskListItem {
|
||||||
|
return {
|
||||||
|
kind: 'pending',
|
||||||
|
name: 'test-task',
|
||||||
|
createdAt: '2026-02-11T00:00:00.000Z',
|
||||||
|
filePath: '/tmp/task.md',
|
||||||
|
content: 'content',
|
||||||
|
...overrides,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
describe('formatTaskStatusLabel', () => {
|
describe('formatTaskStatusLabel', () => {
|
||||||
it("should format pending task as '[pending] name'", () => {
|
it("should format pending task as '[pending] name'", () => {
|
||||||
// Given: pending タスク
|
const task = makeTask({ kind: 'pending', name: 'implement-test' });
|
||||||
const task: TaskListItem = {
|
expect(formatTaskStatusLabel(task)).toBe('[pending] implement-test');
|
||||||
kind: 'pending',
|
|
||||||
name: 'implement test',
|
|
||||||
createdAt: '2026-02-11T00:00:00.000Z',
|
|
||||||
filePath: '/tmp/task.md',
|
|
||||||
content: 'content',
|
|
||||||
};
|
|
||||||
|
|
||||||
// When: ステータスラベルを生成する
|
|
||||||
const result = formatTaskStatusLabel(task);
|
|
||||||
|
|
||||||
// Then: pending は pending 表示になる
|
|
||||||
expect(result).toBe('[pending] implement test');
|
|
||||||
});
|
});
|
||||||
|
|
||||||
it("should format failed task as '[failed] name'", () => {
|
it("should format failed task as '[failed] name'", () => {
|
||||||
// Given: failed タスク
|
const task = makeTask({ kind: 'failed', name: 'retry-payment' });
|
||||||
const task: TaskListItem = {
|
expect(formatTaskStatusLabel(task)).toBe('[failed] retry-payment');
|
||||||
kind: 'failed',
|
});
|
||||||
name: 'retry payment',
|
|
||||||
createdAt: '2026-02-11T00:00:00.000Z',
|
|
||||||
filePath: '/tmp/task.md',
|
|
||||||
content: 'content',
|
|
||||||
};
|
|
||||||
|
|
||||||
// When: ステータスラベルを生成する
|
it('should include branch when present', () => {
|
||||||
const result = formatTaskStatusLabel(task);
|
const task = makeTask({
|
||||||
|
kind: 'completed',
|
||||||
|
name: 'fix-login-bug',
|
||||||
|
branch: 'takt/284/fix-login-bug',
|
||||||
|
});
|
||||||
|
expect(formatTaskStatusLabel(task)).toBe('[completed] fix-login-bug (takt/284/fix-login-bug)');
|
||||||
|
});
|
||||||
|
|
||||||
// Then: failed は failed 表示になる
|
it('should not include branch when absent', () => {
|
||||||
expect(result).toBe('[failed] retry payment');
|
const task = makeTask({ kind: 'running', name: 'my-task' });
|
||||||
|
expect(formatTaskStatusLabel(task)).toBe('[running] my-task');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('formatShortDate', () => {
|
||||||
|
it('should format ISO string to MM/DD HH:mm', () => {
|
||||||
|
expect(formatShortDate('2025-02-18T14:30:00.000Z')).toBe('02/18 14:30');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should zero-pad single digit values', () => {
|
||||||
|
expect(formatShortDate('2025-01-05T03:07:00.000Z')).toBe('01/05 03:07');
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@ -14,7 +14,7 @@ const {
|
|||||||
mockSuccess,
|
mockSuccess,
|
||||||
mockWarn,
|
mockWarn,
|
||||||
mockError,
|
mockError,
|
||||||
mockGetCurrentPiece,
|
mockResolveConfigValue,
|
||||||
} = vi.hoisted(() => ({
|
} = vi.hoisted(() => ({
|
||||||
mockRecoverInterruptedRunningTasks: vi.fn(),
|
mockRecoverInterruptedRunningTasks: vi.fn(),
|
||||||
mockGetTasksDir: vi.fn(),
|
mockGetTasksDir: vi.fn(),
|
||||||
@ -28,7 +28,7 @@ const {
|
|||||||
mockSuccess: vi.fn(),
|
mockSuccess: vi.fn(),
|
||||||
mockWarn: vi.fn(),
|
mockWarn: vi.fn(),
|
||||||
mockError: vi.fn(),
|
mockError: vi.fn(),
|
||||||
mockGetCurrentPiece: vi.fn(),
|
mockResolveConfigValue: vi.fn(),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/task/index.js', () => ({
|
vi.mock('../infra/task/index.js', () => ({
|
||||||
@ -61,7 +61,7 @@ vi.mock('../shared/i18n/index.js', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
vi.mock('../infra/config/index.js', () => ({
|
vi.mock('../infra/config/index.js', () => ({
|
||||||
getCurrentPiece: mockGetCurrentPiece,
|
resolveConfigValue: mockResolveConfigValue,
|
||||||
}));
|
}));
|
||||||
|
|
||||||
import { watchTasks } from '../features/tasks/watch/index.js';
|
import { watchTasks } from '../features/tasks/watch/index.js';
|
||||||
@ -69,7 +69,7 @@ import { watchTasks } from '../features/tasks/watch/index.js';
|
|||||||
describe('watchTasks', () => {
|
describe('watchTasks', () => {
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
vi.clearAllMocks();
|
vi.clearAllMocks();
|
||||||
mockGetCurrentPiece.mockReturnValue('default');
|
mockResolveConfigValue.mockReturnValue('default');
|
||||||
mockRecoverInterruptedRunningTasks.mockReturnValue(0);
|
mockRecoverInterruptedRunningTasks.mockReturnValue(0);
|
||||||
mockGetTasksDir.mockReturnValue('/project/.takt/tasks.yaml');
|
mockGetTasksDir.mockReturnValue('/project/.takt/tasks.yaml');
|
||||||
mockExecuteAndCompleteTask.mockResolvedValue(true);
|
mockExecuteAndCompleteTask.mockResolvedValue(true);
|
||||||
|
|||||||
@ -45,11 +45,17 @@ const mockInfo = vi.mocked(info);
|
|||||||
|
|
||||||
const TEST_POLL_INTERVAL_MS = 50;
|
const TEST_POLL_INTERVAL_MS = 50;
|
||||||
|
|
||||||
function createTask(name: string): TaskInfo {
|
function createTask(name: string, options?: { issue?: number }): TaskInfo {
|
||||||
return {
|
return {
|
||||||
name,
|
name,
|
||||||
content: `Task: ${name}`,
|
content: `Task: ${name}`,
|
||||||
filePath: `/tasks/${name}.yaml`,
|
filePath: `/tasks/${name}.yaml`,
|
||||||
|
createdAt: '2026-01-01T00:00:00.000Z',
|
||||||
|
status: 'pending',
|
||||||
|
data: {
|
||||||
|
task: `Task: ${name}`,
|
||||||
|
...(options?.issue !== undefined ? { issue: options.issue } : {}),
|
||||||
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -135,10 +141,41 @@ describe('runWithWorkerPool', () => {
|
|||||||
// Then
|
// Then
|
||||||
expect(mockExecuteAndCompleteTask).toHaveBeenCalledTimes(1);
|
expect(mockExecuteAndCompleteTask).toHaveBeenCalledTimes(1);
|
||||||
const parallelOpts = mockExecuteAndCompleteTask.mock.calls[0]?.[5];
|
const parallelOpts = mockExecuteAndCompleteTask.mock.calls[0]?.[5];
|
||||||
expect(parallelOpts).toEqual({
|
expect(parallelOpts).toMatchObject({
|
||||||
abortSignal: expect.any(AbortSignal),
|
abortSignal: expect.any(AbortSignal),
|
||||||
taskPrefix: 'my-task',
|
taskPrefix: 'my-task',
|
||||||
taskColorIndex: 0,
|
taskColorIndex: 0,
|
||||||
|
taskDisplayLabel: undefined,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use full issue number as taskPrefix label when task has issue in parallel execution', async () => {
|
||||||
|
// Given: task with 5-digit issue number should not be truncated
|
||||||
|
const issueNumber = 12345;
|
||||||
|
const tasks = [createTask('issue-task', { issue: issueNumber })];
|
||||||
|
const runner = createMockTaskRunner([]);
|
||||||
|
const stdoutChunks: string[] = [];
|
||||||
|
const writeSpy = vi.spyOn(process.stdout, 'write').mockImplementation((chunk: unknown) => {
|
||||||
|
stdoutChunks.push(String(chunk));
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
|
||||||
|
// When
|
||||||
|
await runWithWorkerPool(runner as never, tasks, 2, '/cwd', 'default', undefined, TEST_POLL_INTERVAL_MS);
|
||||||
|
|
||||||
|
// Then: Issue label is used instead of truncated task name
|
||||||
|
writeSpy.mockRestore();
|
||||||
|
const allOutput = stdoutChunks.join('');
|
||||||
|
expect(allOutput).toContain('[#12345]');
|
||||||
|
expect(allOutput).not.toContain('[#123]');
|
||||||
|
|
||||||
|
expect(mockExecuteAndCompleteTask).toHaveBeenCalledTimes(1);
|
||||||
|
const parallelOpts = mockExecuteAndCompleteTask.mock.calls[0]?.[5];
|
||||||
|
expect(parallelOpts).toEqual({
|
||||||
|
abortSignal: expect.any(AbortSignal),
|
||||||
|
taskPrefix: `#${issueNumber}`,
|
||||||
|
taskDisplayLabel: `#${issueNumber}`,
|
||||||
|
taskColorIndex: 0,
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -153,10 +190,11 @@ describe('runWithWorkerPool', () => {
|
|||||||
// Then
|
// Then
|
||||||
expect(mockExecuteAndCompleteTask).toHaveBeenCalledTimes(1);
|
expect(mockExecuteAndCompleteTask).toHaveBeenCalledTimes(1);
|
||||||
const parallelOpts = mockExecuteAndCompleteTask.mock.calls[0]?.[5];
|
const parallelOpts = mockExecuteAndCompleteTask.mock.calls[0]?.[5];
|
||||||
expect(parallelOpts).toEqual({
|
expect(parallelOpts).toMatchObject({
|
||||||
abortSignal: expect.any(AbortSignal),
|
abortSignal: expect.any(AbortSignal),
|
||||||
taskPrefix: undefined,
|
taskPrefix: undefined,
|
||||||
taskColorIndex: undefined,
|
taskColorIndex: undefined,
|
||||||
|
taskDisplayLabel: undefined,
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
import { existsSync, readFileSync } from 'node:fs';
|
import { existsSync, readFileSync } from 'node:fs';
|
||||||
import { basename, dirname } from 'node:path';
|
import { basename, dirname } from 'node:path';
|
||||||
import { loadCustomAgents, loadAgentPrompt, loadGlobalConfig, loadProjectConfig } from '../infra/config/index.js';
|
import { loadCustomAgents, loadAgentPrompt, resolveConfigValues } from '../infra/config/index.js';
|
||||||
import { getProvider, type ProviderType, type ProviderCallOptions } from '../infra/providers/index.js';
|
import { getProvider, type ProviderType, type ProviderCallOptions } from '../infra/providers/index.js';
|
||||||
import type { AgentResponse, CustomAgentConfig } from '../core/models/index.js';
|
import type { AgentResponse, CustomAgentConfig } from '../core/models/index.js';
|
||||||
import { createLogger } from '../shared/utils/index.js';
|
import { createLogger } from '../shared/utils/index.js';
|
||||||
@ -29,16 +29,10 @@ export class AgentRunner {
|
|||||||
agentConfig?: CustomAgentConfig,
|
agentConfig?: CustomAgentConfig,
|
||||||
): ProviderType {
|
): ProviderType {
|
||||||
if (options?.provider) return options.provider;
|
if (options?.provider) return options.provider;
|
||||||
const projectConfig = loadProjectConfig(cwd);
|
const config = resolveConfigValues(cwd, ['provider']);
|
||||||
if (projectConfig.provider) return projectConfig.provider;
|
if (config.provider) return config.provider;
|
||||||
if (options?.stepProvider) return options.stepProvider;
|
if (options?.stepProvider) return options.stepProvider;
|
||||||
if (agentConfig?.provider) return agentConfig.provider;
|
if (agentConfig?.provider) return agentConfig.provider;
|
||||||
try {
|
|
||||||
const globalConfig = loadGlobalConfig();
|
|
||||||
if (globalConfig.provider) return globalConfig.provider;
|
|
||||||
} catch (error) {
|
|
||||||
log.debug('Global config not available for provider resolution', { error });
|
|
||||||
}
|
|
||||||
return 'claude';
|
return 'claude';
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -55,14 +49,11 @@ export class AgentRunner {
|
|||||||
if (options?.model) return options.model;
|
if (options?.model) return options.model;
|
||||||
if (options?.stepModel) return options.stepModel;
|
if (options?.stepModel) return options.stepModel;
|
||||||
if (agentConfig?.model) return agentConfig.model;
|
if (agentConfig?.model) return agentConfig.model;
|
||||||
try {
|
if (!options?.cwd) return undefined;
|
||||||
const globalConfig = loadGlobalConfig();
|
const config = resolveConfigValues(options.cwd, ['provider', 'model']);
|
||||||
if (globalConfig.model) {
|
if (config.model) {
|
||||||
const globalProvider = globalConfig.provider ?? 'claude';
|
const defaultProvider = config.provider ?? 'claude';
|
||||||
if (globalProvider === resolvedProvider) return globalConfig.model;
|
if (defaultProvider === resolvedProvider) return config.model;
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
log.debug('Global config not available for model resolution', { error });
|
|
||||||
}
|
}
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
@ -131,7 +122,7 @@ export class AgentRunner {
|
|||||||
name: agentConfig.name,
|
name: agentConfig.name,
|
||||||
systemPrompt: agentConfig.claudeAgent || agentConfig.claudeSkill
|
systemPrompt: agentConfig.claudeAgent || agentConfig.claudeSkill
|
||||||
? undefined
|
? undefined
|
||||||
: loadAgentPrompt(agentConfig),
|
: loadAgentPrompt(agentConfig, options.cwd),
|
||||||
claudeAgent: agentConfig.claudeAgent,
|
claudeAgent: agentConfig.claudeAgent,
|
||||||
claudeSkill: agentConfig.claudeSkill,
|
claudeSkill: agentConfig.claudeSkill,
|
||||||
});
|
});
|
||||||
|
|||||||
@ -1,15 +1,18 @@
|
|||||||
/**
|
/**
|
||||||
* CLI subcommand definitions
|
* CLI subcommand definitions
|
||||||
*
|
*
|
||||||
* Registers all named subcommands (run, watch, add, list, switch, clear, eject, config, prompt, catalog).
|
* Registers all named subcommands (run, watch, add, list, switch, clear, eject, prompt, catalog).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { clearPersonaSessions, getCurrentPiece } from '../../infra/config/index.js';
|
import { join } from 'node:path';
|
||||||
import { success } from '../../shared/ui/index.js';
|
import { clearPersonaSessions, resolveConfigValue } from '../../infra/config/index.js';
|
||||||
|
import { getGlobalConfigDir } from '../../infra/config/paths.js';
|
||||||
|
import { success, info } from '../../shared/ui/index.js';
|
||||||
import { runAllTasks, addTask, watchTasks, listTasks } from '../../features/tasks/index.js';
|
import { runAllTasks, addTask, watchTasks, listTasks } from '../../features/tasks/index.js';
|
||||||
import { switchPiece, switchConfig, ejectBuiltin, ejectFacet, parseFacetType, VALID_FACET_TYPES, resetCategoriesToDefault, deploySkill } from '../../features/config/index.js';
|
import { switchPiece, ejectBuiltin, ejectFacet, parseFacetType, VALID_FACET_TYPES, resetCategoriesToDefault, resetConfigToDefault, deploySkill } from '../../features/config/index.js';
|
||||||
import { previewPrompts } from '../../features/prompt/index.js';
|
import { previewPrompts } from '../../features/prompt/index.js';
|
||||||
import { showCatalog } from '../../features/catalog/index.js';
|
import { showCatalog } from '../../features/catalog/index.js';
|
||||||
|
import { computeReviewMetrics, formatReviewMetrics, parseSinceDuration, purgeOldEvents } from '../../features/analytics/index.js';
|
||||||
import { program, resolvedCwd } from './program.js';
|
import { program, resolvedCwd } from './program.js';
|
||||||
import { resolveAgentOverrides } from './helpers.js';
|
import { resolveAgentOverrides } from './helpers.js';
|
||||||
|
|
||||||
@ -17,7 +20,7 @@ program
|
|||||||
.command('run')
|
.command('run')
|
||||||
.description('Run all pending tasks from .takt/tasks.yaml')
|
.description('Run all pending tasks from .takt/tasks.yaml')
|
||||||
.action(async () => {
|
.action(async () => {
|
||||||
const piece = getCurrentPiece(resolvedCwd);
|
const piece = resolveConfigValue(resolvedCwd, 'piece');
|
||||||
await runAllTasks(resolvedCwd, piece, resolveAgentOverrides(program));
|
await runAllTasks(resolvedCwd, piece, resolveAgentOverrides(program));
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -96,23 +99,22 @@ program
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
program
|
|
||||||
.command('config')
|
|
||||||
.description('Configure settings (permission mode)')
|
|
||||||
.argument('[key]', 'Configuration key')
|
|
||||||
.action(async (key?: string) => {
|
|
||||||
await switchConfig(resolvedCwd, key);
|
|
||||||
});
|
|
||||||
|
|
||||||
const reset = program
|
const reset = program
|
||||||
.command('reset')
|
.command('reset')
|
||||||
.description('Reset settings to defaults');
|
.description('Reset settings to defaults');
|
||||||
|
|
||||||
|
reset
|
||||||
|
.command('config')
|
||||||
|
.description('Reset global config to builtin template (with backup)')
|
||||||
|
.action(async () => {
|
||||||
|
await resetConfigToDefault();
|
||||||
|
});
|
||||||
|
|
||||||
reset
|
reset
|
||||||
.command('categories')
|
.command('categories')
|
||||||
.description('Reset piece categories to builtin defaults')
|
.description('Reset piece categories to builtin defaults')
|
||||||
.action(async () => {
|
.action(async () => {
|
||||||
await resetCategoriesToDefault();
|
await resetCategoriesToDefault(resolvedCwd);
|
||||||
});
|
});
|
||||||
|
|
||||||
program
|
program
|
||||||
@ -137,3 +139,37 @@ program
|
|||||||
.action((type?: string) => {
|
.action((type?: string) => {
|
||||||
showCatalog(resolvedCwd, type);
|
showCatalog(resolvedCwd, type);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
const metrics = program
|
||||||
|
.command('metrics')
|
||||||
|
.description('Show analytics metrics');
|
||||||
|
|
||||||
|
metrics
|
||||||
|
.command('review')
|
||||||
|
.description('Show review quality metrics')
|
||||||
|
.option('--since <duration>', 'Time window (e.g. "7d", "30d")', '30d')
|
||||||
|
.action((opts: { since: string }) => {
|
||||||
|
const analytics = resolveConfigValue(resolvedCwd, 'analytics');
|
||||||
|
const eventsDir = analytics?.eventsPath ?? join(getGlobalConfigDir(), 'analytics', 'events');
|
||||||
|
const durationMs = parseSinceDuration(opts.since);
|
||||||
|
const sinceMs = Date.now() - durationMs;
|
||||||
|
const result = computeReviewMetrics(eventsDir, sinceMs);
|
||||||
|
info(formatReviewMetrics(result));
|
||||||
|
});
|
||||||
|
|
||||||
|
program
|
||||||
|
.command('purge')
|
||||||
|
.description('Purge old analytics event files')
|
||||||
|
.option('--retention-days <days>', 'Retention period in days', '30')
|
||||||
|
.action((opts: { retentionDays: string }) => {
|
||||||
|
const analytics = resolveConfigValue(resolvedCwd, 'analytics');
|
||||||
|
const eventsDir = analytics?.eventsPath ?? join(getGlobalConfigDir(), 'analytics', 'events');
|
||||||
|
const retentionDays = analytics?.retentionDays
|
||||||
|
?? parseInt(opts.retentionDays, 10);
|
||||||
|
const deleted = purgeOldEvents(eventsDir, retentionDays, new Date());
|
||||||
|
if (deleted.length === 0) {
|
||||||
|
info('No files to purge.');
|
||||||
|
} else {
|
||||||
|
success(`Purged ${deleted.length} file(s): ${deleted.join(', ')}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|||||||
@ -11,7 +11,7 @@ import { resolve } from 'node:path';
|
|||||||
import {
|
import {
|
||||||
initGlobalDirs,
|
initGlobalDirs,
|
||||||
initProjectDirs,
|
initProjectDirs,
|
||||||
loadGlobalConfig,
|
resolveConfigValues,
|
||||||
isVerboseMode,
|
isVerboseMode,
|
||||||
} from '../../infra/config/index.js';
|
} from '../../infra/config/index.js';
|
||||||
import { setQuietMode } from '../../shared/context.js';
|
import { setQuietMode } from '../../shared/context.js';
|
||||||
@ -51,7 +51,8 @@ program
|
|||||||
.option('--pipeline', 'Pipeline mode: non-interactive, no worktree, direct branch creation')
|
.option('--pipeline', 'Pipeline mode: non-interactive, no worktree, direct branch creation')
|
||||||
.option('--skip-git', 'Skip branch creation, commit, and push (pipeline mode)')
|
.option('--skip-git', 'Skip branch creation, commit, and push (pipeline mode)')
|
||||||
.option('--create-worktree <yes|no>', 'Skip the worktree prompt by explicitly specifying yes or no')
|
.option('--create-worktree <yes|no>', 'Skip the worktree prompt by explicitly specifying yes or no')
|
||||||
.option('-q, --quiet', 'Minimal output mode: suppress AI output (for CI)');
|
.option('-q, --quiet', 'Minimal output mode: suppress AI output (for CI)')
|
||||||
|
.option('-c, --continue', 'Continue from the last assistant session');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Run pre-action hook: common initialization for all commands.
|
* Run pre-action hook: common initialization for all commands.
|
||||||
@ -69,7 +70,7 @@ export async function runPreActionHook(): Promise<void> {
|
|||||||
const verbose = isVerboseMode(resolvedCwd);
|
const verbose = isVerboseMode(resolvedCwd);
|
||||||
initDebugLogger(verbose ? { enabled: true } : undefined, resolvedCwd);
|
initDebugLogger(verbose ? { enabled: true } : undefined, resolvedCwd);
|
||||||
|
|
||||||
const config = loadGlobalConfig();
|
const config = resolveConfigValues(resolvedCwd, ['logLevel', 'minimalOutput']);
|
||||||
|
|
||||||
if (verbose) {
|
if (verbose) {
|
||||||
setVerboseConsole(true);
|
setVerboseConsole(true);
|
||||||
|
|||||||
@ -6,7 +6,6 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { info, error as logError, withProgress } from '../../shared/ui/index.js';
|
import { info, error as logError, withProgress } from '../../shared/ui/index.js';
|
||||||
import { confirm } from '../../shared/prompt/index.js';
|
|
||||||
import { getErrorMessage } from '../../shared/utils/index.js';
|
import { getErrorMessage } from '../../shared/utils/index.js';
|
||||||
import { getLabel } from '../../shared/i18n/index.js';
|
import { getLabel } from '../../shared/i18n/index.js';
|
||||||
import { fetchIssue, formatIssueAsTask, checkGhCli, parseIssueNumbers, type GitHubIssue } from '../../infra/github/index.js';
|
import { fetchIssue, formatIssueAsTask, checkGhCli, parseIssueNumbers, type GitHubIssue } from '../../infra/github/index.js';
|
||||||
@ -15,7 +14,6 @@ import { executePipeline } from '../../features/pipeline/index.js';
|
|||||||
import {
|
import {
|
||||||
interactiveMode,
|
interactiveMode,
|
||||||
selectInteractiveMode,
|
selectInteractiveMode,
|
||||||
selectRecentSession,
|
|
||||||
passthroughMode,
|
passthroughMode,
|
||||||
quietMode,
|
quietMode,
|
||||||
personaMode,
|
personaMode,
|
||||||
@ -23,8 +21,7 @@ import {
|
|||||||
dispatchConversationAction,
|
dispatchConversationAction,
|
||||||
type InteractiveModeResult,
|
type InteractiveModeResult,
|
||||||
} from '../../features/interactive/index.js';
|
} from '../../features/interactive/index.js';
|
||||||
import { getPieceDescription, loadGlobalConfig } from '../../infra/config/index.js';
|
import { getPieceDescription, resolveConfigValue, resolveConfigValues, loadPersonaSessions } from '../../infra/config/index.js';
|
||||||
import { DEFAULT_PIECE_NAME } from '../../shared/constants.js';
|
|
||||||
import { program, resolvedCwd, pipelineMode } from './program.js';
|
import { program, resolvedCwd, pipelineMode } from './program.js';
|
||||||
import { resolveAgentOverrides, parseCreateWorktreeOption, isDirectTask } from './helpers.js';
|
import { resolveAgentOverrides, parseCreateWorktreeOption, isDirectTask } from './helpers.js';
|
||||||
import { loadTaskHistory } from './taskHistory.js';
|
import { loadTaskHistory } from './taskHistory.js';
|
||||||
@ -85,8 +82,12 @@ export async function executeDefaultAction(task?: string): Promise<void> {
|
|||||||
const opts = program.opts();
|
const opts = program.opts();
|
||||||
const agentOverrides = resolveAgentOverrides(program);
|
const agentOverrides = resolveAgentOverrides(program);
|
||||||
const createWorktreeOverride = parseCreateWorktreeOption(opts.createWorktree as string | undefined);
|
const createWorktreeOverride = parseCreateWorktreeOption(opts.createWorktree as string | undefined);
|
||||||
|
const resolvedPipelinePiece = (opts.piece as string | undefined) ?? resolveConfigValue(resolvedCwd, 'piece');
|
||||||
|
const resolvedPipelineAutoPr = opts.autoPr === true
|
||||||
|
? true
|
||||||
|
: (resolveConfigValue(resolvedCwd, 'autoPr') ?? false);
|
||||||
const selectOptions: SelectAndExecuteOptions = {
|
const selectOptions: SelectAndExecuteOptions = {
|
||||||
autoPr: opts.autoPr === true,
|
autoPr: opts.autoPr === true ? true : undefined,
|
||||||
repo: opts.repo as string | undefined,
|
repo: opts.repo as string | undefined,
|
||||||
piece: opts.piece as string | undefined,
|
piece: opts.piece as string | undefined,
|
||||||
createWorktree: createWorktreeOverride,
|
createWorktree: createWorktreeOverride,
|
||||||
@ -97,9 +98,9 @@ export async function executeDefaultAction(task?: string): Promise<void> {
|
|||||||
const exitCode = await executePipeline({
|
const exitCode = await executePipeline({
|
||||||
issueNumber: opts.issue as number | undefined,
|
issueNumber: opts.issue as number | undefined,
|
||||||
task: opts.task as string | undefined,
|
task: opts.task as string | undefined,
|
||||||
piece: (opts.piece as string | undefined) ?? DEFAULT_PIECE_NAME,
|
piece: resolvedPipelinePiece,
|
||||||
branch: opts.branch as string | undefined,
|
branch: opts.branch as string | undefined,
|
||||||
autoPr: opts.autoPr === true,
|
autoPr: resolvedPipelineAutoPr,
|
||||||
repo: opts.repo as string | undefined,
|
repo: opts.repo as string | undefined,
|
||||||
skipGit: opts.skipGit === true,
|
skipGit: opts.skipGit === true,
|
||||||
cwd: resolvedCwd,
|
cwd: resolvedCwd,
|
||||||
@ -137,7 +138,7 @@ export async function executeDefaultAction(task?: string): Promise<void> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// All paths below go through interactive mode
|
// All paths below go through interactive mode
|
||||||
const globalConfig = loadGlobalConfig();
|
const globalConfig = resolveConfigValues(resolvedCwd, ['language', 'interactivePreviewMovements', 'provider']);
|
||||||
const lang = resolveLanguage(globalConfig.language);
|
const lang = resolveLanguage(globalConfig.language);
|
||||||
|
|
||||||
const pieceId = await determinePiece(resolvedCwd, selectOptions.piece);
|
const pieceId = await determinePiece(resolvedCwd, selectOptions.piece);
|
||||||
@ -169,17 +170,14 @@ export async function executeDefaultAction(task?: string): Promise<void> {
|
|||||||
switch (selectedMode) {
|
switch (selectedMode) {
|
||||||
case 'assistant': {
|
case 'assistant': {
|
||||||
let selectedSessionId: string | undefined;
|
let selectedSessionId: string | undefined;
|
||||||
const provider = globalConfig.provider;
|
if (opts.continue === true) {
|
||||||
if (provider === 'claude') {
|
const providerType = globalConfig.provider;
|
||||||
const shouldSelectSession = await confirm(
|
const savedSessions = loadPersonaSessions(resolvedCwd, providerType);
|
||||||
getLabel('interactive.sessionSelector.confirm', lang),
|
const savedSessionId = savedSessions['interactive'];
|
||||||
false,
|
if (savedSessionId) {
|
||||||
);
|
selectedSessionId = savedSessionId;
|
||||||
if (shouldSelectSession) {
|
} else {
|
||||||
const sessionId = await selectRecentSession(resolvedCwd, lang);
|
info(getLabel('interactive.continueNoSession', lang));
|
||||||
if (sessionId) {
|
|
||||||
selectedSessionId = sessionId;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
result = await interactiveMode(resolvedCwd, initialInput, pieceContext, selectedSessionId);
|
result = await interactiveMode(resolvedCwd, initialInput, pieceContext, selectedSessionId);
|
||||||
|
|||||||
@ -23,6 +23,16 @@ export interface ObservabilityConfig {
|
|||||||
providerEvents?: boolean;
|
providerEvents?: boolean;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Analytics configuration for local metrics collection */
|
||||||
|
export interface AnalyticsConfig {
|
||||||
|
/** Whether analytics collection is enabled */
|
||||||
|
enabled?: boolean;
|
||||||
|
/** Custom path for analytics events directory (default: ~/.takt/analytics/events) */
|
||||||
|
eventsPath?: string;
|
||||||
|
/** Retention period in days for analytics event files (default: 30) */
|
||||||
|
retentionDays?: number;
|
||||||
|
}
|
||||||
|
|
||||||
/** Language setting for takt */
|
/** Language setting for takt */
|
||||||
export type Language = 'en' | 'ja';
|
export type Language = 'en' | 'ja';
|
||||||
|
|
||||||
@ -53,11 +63,11 @@ export interface NotificationSoundEventsConfig {
|
|||||||
/** Global configuration for takt */
|
/** Global configuration for takt */
|
||||||
export interface GlobalConfig {
|
export interface GlobalConfig {
|
||||||
language: Language;
|
language: Language;
|
||||||
defaultPiece: string;
|
|
||||||
logLevel: 'debug' | 'info' | 'warn' | 'error';
|
logLevel: 'debug' | 'info' | 'warn' | 'error';
|
||||||
provider?: 'claude' | 'codex' | 'opencode' | 'mock';
|
provider?: 'claude' | 'codex' | 'opencode' | 'mock';
|
||||||
model?: string;
|
model?: string;
|
||||||
observability?: ObservabilityConfig;
|
observability?: ObservabilityConfig;
|
||||||
|
analytics?: AnalyticsConfig;
|
||||||
/** Directory for shared clones (worktree_dir in config). If empty, uses ../{clone-name} relative to project */
|
/** Directory for shared clones (worktree_dir in config). If empty, uses ../{clone-name} relative to project */
|
||||||
worktreeDir?: string;
|
worktreeDir?: string;
|
||||||
/** Auto-create PR after worktree execution (default: prompt in interactive mode) */
|
/** Auto-create PR after worktree execution (default: prompt in interactive mode) */
|
||||||
@ -100,6 +110,8 @@ export interface GlobalConfig {
|
|||||||
notificationSoundEvents?: NotificationSoundEventsConfig;
|
notificationSoundEvents?: NotificationSoundEventsConfig;
|
||||||
/** Number of movement previews to inject into interactive mode (0 to disable, max 10) */
|
/** Number of movement previews to inject into interactive mode (0 to disable, max 10) */
|
||||||
interactivePreviewMovements?: number;
|
interactivePreviewMovements?: number;
|
||||||
|
/** Verbose output mode */
|
||||||
|
verbose?: boolean;
|
||||||
/** Number of tasks to run concurrently in takt run (default: 1 = sequential) */
|
/** Number of tasks to run concurrently in takt run (default: 1 = sequential) */
|
||||||
concurrency: number;
|
concurrency: number;
|
||||||
/** Polling interval in ms for picking up new tasks during takt run (default: 500, range: 100-5000) */
|
/** Polling interval in ms for picking up new tasks during takt run (default: 500, range: 100-5000) */
|
||||||
@ -109,7 +121,6 @@ export interface GlobalConfig {
|
|||||||
/** Project-level configuration */
|
/** Project-level configuration */
|
||||||
export interface ProjectConfig {
|
export interface ProjectConfig {
|
||||||
piece?: string;
|
piece?: string;
|
||||||
agents?: CustomAgentConfig[];
|
|
||||||
provider?: 'claude' | 'codex' | 'opencode' | 'mock';
|
provider?: 'claude' | 'codex' | 'opencode' | 'mock';
|
||||||
providerOptions?: MovementProviderOptions;
|
providerOptions?: MovementProviderOptions;
|
||||||
/** Provider-specific permission profiles */
|
/** Provider-specific permission profiles */
|
||||||
|
|||||||
@ -378,6 +378,13 @@ export const ObservabilityConfigSchema = z.object({
|
|||||||
provider_events: z.boolean().optional(),
|
provider_events: z.boolean().optional(),
|
||||||
});
|
});
|
||||||
|
|
||||||
|
/** Analytics config schema */
|
||||||
|
export const AnalyticsConfigSchema = z.object({
|
||||||
|
enabled: z.boolean().optional(),
|
||||||
|
events_path: z.string().optional(),
|
||||||
|
retention_days: z.number().int().positive().optional(),
|
||||||
|
});
|
||||||
|
|
||||||
/** Language setting schema */
|
/** Language setting schema */
|
||||||
export const LanguageSchema = z.enum(['en', 'ja']);
|
export const LanguageSchema = z.enum(['en', 'ja']);
|
||||||
|
|
||||||
@ -405,11 +412,11 @@ export const PieceCategoryConfigSchema = z.record(z.string(), PieceCategoryConfi
|
|||||||
/** Global config schema */
|
/** Global config schema */
|
||||||
export const GlobalConfigSchema = z.object({
|
export const GlobalConfigSchema = z.object({
|
||||||
language: LanguageSchema.optional().default(DEFAULT_LANGUAGE),
|
language: LanguageSchema.optional().default(DEFAULT_LANGUAGE),
|
||||||
default_piece: z.string().optional().default('default'),
|
|
||||||
log_level: z.enum(['debug', 'info', 'warn', 'error']).optional().default('info'),
|
log_level: z.enum(['debug', 'info', 'warn', 'error']).optional().default('info'),
|
||||||
provider: z.enum(['claude', 'codex', 'opencode', 'mock']).optional().default('claude'),
|
provider: z.enum(['claude', 'codex', 'opencode', 'mock']).optional().default('claude'),
|
||||||
model: z.string().optional(),
|
model: z.string().optional(),
|
||||||
observability: ObservabilityConfigSchema.optional(),
|
observability: ObservabilityConfigSchema.optional(),
|
||||||
|
analytics: AnalyticsConfigSchema.optional(),
|
||||||
/** Directory for shared clones (worktree_dir in config). If empty, uses ../{clone-name} relative to project */
|
/** Directory for shared clones (worktree_dir in config). If empty, uses ../{clone-name} relative to project */
|
||||||
worktree_dir: z.string().optional(),
|
worktree_dir: z.string().optional(),
|
||||||
/** Auto-create PR after worktree execution (default: prompt in interactive mode) */
|
/** Auto-create PR after worktree execution (default: prompt in interactive mode) */
|
||||||
@ -458,6 +465,8 @@ export const GlobalConfigSchema = z.object({
|
|||||||
}).optional(),
|
}).optional(),
|
||||||
/** Number of movement previews to inject into interactive mode (0 to disable, max 10) */
|
/** Number of movement previews to inject into interactive mode (0 to disable, max 10) */
|
||||||
interactive_preview_movements: z.number().int().min(0).max(10).optional().default(3),
|
interactive_preview_movements: z.number().int().min(0).max(10).optional().default(3),
|
||||||
|
/** Verbose output mode */
|
||||||
|
verbose: z.boolean().optional(),
|
||||||
/** Number of tasks to run concurrently in takt run (default: 1 = sequential, max: 10) */
|
/** Number of tasks to run concurrently in takt run (default: 1 = sequential, max: 10) */
|
||||||
concurrency: z.number().int().min(1).max(10).optional().default(1),
|
concurrency: z.number().int().min(1).max(10).optional().default(1),
|
||||||
/** Polling interval in ms for picking up new tasks during takt run (default: 500, range: 100-5000) */
|
/** Polling interval in ms for picking up new tasks during takt run (default: 500, range: 100-5000) */
|
||||||
@ -467,7 +476,6 @@ export const GlobalConfigSchema = z.object({
|
|||||||
/** Project config schema */
|
/** Project config schema */
|
||||||
export const ProjectConfigSchema = z.object({
|
export const ProjectConfigSchema = z.object({
|
||||||
piece: z.string().optional(),
|
piece: z.string().optional(),
|
||||||
agents: z.array(CustomAgentConfigSchema).optional(),
|
|
||||||
provider: z.enum(['claude', 'codex', 'opencode', 'mock']).optional(),
|
provider: z.enum(['claude', 'codex', 'opencode', 'mock']).optional(),
|
||||||
provider_options: MovementProviderOptionsSchema,
|
provider_options: MovementProviderOptionsSchema,
|
||||||
provider_profiles: ProviderPermissionProfilesSchema,
|
provider_profiles: ProviderPermissionProfilesSchema,
|
||||||
|
|||||||
@ -1,5 +1,6 @@
|
|||||||
import { join } from 'node:path';
|
import { join } from 'node:path';
|
||||||
import type { PieceMovement, PieceState, Language } from '../../models/types.js';
|
import type { PieceMovement, PieceState, Language } from '../../models/types.js';
|
||||||
|
import type { MovementProviderOptions } from '../../models/piece-types.js';
|
||||||
import type { RunAgentOptions } from '../../../agents/runner.js';
|
import type { RunAgentOptions } from '../../../agents/runner.js';
|
||||||
import type { PhaseRunnerContext } from '../phase-runner.js';
|
import type { PhaseRunnerContext } from '../phase-runner.js';
|
||||||
import type { PieceEngineOptions, PhaseName } from '../types.js';
|
import type { PieceEngineOptions, PhaseName } from '../types.js';
|
||||||
@ -7,6 +8,27 @@ import { buildSessionKey } from '../session-key.js';
|
|||||||
import { resolveMovementProviderModel } from '../provider-resolution.js';
|
import { resolveMovementProviderModel } from '../provider-resolution.js';
|
||||||
import { DEFAULT_PROVIDER_PERMISSION_PROFILES, resolveMovementPermissionMode } from '../permission-profile-resolution.js';
|
import { DEFAULT_PROVIDER_PERMISSION_PROFILES, resolveMovementPermissionMode } from '../permission-profile-resolution.js';
|
||||||
|
|
||||||
|
function mergeProviderOptions(
|
||||||
|
...layers: (MovementProviderOptions | undefined)[]
|
||||||
|
): MovementProviderOptions | undefined {
|
||||||
|
const result: MovementProviderOptions = {};
|
||||||
|
for (const layer of layers) {
|
||||||
|
if (!layer) continue;
|
||||||
|
if (layer.codex) {
|
||||||
|
result.codex = { ...result.codex, ...layer.codex };
|
||||||
|
}
|
||||||
|
if (layer.opencode) {
|
||||||
|
result.opencode = { ...result.opencode, ...layer.opencode };
|
||||||
|
}
|
||||||
|
if (layer.claude?.sandbox) {
|
||||||
|
result.claude = {
|
||||||
|
sandbox: { ...result.claude?.sandbox, ...layer.claude.sandbox },
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Object.keys(result).length > 0 ? result : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
export class OptionsBuilder {
|
export class OptionsBuilder {
|
||||||
constructor(
|
constructor(
|
||||||
private readonly engineOptions: PieceEngineOptions,
|
private readonly engineOptions: PieceEngineOptions,
|
||||||
@ -34,9 +56,7 @@ export class OptionsBuilder {
|
|||||||
|
|
||||||
const resolvedProviderForPermissions =
|
const resolvedProviderForPermissions =
|
||||||
this.engineOptions.provider
|
this.engineOptions.provider
|
||||||
?? this.engineOptions.projectProvider
|
|
||||||
?? resolved.provider
|
?? resolved.provider
|
||||||
?? this.engineOptions.globalProvider
|
|
||||||
?? 'claude';
|
?? 'claude';
|
||||||
|
|
||||||
return {
|
return {
|
||||||
@ -51,10 +71,13 @@ export class OptionsBuilder {
|
|||||||
movementName: step.name,
|
movementName: step.name,
|
||||||
requiredPermissionMode: step.requiredPermissionMode,
|
requiredPermissionMode: step.requiredPermissionMode,
|
||||||
provider: resolvedProviderForPermissions,
|
provider: resolvedProviderForPermissions,
|
||||||
projectProviderProfiles: this.engineOptions.projectProviderProfiles,
|
projectProviderProfiles: this.engineOptions.providerProfiles,
|
||||||
globalProviderProfiles: this.engineOptions.globalProviderProfiles ?? DEFAULT_PROVIDER_PERMISSION_PROFILES,
|
globalProviderProfiles: DEFAULT_PROVIDER_PERMISSION_PROFILES,
|
||||||
}),
|
}),
|
||||||
providerOptions: step.providerOptions,
|
providerOptions: mergeProviderOptions(
|
||||||
|
this.engineOptions.providerOptions,
|
||||||
|
step.providerOptions,
|
||||||
|
),
|
||||||
language: this.getLanguage(),
|
language: this.getLanguage(),
|
||||||
onStream: this.engineOptions.onStream,
|
onStream: this.engineOptions.onStream,
|
||||||
onPermissionRequest: this.engineOptions.onPermissionRequest,
|
onPermissionRequest: this.engineOptions.onPermissionRequest,
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user