| 作成 | `src/task/git.ts` | `stageAndCommit()` 共通関数。git commit ロジックのDRY化 |

| 作成 | `src/workflow/instruction-context.ts` | `instruction-builder.ts` からコンテキスト組立ロジック抽出 |
| 作成 | `src/workflow/status-rules.ts` | `instruction-builder.ts` からステータスルールロジック抽出 |
| 変更 | 35ファイル | `getErrorMessage()` 統一、`projectCwd` required 化、`process.cwd()` デフォルト除去、`sacrificeMode` 削除、`loadGlobalConfig` キャッシュ、`console.log` → `blankLine()`、`executeTask` options object 化 |

resolved #44
This commit is contained in:
nrslib 2026-02-01 22:58:49 +09:00
parent 1ee73c525c
commit d6ac71f0e6
41 changed files with 516 additions and 399 deletions

View File

@ -32,7 +32,7 @@ vi.mock('../config/paths.js', async (importOriginal) => {
});
// Import after mocking
const { loadGlobalConfig, saveGlobalConfig, resolveAnthropicApiKey, resolveOpenaiApiKey } = await import('../config/globalConfig.js');
const { loadGlobalConfig, saveGlobalConfig, resolveAnthropicApiKey, resolveOpenaiApiKey, invalidateGlobalConfigCache } = await import('../config/globalConfig.js');
describe('GlobalConfigSchema API key fields', () => {
it('should accept config without API keys', () => {
@ -72,6 +72,7 @@ describe('GlobalConfigSchema API key fields', () => {
describe('GlobalConfig load/save with API keys', () => {
beforeEach(() => {
invalidateGlobalConfigCache();
mkdirSync(taktDir, { recursive: true });
});
@ -155,6 +156,7 @@ describe('resolveAnthropicApiKey', () => {
const originalEnv = process.env['TAKT_ANTHROPIC_API_KEY'];
beforeEach(() => {
invalidateGlobalConfigCache();
mkdirSync(taktDir, { recursive: true });
});
@ -228,6 +230,7 @@ describe('resolveOpenaiApiKey', () => {
const originalEnv = process.env['TAKT_OPENAI_API_KEY'];
beforeEach(() => {
invalidateGlobalConfigCache();
mkdirSync(taktDir, { recursive: true });
});

View File

@ -200,22 +200,22 @@ steps:
describe('loadWorkflow (builtin fallback)', () => {
it('should load builtin workflow when user workflow does not exist', () => {
const workflow = loadWorkflow('default');
const workflow = loadWorkflow('default', process.cwd());
expect(workflow).not.toBeNull();
expect(workflow!.name).toBe('default');
});
it('should return null for non-existent workflow', () => {
const workflow = loadWorkflow('does-not-exist');
const workflow = loadWorkflow('does-not-exist', process.cwd());
expect(workflow).toBeNull();
});
it('should load builtin workflows like simple, research', () => {
const simple = loadWorkflow('simple');
const simple = loadWorkflow('simple', process.cwd());
expect(simple).not.toBeNull();
expect(simple!.name).toBe('simple');
const research = loadWorkflow('research');
const research = loadWorkflow('research', process.cwd());
expect(research).not.toBeNull();
expect(research!.name).toBe('research');
});

View File

@ -90,7 +90,7 @@ describe('WorkflowEngine: Abort (SIGINT)', () => {
describe('abort() before run loop iteration', () => {
it('should abort immediately when abort() called before step execution', async () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
const abortFn = vi.fn();
engine.on('workflow:abort', abortFn);
@ -112,7 +112,7 @@ describe('WorkflowEngine: Abort (SIGINT)', () => {
describe('abort() during step execution', () => {
it('should abort when abort() is called during runAgent', async () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
// Simulate abort during step execution: runAgent rejects after abort() is called
vi.mocked(runAgent).mockImplementation(async () => {
@ -135,7 +135,7 @@ describe('WorkflowEngine: Abort (SIGINT)', () => {
describe('abort() calls interruptAllQueries', () => {
it('should call interruptAllQueries when abort() is called', () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
engine.abort();
@ -146,7 +146,7 @@ describe('WorkflowEngine: Abort (SIGINT)', () => {
describe('abort() idempotency', () => {
it('should only call interruptAllQueries once on multiple abort() calls', () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
engine.abort();
engine.abort();
@ -159,14 +159,14 @@ describe('WorkflowEngine: Abort (SIGINT)', () => {
describe('isAbortRequested()', () => {
it('should return false initially', () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
expect(engine.isAbortRequested()).toBe(false);
});
it('should return true after abort()', () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
engine.abort();
@ -177,7 +177,7 @@ describe('WorkflowEngine: Abort (SIGINT)', () => {
describe('abort between steps', () => {
it('should stop after completing current step when abort() is called', async () => {
const config = makeSimpleConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
// First step completes normally, but abort is called during it
vi.mocked(runAgent).mockImplementation(async () => {

View File

@ -62,6 +62,7 @@ describe('WorkflowEngine agent overrides', () => {
mockDetectMatchedRuleSequence([{ index: 0, method: 'phase1_tag' }]);
const engine = new WorkflowEngine(config, '/tmp/project', 'override task', {
projectCwd: '/tmp/project',
provider: 'codex',
model: 'cli-model',
});
@ -90,6 +91,7 @@ describe('WorkflowEngine agent overrides', () => {
mockDetectMatchedRuleSequence([{ index: 0, method: 'phase1_tag' }]);
const engine = new WorkflowEngine(config, '/tmp/project', 'override task', {
projectCwd: '/tmp/project',
provider: 'codex',
model: 'cli-model',
});
@ -119,7 +121,7 @@ describe('WorkflowEngine agent overrides', () => {
]);
mockDetectMatchedRuleSequence([{ index: 0, method: 'phase1_tag' }]);
const engine = new WorkflowEngine(config, '/tmp/project', 'step task');
const engine = new WorkflowEngine(config, '/tmp/project', 'step task', { projectCwd: '/tmp/project' });
await engine.run();
const options = vi.mocked(runAgent).mock.calls[0][2];

View File

@ -59,7 +59,7 @@ describe('WorkflowEngine Integration: Blocked Handling', () => {
it('should abort when blocked and no onUserInput callback', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', status: 'blocked', content: 'Need clarification' }),
@ -84,7 +84,7 @@ describe('WorkflowEngine Integration: Blocked Handling', () => {
it('should abort when blocked and onUserInput returns null', async () => {
const config = buildDefaultWorkflowConfig();
const onUserInput = vi.fn().mockResolvedValue(null);
const engine = new WorkflowEngine(config, tmpDir, 'test task', { onUserInput });
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir, onUserInput });
mockRunAgentSequence([
makeResponse({ agent: 'plan', status: 'blocked', content: 'Need info' }),
@ -103,7 +103,7 @@ describe('WorkflowEngine Integration: Blocked Handling', () => {
it('should continue when blocked and onUserInput provides input', async () => {
const config = buildDefaultWorkflowConfig();
const onUserInput = vi.fn().mockResolvedValueOnce('User provided clarification');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { onUserInput });
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir, onUserInput });
mockRunAgentSequence([
// First: plan is blocked

View File

@ -68,7 +68,7 @@ describe('WorkflowEngine Integration: Error Handling', () => {
describe('No rule matched', () => {
it('should abort when detectMatchedRule returns undefined', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Unclear output' }),
@ -94,7 +94,7 @@ describe('WorkflowEngine Integration: Error Handling', () => {
describe('runAgent throws', () => {
it('should abort when runAgent throws an error', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
vi.mocked(runAgent).mockRejectedValueOnce(new Error('API connection failed'));
@ -126,7 +126,7 @@ describe('WorkflowEngine Integration: Error Handling', () => {
],
});
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
for (let i = 0; i < 5; i++) {
vi.mocked(runAgent).mockResolvedValueOnce(
@ -156,7 +156,7 @@ describe('WorkflowEngine Integration: Error Handling', () => {
describe('Iteration limit', () => {
it('should abort when max iterations reached without onIterationLimit callback', async () => {
const config = buildDefaultWorkflowConfig({ maxIterations: 2 });
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan done' }),
@ -190,6 +190,7 @@ describe('WorkflowEngine Integration: Error Handling', () => {
const onIterationLimit = vi.fn().mockResolvedValueOnce(10);
const engine = new WorkflowEngine(config, tmpDir, 'test task', {
projectCwd: tmpDir,
onIterationLimit,
});

View File

@ -71,7 +71,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
describe('Happy path', () => {
it('should complete: plan → implement → ai_review → reviewers(all approved) → supervise → COMPLETE', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan complete' }),
@ -110,7 +110,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
describe('Review reject and fix loop', () => {
it('should handle: reviewers(needs_fix) → fix → reviewers(all approved) → supervise → COMPLETE', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan done' }),
@ -156,7 +156,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
describe('AI review reject and fix', () => {
it('should handle: ai_review(issues) → ai_fix → reviewers → supervise → COMPLETE', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan done' }),
@ -193,7 +193,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
describe('ABORT transition', () => {
it('should abort when step transitions to ABORT', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Requirements unclear' }),
@ -220,7 +220,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
describe('Event emissions', () => {
it('should emit step:start and step:complete for each step', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan' }),
@ -267,7 +267,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
}),
],
};
const engine = new WorkflowEngine(simpleConfig, tmpDir, 'test task');
const engine = new WorkflowEngine(simpleConfig, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan done' }),
@ -290,7 +290,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
it('should pass empty instruction to step:start for parallel steps', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan' }),
@ -328,7 +328,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
it('should emit iteration:limit when max iterations reached', async () => {
const config = buildDefaultWorkflowConfig({ maxIterations: 1 });
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan' }),
@ -352,7 +352,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
describe('Step output tracking', () => {
it('should store outputs for all executed steps', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan output' }),
@ -390,7 +390,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
const config = buildDefaultWorkflowConfig({ initialStep: 'nonexistent' });
expect(() => {
new WorkflowEngine(config, tmpDir, 'test task');
new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
}).toThrow('Unknown step: nonexistent');
});
@ -407,7 +407,7 @@ describe('WorkflowEngine Integration: Happy Path', () => {
};
expect(() => {
new WorkflowEngine(config, tmpDir, 'test task');
new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
}).toThrow('nonexistent_step');
});
});

View File

@ -60,7 +60,7 @@ describe('WorkflowEngine Integration: Parallel Step Aggregation', () => {
it('should aggregate sub-step outputs with ## headers and --- separators', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan done' }),
@ -97,7 +97,7 @@ describe('WorkflowEngine Integration: Parallel Step Aggregation', () => {
it('should store individual sub-step outputs in stepOutputs', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan' }),
@ -129,7 +129,7 @@ describe('WorkflowEngine Integration: Parallel Step Aggregation', () => {
it('should execute sub-steps concurrently (both runAgent calls happen)', async () => {
const config = buildDefaultWorkflowConfig();
const engine = new WorkflowEngine(config, tmpDir, 'test task');
const engine = new WorkflowEngine(config, tmpDir, 'test task', { projectCwd: tmpDir });
mockRunAgentSequence([
makeResponse({ agent: 'plan', content: 'Plan' }),

View File

@ -20,11 +20,12 @@ vi.mock('node:os', async () => {
});
// Import after mocks are set up
const { loadGlobalConfig, saveGlobalConfig } = await import('../config/globalConfig.js');
const { loadGlobalConfig, saveGlobalConfig, invalidateGlobalConfigCache } = await import('../config/globalConfig.js');
const { getGlobalConfigPath } = await import('../config/paths.js');
describe('loadGlobalConfig', () => {
beforeEach(() => {
invalidateGlobalConfigCache();
mkdirSync(testHomeDir, { recursive: true });
});
@ -47,12 +48,20 @@ describe('loadGlobalConfig', () => {
expect(config.pipeline).toBeUndefined();
});
it('should return a fresh copy each time (no shared reference)', () => {
it('should return the same cached object on subsequent calls', () => {
const config1 = loadGlobalConfig();
const config2 = loadGlobalConfig();
config1.trustedDirectories.push('/tmp/test');
expect(config2.trustedDirectories).toEqual([]);
expect(config1).toBe(config2);
});
it('should return a fresh object after cache invalidation', () => {
const config1 = loadGlobalConfig();
invalidateGlobalConfigCache();
const config2 = loadGlobalConfig();
expect(config1).not.toBe(config2);
expect(config1).toEqual(config2);
});
it('should load from config.yaml when it exists', () => {
@ -105,10 +114,41 @@ describe('loadGlobalConfig', () => {
commitMessageTemplate: 'feat: {title} (#{issue})',
};
saveGlobalConfig(config);
invalidateGlobalConfigCache();
const reloaded = loadGlobalConfig();
expect(reloaded.pipeline).toBeDefined();
expect(reloaded.pipeline!.defaultBranchPrefix).toBe('takt/');
expect(reloaded.pipeline!.commitMessageTemplate).toBe('feat: {title} (#{issue})');
});
it('should read from cache without hitting disk on second call', () => {
const taktDir = join(testHomeDir, '.takt');
mkdirSync(taktDir, { recursive: true });
writeFileSync(
getGlobalConfigPath(),
'language: ja\nprovider: codex\n',
'utf-8',
);
const config1 = loadGlobalConfig();
expect(config1.language).toBe('ja');
// Overwrite file on disk - cached result should still be returned
writeFileSync(
getGlobalConfigPath(),
'language: en\nprovider: claude\n',
'utf-8',
);
const config2 = loadGlobalConfig();
expect(config2.language).toBe('ja');
expect(config2).toBe(config1);
// After invalidation, the new file content is read
invalidateGlobalConfigCache();
const config3 = loadGlobalConfig();
expect(config3.language).toBe('en');
expect(config3).not.toBe(config1);
});
});

View File

@ -27,6 +27,8 @@ vi.mock('../config/paths.js', () => ({
vi.mock('../utils/ui.js', () => ({
info: vi.fn(),
error: vi.fn(),
blankLine: vi.fn(),
StreamDisplay: vi.fn().mockImplementation(() => ({
createHandler: vi.fn(() => vi.fn()),
flush: vi.fn(),

View File

@ -49,6 +49,7 @@ vi.mock('../utils/ui.js', () => ({
error: vi.fn(),
success: vi.fn(),
status: vi.fn(),
blankLine: vi.fn(),
StreamDisplay: vi.fn().mockImplementation(() => ({
createHandler: () => vi.fn(),
flush: vi.fn(),

View File

@ -168,6 +168,7 @@ describe('Workflow Engine IT: Happy Path', () => {
const config = buildSimpleWorkflow(agentPaths);
const engine = new WorkflowEngine(config, testDir, 'Test task', {
projectCwd: testDir,
provider: 'mock',
});
@ -184,6 +185,7 @@ describe('Workflow Engine IT: Happy Path', () => {
const config = buildSimpleWorkflow(agentPaths);
const engine = new WorkflowEngine(config, testDir, 'Vague task', {
projectCwd: testDir,
provider: 'mock',
});
@ -226,6 +228,7 @@ describe('Workflow Engine IT: Fix Loop', () => {
const config = buildLoopWorkflow(agentPaths);
const engine = new WorkflowEngine(config, testDir, 'Task needing fix', {
projectCwd: testDir,
provider: 'mock',
});
@ -245,6 +248,7 @@ describe('Workflow Engine IT: Fix Loop', () => {
const config = buildLoopWorkflow(agentPaths);
const engine = new WorkflowEngine(config, testDir, 'Unfixable task', {
projectCwd: testDir,
provider: 'mock',
});
@ -282,6 +286,7 @@ describe('Workflow Engine IT: Max Iterations', () => {
config.maxIterations = 5;
const engine = new WorkflowEngine(config, testDir, 'Looping task', {
projectCwd: testDir,
provider: 'mock',
});
@ -317,6 +322,7 @@ describe('Workflow Engine IT: Step Output Tracking', () => {
const config = buildSimpleWorkflow(agentPaths);
const engine = new WorkflowEngine(config, testDir, 'Track outputs', {
projectCwd: testDir,
provider: 'mock',
});

View File

@ -49,6 +49,7 @@ vi.mock('../utils/ui.js', () => ({
error: vi.fn(),
success: vi.fn(),
status: vi.fn(),
blankLine: vi.fn(),
}));
// Mock debug logger
@ -146,13 +147,13 @@ describe('executePipeline', () => {
});
expect(exitCode).toBe(0);
expect(mockExecuteTask).toHaveBeenCalledWith(
'Fix the bug',
'/tmp/test',
'default',
'/tmp/test',
undefined,
);
expect(mockExecuteTask).toHaveBeenCalledWith({
task: 'Fix the bug',
cwd: '/tmp/test',
workflowIdentifier: 'default',
projectCwd: '/tmp/test',
agentOverrides: undefined,
});
});
it('passes provider/model overrides to task execution', async () => {
@ -168,13 +169,13 @@ describe('executePipeline', () => {
});
expect(exitCode).toBe(0);
expect(mockExecuteTask).toHaveBeenCalledWith(
'Fix the bug',
'/tmp/test',
'default',
'/tmp/test',
{ provider: 'codex', model: 'codex-model' },
);
expect(mockExecuteTask).toHaveBeenCalledWith({
task: 'Fix the bug',
cwd: '/tmp/test',
workflowIdentifier: 'default',
projectCwd: '/tmp/test',
agentOverrides: { provider: 'codex', model: 'codex-model' },
});
});
it('should return exit code 5 when PR creation fails', async () => {
@ -225,13 +226,13 @@ describe('executePipeline', () => {
});
expect(exitCode).toBe(0);
expect(mockExecuteTask).toHaveBeenCalledWith(
'From --task flag',
'/tmp/test',
'magi',
'/tmp/test',
undefined,
);
expect(mockExecuteTask).toHaveBeenCalledWith({
task: 'From --task flag',
cwd: '/tmp/test',
workflowIdentifier: 'magi',
projectCwd: '/tmp/test',
agentOverrides: undefined,
});
});
describe('PipelineConfig template expansion', () => {
@ -385,13 +386,13 @@ describe('executePipeline', () => {
});
expect(exitCode).toBe(0);
expect(mockExecuteTask).toHaveBeenCalledWith(
'Fix the bug',
'/tmp/test',
'default',
'/tmp/test',
undefined,
);
expect(mockExecuteTask).toHaveBeenCalledWith({
task: 'Fix the bug',
cwd: '/tmp/test',
workflowIdentifier: 'default',
projectCwd: '/tmp/test',
agentOverrides: undefined,
});
// No git operations should have been called
const gitCalls = mockExecFileSync.mock.calls.filter(

View File

@ -34,6 +34,7 @@ vi.mock('../utils/ui.js', () => ({
error: vi.fn(),
success: vi.fn(),
status: vi.fn(),
blankLine: vi.fn(),
}));
vi.mock('../utils/debug.js', () => ({

View File

@ -14,7 +14,7 @@ import { describe, it, expect } from 'vitest';
import { loadWorkflow } from '../config/loader.js';
describe('expert workflow parallel structure', () => {
const workflow = loadWorkflow('expert');
const workflow = loadWorkflow('expert', process.cwd());
it('should load successfully', () => {
expect(workflow).not.toBeNull();
@ -95,7 +95,7 @@ describe('expert workflow parallel structure', () => {
});
describe('expert-cqrs workflow parallel structure', () => {
const workflow = loadWorkflow('expert-cqrs');
const workflow = loadWorkflow('expert-cqrs', process.cwd());
it('should load successfully', () => {
expect(workflow).not.toBeNull();

View File

@ -75,7 +75,7 @@ describe('loadWorkflowByIdentifier', () => {
});
it('should load workflow by name (builtin)', () => {
const workflow = loadWorkflowByIdentifier('default');
const workflow = loadWorkflowByIdentifier('default', process.cwd());
expect(workflow).not.toBeNull();
expect(workflow!.name).toBe('default');
});
@ -108,7 +108,7 @@ describe('loadWorkflowByIdentifier', () => {
});
it('should return null for non-existent name', () => {
const workflow = loadWorkflowByIdentifier('non-existent-workflow-xyz');
const workflow = loadWorkflowByIdentifier('non-existent-workflow-xyz', process.cwd());
expect(workflow).toBeNull();
});

View File

@ -15,6 +15,7 @@ import {
type PermissionMode,
} from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
import {
generateQueryId,
registerQuery,
@ -220,7 +221,7 @@ function handleQueryError(
};
}
const errorMessage = error instanceof Error ? error.message : String(error);
const errorMessage = getErrorMessage(error);
if (hasResultMessage && success) {
log.info('Claude query completed with post-completion error (ignoring)', {

View File

@ -48,6 +48,7 @@ import { autoCommitAndPush } from './task/autoCommit.js';
import { summarizeTaskName } from './task/summarize.js';
import { DEFAULT_WORKFLOW_NAME } from './constants.js';
import { checkForUpdates } from './utils/updateNotifier.js';
import { getErrorMessage } from './utils/error.js';
import { resolveIssueTask, isIssueReference } from './github/issue.js';
import { createPullRequest, buildPrBody } from './github/pr.js';
import type { TaskExecutionOptions } from './commands/taskExecution.js';
@ -137,7 +138,13 @@ async function selectAndExecuteTask(
);
log.info('Starting task execution', { workflow: workflowIdentifier, worktree: isWorktree });
const taskSuccess = await executeTask(task, execCwd, workflowIdentifier, cwd, agentOverrides);
const taskSuccess = await executeTask({
task,
cwd: execCwd,
workflowIdentifier,
projectCwd: cwd,
agentOverrides,
});
if (taskSuccess && isWorktree) {
const commitResult = autoCommitAndPush(execCwd, task, cwd);
@ -449,7 +456,7 @@ program
const resolvedTask = resolveIssueTask(`#${issueFromOption}`);
await selectAndExecuteTask(resolvedCwd, resolvedTask, selectOptions, agentOverrides);
} catch (e) {
error(e instanceof Error ? e.message : String(e));
error(getErrorMessage(e));
process.exit(1);
}
return;
@ -463,7 +470,7 @@ program
info('Fetching GitHub Issue...');
resolvedTask = resolveIssueTask(task);
} catch (e) {
error(e instanceof Error ? e.message : String(e));
error(getErrorMessage(e));
process.exit(1);
}
}

View File

@ -8,6 +8,7 @@ import { Codex } from '@openai/codex-sdk';
import type { AgentResponse, Status } from '../models/types.js';
import type { StreamCallback } from '../claude/process.js';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
const log = createLogger('codex-sdk');
@ -486,7 +487,7 @@ export async function callCodex(
sessionId: threadId,
};
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
const message = getErrorMessage(error);
emitResult(options.onStream, false, message, threadId);
return {

View File

@ -14,6 +14,7 @@ import { summarizeTaskName } from '../task/summarize.js';
import { loadGlobalConfig } from '../config/globalConfig.js';
import { getProvider, type ProviderType } from '../providers/index.js';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
import { listWorkflows } from '../config/workflowLoader.js';
import { getCurrentWorkflow } from '../config/paths.js';
import { interactiveMode } from './interactive.js';
@ -87,7 +88,7 @@ export async function addTask(cwd: string, task?: string): Promise<void> {
try {
taskContent = resolveIssueTask(task);
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
const msg = getErrorMessage(e);
log.error('Failed to fetch GitHub Issue', { task, error: msg });
info(`Failed to fetch issue ${task}: ${msg}`);
return;

View File

@ -75,14 +75,9 @@ function getPermissionModeOptions(currentMode: PermissionMode): {
*/
export function getCurrentPermissionMode(cwd: string): PermissionMode {
const config = loadProjectConfig(cwd);
// Support both old sacrificeMode boolean and new permissionMode string
if (config.permissionMode) {
return config.permissionMode as PermissionMode;
}
// Legacy: convert sacrificeMode boolean to new format
if (config.sacrificeMode) {
return 'sacrifice-my-pc';
}
return 'default';
}
@ -91,8 +86,6 @@ export function getCurrentPermissionMode(cwd: string): PermissionMode {
*/
export function setPermissionMode(cwd: string, mode: PermissionMode): void {
updateProjectConfig(cwd, 'permissionMode', mode);
// @deprecated TODO: Remove in v1.0 - legacy sacrificeMode for backwards compatibility
updateProjectConfig(cwd, 'sacrificeMode', mode === 'sacrifice-my-pc');
}
/**

View File

@ -9,7 +9,7 @@ import { existsSync, readdirSync, statSync, readFileSync, writeFileSync, mkdirSy
import { join, dirname } from 'node:path';
import { getGlobalWorkflowsDir, getGlobalAgentsDir, getBuiltinWorkflowsDir, getBuiltinAgentsDir } from '../config/paths.js';
import { getLanguage } from '../config/globalConfig.js';
import { header, success, info, warn, error } from '../utils/ui.js';
import { header, success, info, warn, error, blankLine } from '../utils/ui.js';
/**
* Eject a builtin workflow to user space for customization.
@ -90,7 +90,7 @@ function listAvailableBuiltins(builtinWorkflowsDir: string): void {
}
info('Available builtin workflows:');
console.log();
blankLine();
for (const entry of readdirSync(builtinWorkflowsDir).sort()) {
if (!entry.endsWith('.yaml') && !entry.endsWith('.yml')) continue;
@ -100,7 +100,7 @@ function listAvailableBuiltins(builtinWorkflowsDir: string): void {
info(` ${name}`);
}
console.log();
blankLine();
info('Usage: takt eject {name}');
}

View File

@ -17,7 +17,8 @@ import { isQuietMode } from '../cli.js';
import { loadAgentSessions, updateAgentSession } from '../config/paths.js';
import { getProvider, type ProviderType } from '../providers/index.js';
import { createLogger } from '../utils/debug.js';
import { info, StreamDisplay } from '../utils/ui.js';
import { getErrorMessage } from '../utils/error.js';
import { info, error, blankLine, StreamDisplay } from '../utils/ui.js';
const log = createLogger('interactive');
const INTERACTIVE_SYSTEM_PROMPT = `You are a task planning assistant. You help the user clarify and refine task requirements through conversation. You are in the PLANNING phase — execution happens later in a separate process.
@ -148,7 +149,7 @@ export async function interactiveMode(cwd: string, initialInput?: string): Promi
if (sessionId) {
info('Resuming previous session');
}
console.log();
blankLine();
/** Call AI with automatic retry on session error (stale/invalid session ID). */
async function callAIWithRetry(prompt: string): Promise<CallAIResult | null> {
@ -173,10 +174,10 @@ export async function interactiveMode(cwd: string, initialInput?: string): Promi
}
return result;
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
const msg = getErrorMessage(e);
log.error('AI call failed', { error: msg });
console.log(chalk.red(`Error: ${msg}`));
console.log();
error(msg);
blankLine();
return null;
}
}
@ -189,7 +190,7 @@ export async function interactiveMode(cwd: string, initialInput?: string): Promi
const result = await callAIWithRetry(initialInput);
if (result) {
history.push({ role: 'assistant', content: result.content });
console.log();
blankLine();
} else {
history.pop();
}
@ -200,7 +201,7 @@ export async function interactiveMode(cwd: string, initialInput?: string): Promi
// EOF (Ctrl+D)
if (input === null) {
console.log();
blankLine();
info('Cancelled');
return { confirmed: false, task: '' };
}
@ -238,7 +239,7 @@ export async function interactiveMode(cwd: string, initialInput?: string): Promi
const result = await callAIWithRetry(trimmed);
if (result) {
history.push({ role: 'assistant', content: result.content });
console.log();
blankLine();
} else {
history.pop();
}

View File

@ -22,8 +22,9 @@ import {
} from '../task/branchList.js';
import { autoCommitAndPush } from '../task/autoCommit.js';
import { selectOption, confirm, promptInput } from '../prompt/index.js';
import { info, success, error as logError, warn } from '../utils/ui.js';
import { info, success, error as logError, warn, header, blankLine } from '../utils/ui.js';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
import { executeTask, type TaskExecutionOptions } from './taskExecution.js';
import { listWorkflows } from '../config/workflowLoader.js';
import { getCurrentWorkflow } from '../config/paths.js';
@ -80,12 +81,11 @@ async function showDiffAndPromptAction(
defaultBranch: string,
item: BranchListItem,
): Promise<ListAction | null> {
console.log();
console.log(chalk.bold.cyan(`=== ${item.info.branch} ===`));
header(item.info.branch);
if (item.originalInstruction) {
console.log(chalk.dim(` ${item.originalInstruction}`));
}
console.log();
blankLine();
// Show diff stat
try {
@ -132,7 +132,7 @@ export function tryMergeBranch(projectDir: string, item: BranchListItem): boolea
log.info('Try-merge (squash) completed', { branch });
return true;
} catch (err) {
const msg = err instanceof Error ? err.message : String(err);
const msg = getErrorMessage(err);
logError(`Squash merge failed: ${msg}`);
logError('You may need to resolve conflicts manually.');
log.error('Try-merge (squash) failed', { branch, error: msg });
@ -180,7 +180,7 @@ export function mergeBranch(projectDir: string, item: BranchListItem): boolean {
log.info('Branch merged & cleaned up', { branch, alreadyMerged });
return true;
} catch (err) {
const msg = err instanceof Error ? err.message : String(err);
const msg = getErrorMessage(err);
logError(`Merge failed: ${msg}`);
logError('You may need to resolve conflicts manually.');
log.error('Merge & cleanup failed', { branch, error: msg });
@ -210,7 +210,7 @@ export function deleteBranch(projectDir: string, item: BranchListItem): boolean
log.info('Branch deleted', { branch });
return true;
} catch (err) {
const msg = err instanceof Error ? err.message : String(err);
const msg = getErrorMessage(err);
logError(`Delete failed: ${msg}`);
log.error('Delete failed', { branch, error: msg });
return false;
@ -324,7 +324,13 @@ export async function instructBranch(
: instruction;
// 5. Execute task on temp clone
const taskSuccess = await executeTask(fullInstruction, clone.path, selectedWorkflow, projectDir, options);
const taskSuccess = await executeTask({
task: fullInstruction,
cwd: clone.path,
workflowIdentifier: selectedWorkflow,
projectCwd: projectDir,
agentOverrides: options,
});
// 6. Auto-commit+push if successful
if (taskSuccess) {

View File

@ -12,10 +12,12 @@
import { execFileSync } from 'node:child_process';
import { fetchIssue, formatIssueAsTask, checkGhCli, type GitHubIssue } from '../github/issue.js';
import { createPullRequest, pushBranch, buildPrBody } from '../github/pr.js';
import { stageAndCommit } from '../task/git.js';
import { executeTask, type TaskExecutionOptions } from './taskExecution.js';
import { loadGlobalConfig } from '../config/globalConfig.js';
import { info, error, success, status } from '../utils/ui.js';
import { info, error, success, status, blankLine } from '../utils/ui.js';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
import type { PipelineConfig } from '../models/types.js';
import {
EXIT_ISSUE_FETCH_FAILED,
@ -74,29 +76,6 @@ function createBranch(cwd: string, branch: string): void {
});
}
/** Stage all changes and create a commit */
function commitChanges(cwd: string, message: string): string | undefined {
execFileSync('git', ['add', '-A'], { cwd, stdio: 'pipe' });
const statusOutput = execFileSync('git', ['status', '--porcelain'], {
cwd,
stdio: 'pipe',
encoding: 'utf-8',
});
if (!statusOutput.trim()) {
return undefined;
}
execFileSync('git', ['commit', '-m', message], { cwd, stdio: 'pipe' });
return execFileSync('git', ['rev-parse', '--short', 'HEAD'], {
cwd,
stdio: 'pipe',
encoding: 'utf-8',
}).trim();
}
/** Build commit message from template or defaults */
function buildCommitMessage(
pipelineConfig: PipelineConfig | undefined,
@ -159,7 +138,7 @@ export async function executePipeline(options: PipelineExecutionOptions): Promis
task = formatIssueAsTask(issue);
success(`Issue #${options.issueNumber} fetched: "${issue.title}"`);
} catch (err) {
error(`Failed to fetch issue #${options.issueNumber}: ${err instanceof Error ? err.message : String(err)}`);
error(`Failed to fetch issue #${options.issueNumber}: ${getErrorMessage(err)}`);
return EXIT_ISSUE_FETCH_FAILED;
}
} else if (options.task) {
@ -178,7 +157,7 @@ export async function executePipeline(options: PipelineExecutionOptions): Promis
createBranch(cwd, branch);
success(`Branch created: ${branch}`);
} catch (err) {
error(`Failed to create branch: ${err instanceof Error ? err.message : String(err)}`);
error(`Failed to create branch: ${getErrorMessage(err)}`);
return EXIT_GIT_OPERATION_FAILED;
}
}
@ -191,7 +170,13 @@ export async function executePipeline(options: PipelineExecutionOptions): Promis
? { provider: options.provider, model: options.model }
: undefined;
const taskSuccess = await executeTask(task, cwd, workflow, cwd, agentOverrides);
const taskSuccess = await executeTask({
task,
cwd,
workflowIdentifier: workflow,
projectCwd: cwd,
agentOverrides,
});
if (!taskSuccess) {
error(`Workflow '${workflow}' failed`);
@ -205,7 +190,7 @@ export async function executePipeline(options: PipelineExecutionOptions): Promis
info('Committing changes...');
try {
const commitHash = commitChanges(cwd, commitMessage);
const commitHash = stageAndCommit(cwd, commitMessage);
if (commitHash) {
success(`Changes committed: ${commitHash}`);
} else {
@ -216,7 +201,7 @@ export async function executePipeline(options: PipelineExecutionOptions): Promis
pushBranch(cwd, branch);
success(`Pushed to origin/${branch}`);
} catch (err) {
error(`Git operation failed: ${err instanceof Error ? err.message : String(err)}`);
error(`Git operation failed: ${getErrorMessage(err)}`);
return EXIT_GIT_OPERATION_FAILED;
}
}
@ -248,7 +233,7 @@ export async function executePipeline(options: PipelineExecutionOptions): Promis
}
// --- Summary ---
console.log();
blankLine();
status('Issue', issue ? `#${issue.number} "${issue.title}"` : 'N/A');
status('Branch', branch ?? '(current)');
status('Workflow', workflow);

View File

@ -13,6 +13,7 @@ import {
error,
success,
status,
blankLine,
} from '../utils/ui.js';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
@ -27,24 +28,25 @@ export interface TaskExecutionOptions {
model?: string;
}
export interface ExecuteTaskOptions {
/** Task content */
task: string;
/** Working directory (may be a clone path) */
cwd: string;
/** Workflow name or path (auto-detected by isWorkflowPath) */
workflowIdentifier: string;
/** Project root (where .takt/ lives) */
projectCwd: string;
/** Agent provider/model overrides */
agentOverrides?: TaskExecutionOptions;
}
/**
* Execute a single task with workflow.
*
* @param task - Task content
* @param cwd - Working directory (may be a clone path)
* @param workflowIdentifier - Workflow name or path (auto-detected by isWorkflowPath)
* @param projectCwd - Project root (where .takt/ lives). Defaults to cwd.
*/
export async function executeTask(
task: string,
cwd: string,
workflowIdentifier: string = DEFAULT_WORKFLOW_NAME,
projectCwd?: string,
options?: TaskExecutionOptions
): Promise<boolean> {
const effectiveProjectCwd = projectCwd || cwd;
const workflowConfig = loadWorkflowByIdentifier(workflowIdentifier, effectiveProjectCwd);
export async function executeTask(options: ExecuteTaskOptions): Promise<boolean> {
const { task, cwd, workflowIdentifier, projectCwd, agentOverrides } = options;
const workflowConfig = loadWorkflowByIdentifier(workflowIdentifier, projectCwd);
if (!workflowConfig) {
if (isWorkflowPath(workflowIdentifier)) {
@ -66,8 +68,8 @@ export async function executeTask(
const result = await executeWorkflow(workflowConfig, task, cwd, {
projectCwd,
language: globalConfig.language,
provider: options?.provider,
model: options?.model,
provider: agentOverrides?.provider,
model: agentOverrides?.model,
});
return result.success;
}
@ -94,7 +96,13 @@ export async function executeAndCompleteTask(
const { execCwd, execWorkflow, isWorktree } = await resolveTaskExecution(task, cwd, workflowName);
// cwd is always the project root; pass it as projectCwd so reports/sessions go there
const taskSuccess = await executeTask(task.content, execCwd, execWorkflow, cwd, options);
const taskSuccess = await executeTask({
task: task.content,
cwd: execCwd,
workflowIdentifier: execWorkflow,
projectCwd: cwd,
agentOverrides: options,
});
const completedAt = new Date().toISOString();
if (taskSuccess && isWorktree) {
@ -169,9 +177,9 @@ export async function runAllTasks(
let failCount = 0;
while (task) {
console.log();
blankLine();
info(`=== Task: ${task.name} ===`);
console.log();
blankLine();
const taskSuccess = await executeAndCompleteTask(task, taskRunner, cwd, workflowName, options);
@ -186,7 +194,7 @@ export async function runAllTasks(
}
const totalCount = successCount + failCount;
console.log();
blankLine();
header('Tasks Summary');
status('Total', String(totalCount));
status('Success', String(successCount), successCount === totalCount ? 'green' : undefined);

View File

@ -13,6 +13,7 @@ import {
info,
success,
status,
blankLine,
} from '../utils/ui.js';
import { executeAndCompleteTask } from './taskExecution.js';
import { DEFAULT_WORKFLOW_NAME } from '../constants.js';
@ -35,11 +36,11 @@ export async function watchTasks(cwd: string, options?: TaskExecutionOptions): P
info(`Workflow: ${workflowName}`);
info(`Watching: ${taskRunner.getTasksDir()}`);
info('Waiting for tasks... (Ctrl+C to stop)');
console.log();
blankLine();
// Graceful shutdown on SIGINT
const onSigInt = () => {
console.log();
blankLine();
info('Stopping watch...');
watcher.stop();
};
@ -48,9 +49,9 @@ export async function watchTasks(cwd: string, options?: TaskExecutionOptions): P
try {
await watcher.watch(async (task: TaskInfo) => {
taskCount++;
console.log();
blankLine();
info(`=== Task ${taskCount}: ${task.name} ===`);
console.log();
blankLine();
const taskSuccess = await executeAndCompleteTask(task, taskRunner, cwd, workflowName, options);
@ -60,7 +61,7 @@ export async function watchTasks(cwd: string, options?: TaskExecutionOptions): P
failCount++;
}
console.log();
blankLine();
info('Waiting for tasks... (Ctrl+C to stop)');
});
} finally {
@ -69,7 +70,7 @@ export async function watchTasks(cwd: string, options?: TaskExecutionOptions): P
// Summary on exit
if (taskCount > 0) {
console.log();
blankLine();
header('Watch Summary');
status('Total', String(taskCount));
status('Success', String(successCount), successCount === taskCount ? 'green' : undefined);

View File

@ -22,6 +22,7 @@ import {
error,
success,
status,
blankLine,
StreamDisplay,
} from '../utils/ui.js';
import {
@ -71,8 +72,8 @@ export interface WorkflowExecutionResult {
export interface WorkflowExecutionOptions {
/** Header prefix for display */
headerPrefix?: string;
/** Project root directory (where .takt/ lives). Defaults to cwd. */
projectCwd?: string;
/** Project root directory (where .takt/ lives). */
projectCwd: string;
/** Language for instruction metadata */
language?: Language;
provider?: ProviderType;
@ -86,14 +87,14 @@ export async function executeWorkflow(
workflowConfig: WorkflowConfig,
task: string,
cwd: string,
options: WorkflowExecutionOptions = {}
options: WorkflowExecutionOptions
): Promise<WorkflowExecutionResult> {
const {
headerPrefix = 'Running Workflow:',
} = options;
// projectCwd is where .takt/ lives (project root, not the clone)
const projectCwd = options.projectCwd ?? cwd;
const projectCwd = options.projectCwd;
// Always continue from previous sessions (use /clear to reset)
log.debug('Continuing session (use /clear to reset)');
@ -144,7 +145,7 @@ export async function executeWorkflow(
displayRef.current = null;
}
console.log();
blankLine();
warn(
`最大イテレーションに到達しました (${request.currentIteration}/${request.maxIterations})`
);
@ -230,7 +231,7 @@ export async function executeWorkflow(
displayRef.current.flush();
displayRef.current = null;
}
console.log();
blankLine();
if (response.matchedRuleIndex != null && step.rules) {
const rule = step.rules[response.matchedRuleIndex];
@ -334,11 +335,11 @@ export async function executeWorkflow(
const onSigInt = () => {
sigintCount++;
if (sigintCount === 1) {
console.log();
blankLine();
warn('Ctrl+C: ワークフローを中断しています...');
engine.abort();
} else {
console.log();
blankLine();
error('Ctrl+C: 強制終了します');
process.exit(EXIT_SIGINT);
}

View File

@ -23,16 +23,29 @@ function createDefaultGlobalConfig(): GlobalConfig {
};
}
/** Module-level cache for global configuration; populated lazily by loadGlobalConfig */
let cachedConfig: GlobalConfig | null = null;
/** Invalidate the cached global configuration. Call after external mutation of the config file; saveGlobalConfig invokes this automatically after writing. */
export function invalidateGlobalConfigCache(): void {
cachedConfig = null;
}
/** Load global configuration */
export function loadGlobalConfig(): GlobalConfig {
if (cachedConfig !== null) {
return cachedConfig;
}
const configPath = getGlobalConfigPath();
if (!existsSync(configPath)) {
return createDefaultGlobalConfig();
const defaultConfig = createDefaultGlobalConfig();
cachedConfig = defaultConfig;
return defaultConfig;
}
const content = readFileSync(configPath, 'utf-8');
const raw = parseYaml(content);
const parsed = GlobalConfigSchema.parse(raw);
return {
const config: GlobalConfig = {
language: parsed.language,
trustedDirectories: parsed.trusted_directories,
defaultWorkflow: parsed.default_workflow,
@ -54,6 +67,8 @@ export function loadGlobalConfig(): GlobalConfig {
} : undefined,
minimalOutput: parsed.minimal_output,
};
cachedConfig = config;
return config;
}
/** Save global configuration */
@ -100,6 +115,7 @@ export function saveGlobalConfig(config: GlobalConfig): void {
raw.minimal_output = config.minimalOutput;
}
writeFileSync(configPath, stringifyYaml(raw), 'utf-8');
invalidateGlobalConfigCache();
}
/** Get list of disabled builtin names */

View File

@ -27,6 +27,7 @@ export {
export {
loadGlobalConfig,
saveGlobalConfig,
invalidateGlobalConfigCache,
addTrustedDirectory,
isDirectoryTrusted,
loadProjectDebugConfig,

View File

@ -28,8 +28,6 @@ export interface ProjectLocalConfig {
provider?: 'claude' | 'codex';
/** Permission mode setting */
permissionMode?: PermissionMode;
/** @deprecated Use permissionMode instead. Auto-approve all permissions in this project */
sacrificeMode?: boolean;
/** Verbose output mode */
verbose?: boolean;
/** Custom settings */

View File

@ -246,10 +246,10 @@ function loadWorkflowFromFile(filePath: string): WorkflowConfig {
/**
* Resolve a path that may be relative, absolute, or home-directory-relative.
* @param pathInput Path to resolve
* @param basePath Base directory for relative paths (defaults to cwd)
* @param basePath Base directory for relative paths
* @returns Absolute resolved path
*/
function resolvePath(pathInput: string, basePath: string = process.cwd()): string {
function resolvePath(pathInput: string, basePath: string): string {
// Home directory expansion
if (pathInput.startsWith('~')) {
const home = homedir();
@ -270,12 +270,12 @@ function resolvePath(pathInput: string, basePath: string = process.cwd()): strin
* Called internally by loadWorkflowByIdentifier when the identifier is detected as a path.
*
* @param filePath Path to workflow file (absolute, relative, or home-dir prefixed with ~)
* @param basePath Base directory for resolving relative paths (default: cwd)
* @param basePath Base directory for resolving relative paths
* @returns WorkflowConfig or null if file not found
*/
function loadWorkflowFromPath(
filePath: string,
basePath: string = process.cwd()
basePath: string
): WorkflowConfig | null {
const resolvedPath = resolvePath(filePath, basePath);
@ -295,11 +295,11 @@ function loadWorkflowFromPath(
* 3. Builtin workflows resources/global/{lang}/workflows/{name}.yaml
*
* @param name Workflow name (not a file path)
* @param projectCwd Project root directory (default: cwd, for project-local workflow resolution)
* @param projectCwd Project root directory (for project-local workflow resolution)
*/
export function loadWorkflow(
name: string,
projectCwd: string = process.cwd()
projectCwd: string
): WorkflowConfig | null {
// 1. Project-local workflow (.takt/workflows/{name}.yaml)
const projectWorkflowsDir = join(getProjectConfigDir(projectCwd), 'workflows');

View File

@ -6,6 +6,7 @@
import { execFileSync } from 'node:child_process';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
import { checkGhCli, type GitHubIssue } from './issue.js';
const log = createLogger('github-pr');
@ -81,7 +82,7 @@ export function createPullRequest(cwd: string, options: CreatePrOptions): Create
return { success: true, url };
} catch (err) {
const errorMessage = err instanceof Error ? err.message : String(err);
const errorMessage = getErrorMessage(err);
log.error('PR creation failed', { error: errorMessage });
return { success: false, error: errorMessage };
}

View File

@ -9,6 +9,8 @@
import { execFileSync } from 'node:child_process';
import { createLogger } from '../utils/debug.js';
import { getErrorMessage } from '../utils/error.js';
import { stageAndCommit } from './git.js';
const log = createLogger('autoCommit');
@ -38,38 +40,14 @@ export function autoCommitAndPush(cloneCwd: string, taskName: string, projectDir
log.info('Auto-commit starting', { cwd: cloneCwd, taskName });
try {
// Stage all changes
execFileSync('git', ['add', '-A'], {
cwd: cloneCwd,
stdio: 'pipe',
});
const commitMessage = `takt: ${taskName}`;
const commitHash = stageAndCommit(cloneCwd, commitMessage);
// Check if there are staged changes
const statusOutput = execFileSync('git', ['status', '--porcelain'], {
cwd: cloneCwd,
stdio: 'pipe',
encoding: 'utf-8',
});
if (!statusOutput.trim()) {
if (!commitHash) {
log.info('No changes to commit');
return { success: true, message: 'No changes to commit' };
}
// Create commit (no co-author)
const commitMessage = `takt: ${taskName}`;
execFileSync('git', ['commit', '-m', commitMessage], {
cwd: cloneCwd,
stdio: 'pipe',
});
// Get the short commit hash
const commitHash = execFileSync('git', ['rev-parse', '--short', 'HEAD'], {
cwd: cloneCwd,
stdio: 'pipe',
encoding: 'utf-8',
}).trim();
log.info('Auto-commit created', { commitHash, message: commitMessage });
// Push directly to the main repo (origin was removed to isolate the clone)
@ -86,7 +64,7 @@ export function autoCommitAndPush(cloneCwd: string, taskName: string, projectDir
message: `Committed & pushed: ${commitHash} - ${commitMessage}`,
};
} catch (err) {
const errorMessage = err instanceof Error ? err.message : String(err);
const errorMessage = getErrorMessage(err);
log.error('Auto-commit failed', { error: errorMessage });
return {

31
src/task/git.ts Normal file
View File

@ -0,0 +1,31 @@
/**
* Shared git operations for task execution
*/
import { execFileSync } from 'node:child_process';
/**
* Stage all changes and create a commit.
* Returns the short commit hash if changes were committed, undefined if no changes.
*/
/**
 * Stage all changes and create a commit.
 *
 * Runs `git add -A`, then commits only if `git status --porcelain`
 * reports anything staged.
 *
 * @param cwd - Repository directory to operate in
 * @param message - Commit message
 * @returns Short commit hash when a commit was created, undefined when the
 *          working tree had no changes
 */
export function stageAndCommit(cwd: string, message: string): string | undefined {
  // Small helper so every git invocation shares cwd/stdio/encoding options.
  const git = (args: string[]): string =>
    execFileSync('git', args, { cwd, stdio: 'pipe', encoding: 'utf-8' });

  git(['add', '-A']);

  // Empty porcelain output means nothing to commit — bail out early.
  if (git(['status', '--porcelain']).trim() === '') {
    return undefined;
  }

  git(['commit', '-m', message]);
  return git(['rev-parse', '--short', 'HEAD']).trim();
}

View File

@ -29,6 +29,11 @@ function shouldLog(level: LogLevel): boolean {
return LOG_PRIORITIES[level] >= LOG_PRIORITIES[currentLogLevel];
}
/** Print a blank line to stdout (console.log wrapper used for UI spacing instead of bare console.log() calls). */
export function blankLine(): void {
console.log();
}
/** Log a debug message */
export function debug(message: string): void {
if (shouldLog('debug')) {

View File

@ -28,6 +28,7 @@ import {
incrementStepIteration,
} from './state-manager.js';
import { generateReportDir } from '../utils/session.js';
import { getErrorMessage } from '../utils/error.js';
import { createLogger } from '../utils/debug.js';
import { interruptAllQueries } from '../claude/query-manager.js';
@ -57,10 +58,10 @@ export class WorkflowEngine extends EventEmitter {
private reportDir: string;
private abortRequested = false;
constructor(config: WorkflowConfig, cwd: string, task: string, options: WorkflowEngineOptions = {}) {
constructor(config: WorkflowConfig, cwd: string, task: string, options: WorkflowEngineOptions) {
super();
this.config = config;
this.projectCwd = options.projectCwd ?? cwd;
this.projectCwd = options.projectCwd;
this.cwd = cwd;
this.task = task;
this.options = options;
@ -553,7 +554,7 @@ export class WorkflowEngine extends EventEmitter {
if (this.abortRequested) {
this.emit('workflow:abort', this.state, 'Workflow interrupted by user (SIGINT)');
} else {
const message = error instanceof Error ? error.message : String(error);
const message = getErrorMessage(error);
this.emit('workflow:abort', this.state, ERROR_MESSAGES.STEP_EXECUTION_FAILED(message));
}
break;

View File

@ -11,196 +11,16 @@
* and also used in Phase 3 (buildStatusJudgmentInstruction) as a dedicated follow-up.
*/
import type { WorkflowStep, WorkflowRule, AgentResponse, Language, ReportConfig, ReportObjectConfig } from '../models/types.js';
import type { WorkflowStep, Language, ReportConfig, ReportObjectConfig } from '../models/types.js';
import { hasTagBasedRules } from './rule-utils.js';
import type { InstructionContext } from './instruction-context.js';
import { buildExecutionMetadata, renderExecutionMetadata, METADATA_STRINGS } from './instruction-context.js';
import { generateStatusRulesFromRules } from './status-rules.js';
/**
* Context for building instruction from template.
*/
export interface InstructionContext {
/** The main task/prompt */
task: string;
/** Current iteration number (workflow-wide turn count) */
iteration: number;
/** Maximum iterations allowed */
maxIterations: number;
/** Current step's iteration number (how many times this step has been executed) */
stepIteration: number;
/** Working directory (agent work dir, may be a clone) */
cwd: string;
/** Project root directory (where .takt/ lives). Defaults to cwd. */
projectCwd?: string;
/** User inputs accumulated during workflow */
userInputs: string[];
/** Previous step output if available */
previousOutput?: AgentResponse;
/** Report directory path */
reportDir?: string;
/** Language for metadata rendering. Defaults to 'en'. */
language?: Language;
}
/** Execution environment metadata prepended to agent instructions */
export interface ExecutionMetadata {
/** The agent's working directory (may be a clone) */
readonly workingDirectory: string;
/** Language for metadata rendering */
readonly language: Language;
/** Whether file editing is allowed for this step (undefined = no prompt) */
readonly edit?: boolean;
}
/**
* Build execution metadata from instruction context and step config.
*
* Pure function: (InstructionContext, edit?) ExecutionMetadata.
*/
export function buildExecutionMetadata(context: InstructionContext, edit?: boolean): ExecutionMetadata {
return {
workingDirectory: context.cwd,
language: context.language ?? 'en',
edit,
};
}
/** Localized strings for rules-based status prompt */
const RULES_PROMPT_STRINGS = {
en: {
criteriaHeading: '## Decision Criteria',
headerNum: '#',
headerCondition: 'Condition',
headerTag: 'Tag',
outputHeading: '## Output Format',
outputInstruction: 'Output the tag corresponding to your decision:',
appendixHeading: '### Appendix Template',
appendixInstruction: 'When outputting `[{tag}]`, append the following:',
},
ja: {
criteriaHeading: '## 判定基準',
headerNum: '#',
headerCondition: '状況',
headerTag: 'タグ',
outputHeading: '## 出力フォーマット',
outputInstruction: '判定に対応するタグを出力してください:',
appendixHeading: '### 追加出力テンプレート',
appendixInstruction: '`[{tag}]` を出力する場合、以下を追記してください:',
},
} as const;
/**
* Generate status rules prompt from rules configuration.
* Creates a structured prompt that tells the agent which numbered tags to output.
*
* Example output for step "plan" with 3 rules:
* ##
* | # | | |
* |---|------|------|
* | 1 | | `[PLAN:1]` |
* | 2 | | `[PLAN:2]` |
* | 3 | | `[PLAN:3]` |
*/
export function generateStatusRulesFromRules(
stepName: string,
rules: WorkflowRule[],
language: Language,
): string {
const tag = stepName.toUpperCase();
const strings = RULES_PROMPT_STRINGS[language];
const lines: string[] = [];
// Criteria table
lines.push(strings.criteriaHeading);
lines.push('');
lines.push(`| ${strings.headerNum} | ${strings.headerCondition} | ${strings.headerTag} |`);
lines.push('|---|------|------|');
for (const [i, rule] of rules.entries()) {
lines.push(`| ${i + 1} | ${rule.condition} | \`[${tag}:${i + 1}]\` |`);
}
lines.push('');
// Output format
lines.push(strings.outputHeading);
lines.push('');
lines.push(strings.outputInstruction);
lines.push('');
for (const [i, rule] of rules.entries()) {
lines.push(`- \`[${tag}:${i + 1}]\`${rule.condition}`);
}
// Appendix templates (if any rules have appendix)
const rulesWithAppendix = rules.filter((r) => r.appendix);
if (rulesWithAppendix.length > 0) {
lines.push('');
lines.push(strings.appendixHeading);
for (const [i, rule] of rules.entries()) {
if (!rule.appendix) continue;
const tagStr = `[${tag}:${i + 1}]`;
lines.push('');
lines.push(strings.appendixInstruction.replace('{tag}', tagStr));
lines.push('```');
lines.push(rule.appendix.trimEnd());
lines.push('```');
}
}
return lines.join('\n');
}
/** Localized strings for execution metadata rendering */
const METADATA_STRINGS = {
en: {
heading: '## Execution Context',
workingDirectory: 'Working Directory',
rulesHeading: '## Execution Rules',
noCommit: '**Do NOT run git commit.** Commits are handled automatically by the system after workflow completion.',
noCd: '**Do NOT use `cd` in Bash commands.** Your working directory is already set correctly. Run commands directly without changing directories.',
editEnabled: '**Editing is ENABLED for this step.** You may create, modify, and delete files as needed to fulfill the user\'s request.',
editDisabled: '**Editing is DISABLED for this step.** Do NOT create, modify, or delete any project source files. You may only read/search code and write to report files in the Report Directory.',
note: 'Note: This section is metadata. Follow the language used in the rest of the prompt.',
},
ja: {
heading: '## 実行コンテキスト',
workingDirectory: '作業ディレクトリ',
rulesHeading: '## 実行ルール',
noCommit: '**git commit を実行しないでください。** コミットはワークフロー完了後にシステムが自動で行います。',
noCd: '**Bashコマンドで `cd` を使用しないでください。** 作業ディレクトリは既に正しく設定されています。ディレクトリを変更せずにコマンドを実行してください。',
editEnabled: '**このステップでは編集が許可されています。** ユーザーの要求に応じて、ファイルの作成・変更・削除を行ってください。',
editDisabled: '**このステップでは編集が禁止されています。** プロジェクトのソースファイルを作成・変更・削除しないでください。コードの読み取り・検索と、Report Directoryへのレポート出力のみ行えます。',
note: '',
},
} as const;
/**
* Render execution metadata as a markdown string.
*
* Pure function: ExecutionMetadata string.
* Always includes heading + Working Directory + Execution Rules.
* Language determines the output language; 'en' includes a note about language consistency.
*/
export function renderExecutionMetadata(metadata: ExecutionMetadata): string {
const strings = METADATA_STRINGS[metadata.language];
const lines = [
strings.heading,
`- ${strings.workingDirectory}: ${metadata.workingDirectory}`,
'',
strings.rulesHeading,
`- ${strings.noCommit}`,
`- ${strings.noCd}`,
];
if (metadata.edit === true) {
lines.push(`- ${strings.editEnabled}`);
} else if (metadata.edit === false) {
lines.push(`- ${strings.editDisabled}`);
}
if (strings.note) {
lines.push('');
lines.push(strings.note);
}
lines.push('');
return lines.join('\n');
}
// Re-export from sub-modules for backward compatibility
export type { InstructionContext, ExecutionMetadata } from './instruction-context.js';
export { buildExecutionMetadata, renderExecutionMetadata } from './instruction-context.js';
export { generateStatusRulesFromRules } from './status-rules.js';
/**
* Escape special characters in dynamic content to prevent template injection.
@ -562,6 +382,7 @@ export function buildReportInstruction(
maxIterations: 0,
stepIteration: context.stepIteration,
cwd: context.cwd,
projectCwd: context.cwd,
userInputs: [],
reportDir: context.reportDir,
language,

View File

@ -0,0 +1,111 @@
/**
* Instruction context types and execution metadata rendering
*
* Defines the context structures used by instruction builders,
* and renders execution metadata (working directory, rules) as markdown.
*/
import type { AgentResponse, Language } from '../models/types.js';
/**
* Context for building instruction from template.
*/
export interface InstructionContext {
/** The main task/prompt */
task: string;
/** Current iteration number (workflow-wide turn count) */
iteration: number;
/** Maximum iterations allowed */
maxIterations: number;
/** Current step's iteration number (how many times this step has been executed) */
stepIteration: number;
/** Working directory (agent work dir, may be a clone) */
cwd: string;
/** Project root directory (where .takt/ lives). */
projectCwd: string;
/** User inputs accumulated during workflow */
userInputs: string[];
/** Previous step output if available */
previousOutput?: AgentResponse;
/** Report directory path */
reportDir?: string;
/** Language for metadata rendering. Defaults to 'en'. */
language?: Language;
}
/** Execution environment metadata prepended to agent instructions */
export interface ExecutionMetadata {
/** The agent's working directory (may be a clone) */
readonly workingDirectory: string;
/** Language for metadata rendering */
readonly language: Language;
/** Whether file editing is allowed for this step (undefined = no prompt) */
readonly edit?: boolean;
}
/**
 * Build execution metadata from instruction context and step config.
 *
 * Pure function: (InstructionContext, edit?) -> ExecutionMetadata.
 * The agent's working directory comes from context.cwd (may be a clone);
 * language falls back to 'en' when the context does not specify one.
 */
export function buildExecutionMetadata(context: InstructionContext, edit?: boolean): ExecutionMetadata {
  const { cwd, language } = context;
  return {
    workingDirectory: cwd,
    language: language ?? 'en',
    edit,
  };
}
/** Localized strings for execution metadata rendering */
/** Localized strings for execution metadata rendering */
export const METADATA_STRINGS = {
  en: {
    heading: '## Execution Context',
    workingDirectory: 'Working Directory',
    rulesHeading: '## Execution Rules',
    noCommit: '**Do NOT run git commit.** Commits are handled automatically by the system after workflow completion.',
    noCd: '**Do NOT use `cd` in Bash commands.** Your working directory is already set correctly. Run commands directly without changing directories.',
    editEnabled: '**Editing is ENABLED for this step.** You may create, modify, and delete files as needed to fulfill the user\'s request.',
    editDisabled: '**Editing is DISABLED for this step.** Do NOT create, modify, or delete any project source files. You may only read/search code and write to report files in the Report Directory.',
    note: 'Note: This section is metadata. Follow the language used in the rest of the prompt.',
  },
  ja: {
    heading: '## 実行コンテキスト',
    workingDirectory: '作業ディレクトリ',
    rulesHeading: '## 実行ルール',
    noCommit: '**git commit を実行しないでください。** コミットはワークフロー完了後にシステムが自動で行います。',
    noCd: '**Bashコマンドで `cd` を使用しないでください。** 作業ディレクトリは既に正しく設定されています。ディレクトリを変更せずにコマンドを実行してください。',
    editEnabled: '**このステップでは編集が許可されています。** ユーザーの要求に応じて、ファイルの作成・変更・削除を行ってください。',
    editDisabled: '**このステップでは編集が禁止されています。** プロジェクトのソースファイルを作成・変更・削除しないでください。コードの読み取り・検索と、Report Directoryへのレポート出力のみ行えます。',
    note: '',
  },
} as const;
/**
 * Render execution metadata as a markdown string.
 *
 * Pure function: ExecutionMetadata -> string.
 * Output always contains the heading, the Working Directory line, and the
 * Execution Rules list; an edit-permission line is appended only when
 * metadata.edit is explicitly true or false. Languages with a non-empty
 * `note` string (currently 'en') get a trailing consistency note.
 */
export function renderExecutionMetadata(metadata: ExecutionMetadata): string {
  const s = METADATA_STRINGS[metadata.language];
  const parts: string[] = [
    s.heading,
    `- ${s.workingDirectory}: ${metadata.workingDirectory}`,
    '',
    s.rulesHeading,
    `- ${s.noCommit}`,
    `- ${s.noCd}`,
  ];
  // `edit` is tri-state: undefined means "no permission prompt at all".
  switch (metadata.edit) {
    case true:
      parts.push(`- ${s.editEnabled}`);
      break;
    case false:
      parts.push(`- ${s.editDisabled}`);
      break;
    default:
      break;
  }
  if (s.note) {
    parts.push('', s.note);
  }
  parts.push('');
  return parts.join('\n');
}

View File

@ -0,0 +1,92 @@
/**
* Status rules prompt generation for workflow steps
*
* Generates structured prompts that tell agents which numbered tags to output
* based on the step's rule configuration.
*/
import type { WorkflowRule, Language } from '../models/types.js';
/**
 * Localized strings for the rules-based status prompt.
 *
 * Keyed by language code; provides the section headings, criteria-table
 * column headers, and appendix-template labels used by
 * `generateStatusRulesFromRules()`.
 */
const RULES_PROMPT_STRINGS = {
  en: {
    criteriaHeading: '## Decision Criteria',
    headerNum: '#',
    headerCondition: 'Condition',
    headerTag: 'Tag',
    outputHeading: '## Output Format',
    outputInstruction: 'Output the tag corresponding to your decision:',
    appendixHeading: '### Appendix Template',
    // `{tag}` is substituted with the concrete `[STEP:n]` tag at render time.
    appendixInstruction: 'When outputting `[{tag}]`, append the following:',
  },
  ja: {
    criteriaHeading: '## 判定基準',
    headerNum: '#',
    headerCondition: '状況',
    headerTag: 'タグ',
    outputHeading: '## 出力フォーマット',
    outputInstruction: '判定に対応するタグを出力してください:',
    appendixHeading: '### 追加出力テンプレート',
    // `{tag}` is substituted with the concrete `[STEP:n]` tag at render time.
    appendixInstruction: '`[{tag}]` を出力する場合、以下を追記してください:',
  },
} as const;
/**
 * Generate status rules prompt from rules configuration.
 * Creates a structured prompt that tells the agent which numbered tags to
 * output. Tags are derived from the upper-cased step name plus a 1-based
 * rule index.
 *
 * Example output shape for step "plan" with 3 rules (English locale):
 *   ## Decision Criteria
 *   | # | Condition | Tag |
 *   |---|------|------|
 *   | 1 | <condition 1> | `[PLAN:1]` |
 *   | 2 | <condition 2> | `[PLAN:2]` |
 *   | 3 | <condition 3> | `[PLAN:3]` |
 */
export function generateStatusRulesFromRules(
  stepName: string,
  rules: WorkflowRule[],
  language: Language,
): string {
  const tag = stepName.toUpperCase();
  const text = RULES_PROMPT_STRINGS[language];

  // Criteria table: one row per rule, tags numbered from 1.
  const out: string[] = [
    text.criteriaHeading,
    '',
    `| ${text.headerNum} | ${text.headerCondition} | ${text.headerTag} |`,
    '|---|------|------|',
  ];
  rules.forEach((rule, idx) => {
    out.push(`| ${idx + 1} | ${rule.condition} | \`[${tag}:${idx + 1}]\` |`);
  });

  // Output-format section: bullet list mapping each tag back to its condition.
  out.push('', text.outputHeading, '', text.outputInstruction, '');
  rules.forEach((rule, idx) => {
    out.push(`- \`[${tag}:${idx + 1}]\`${rule.condition}`);
  });

  // Appendix templates: emitted only when at least one rule defines one.
  if (rules.some((rule) => rule.appendix)) {
    out.push('', text.appendixHeading);
    rules.forEach((rule, idx) => {
      if (!rule.appendix) return;
      out.push(
        '',
        text.appendixInstruction.replace('{tag}', `[${tag}:${idx + 1}]`),
        '```',
        rule.appendix.trimEnd(),
        '```',
      );
    });
  }

  return out.join('\n');
}

View File

@ -72,8 +72,8 @@ export interface WorkflowEngineOptions {
onIterationLimit?: IterationLimitCallback;
/** Bypass all permission checks (sacrifice-my-pc mode) */
bypassPermissions?: boolean;
/** Project root directory (where .takt/ lives). Defaults to cwd if not specified. */
projectCwd?: string;
/** Project root directory (where .takt/ lives). */
projectCwd: string;
/** Language for instruction metadata. Defaults to 'en'. */
language?: Language;
provider?: ProviderType;