mockを使ったITを追加

This commit is contained in:
nrslib 2026-02-01 21:58:47 +09:00
parent f8980e9841
commit 3bc0251aa9
5 changed files with 1005 additions and 2 deletions

View File

@ -0,0 +1,208 @@
/**
* Unit tests for the mock scenario queue and loader.
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { mkdtempSync, writeFileSync, rmSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import {
ScenarioQueue,
loadScenarioFile,
setMockScenario,
getScenarioQueue,
resetScenario,
type ScenarioEntry,
} from '../mock/scenario.js';
describe('ScenarioQueue', () => {
  it('should consume entries in order when no agent specified', () => {
    const q = new ScenarioQueue([
      { status: 'done', content: 'first' },
      { status: 'done', content: 'second' },
    ]);
    // Untagged entries are dispensed strictly in insertion order.
    expect(q.consume('any-agent')?.content).toBe('first');
    expect(q.consume('any-agent')?.content).toBe('second');
    expect(q.consume('any-agent')).toBeUndefined();
  });

  it('should match agent-specific entries first', () => {
    const q = new ScenarioQueue([
      { status: 'done', content: 'generic' },
      { agent: 'coder', status: 'done', content: 'coder response' },
      { status: 'done', content: 'second generic' },
    ]);
    // The coder-tagged entry wins even though a generic entry precedes it.
    expect(q.consume('coder')?.content).toBe('coder response');
    // Remaining agents drain the generic entries in their original order.
    expect(q.consume('reviewer')?.content).toBe('generic');
    expect(q.consume('planner')?.content).toBe('second generic');
    expect(q.remaining).toBe(0);
  });

  it('should fall back to unspecified entries when no agent match', () => {
    const q = new ScenarioQueue([
      { agent: 'coder', status: 'done', content: 'coder only' },
      { status: 'done', content: 'fallback' },
    ]);
    // No reviewer-tagged entry exists, so the untagged one is used.
    expect(q.consume('reviewer')?.content).toBe('fallback');
    // The coder-tagged entry is still reserved for coder.
    expect(q.consume('coder')?.content).toBe('coder only');
    expect(q.remaining).toBe(0);
  });

  it('should return undefined when queue is exhausted', () => {
    const q = new ScenarioQueue([{ status: 'done', content: 'only' }]);
    q.consume('agent');
    expect(q.consume('agent')).toBeUndefined();
  });

  it('should track remaining count', () => {
    const q = new ScenarioQueue([
      { status: 'done', content: 'a' },
      { status: 'done', content: 'b' },
      { status: 'done', content: 'c' },
    ]);
    expect(q.remaining).toBe(3);
    q.consume('x');
    expect(q.remaining).toBe(2);
  });

  it('should not modify the original array', () => {
    const source: ScenarioEntry[] = [
      { status: 'done', content: 'a' },
      { status: 'done', content: 'b' },
    ];
    const q = new ScenarioQueue(source);
    q.consume('x');
    // The queue works on a defensive copy, so the caller's array stays intact.
    expect(source).toHaveLength(2);
  });

  it('should handle mixed agent and unspecified entries correctly', () => {
    const q = new ScenarioQueue([
      { agent: 'plan', status: 'done', content: '[PLAN:1]\nPlan done' },
      { agent: 'implement', status: 'done', content: '[IMPLEMENT:1]\nCode written' },
      { agent: 'ai_review', status: 'done', content: '[AI_REVIEW:1]\nNo issues' },
      { agent: 'supervise', status: 'done', content: '[SUPERVISE:1]\nAll good' },
    ]);
    expect(q.consume('plan')?.content).toContain('[PLAN:1]');
    expect(q.consume('implement')?.content).toContain('[IMPLEMENT:1]');
    expect(q.consume('ai_review')?.content).toContain('[AI_REVIEW:1]');
    expect(q.consume('supervise')?.content).toContain('[SUPERVISE:1]');
    expect(q.remaining).toBe(0);
  });
});
describe('loadScenarioFile', () => {
  let workDir: string;

  /** Write raw text into the per-test temp dir and return the file path. */
  const writeFixture = (name: string, raw: string): string => {
    const filePath = join(workDir, name);
    writeFileSync(filePath, raw);
    return filePath;
  };

  beforeEach(() => {
    workDir = mkdtempSync(join(tmpdir(), 'takt-scenario-'));
  });

  afterEach(() => {
    rmSync(workDir, { recursive: true, force: true });
  });

  it('should load valid scenario JSON', () => {
    const filePath = writeFixture(
      'scenario.json',
      JSON.stringify([
        { agent: 'plan', status: 'done', content: 'Plan done' },
        { status: 'blocked', content: 'Blocked' },
      ]),
    );
    const entries = loadScenarioFile(filePath);
    expect(entries).toHaveLength(2);
    expect(entries[0]).toEqual({ agent: 'plan', status: 'done', content: 'Plan done' });
    expect(entries[1]).toEqual({ agent: undefined, status: 'blocked', content: 'Blocked' });
  });

  it('should default status to "done" if omitted', () => {
    const filePath = writeFixture('scenario.json', JSON.stringify([{ content: 'Simple response' }]));
    expect(loadScenarioFile(filePath)[0].status).toBe('done');
  });

  it('should throw for non-existent file', () => {
    expect(() => loadScenarioFile('/nonexistent/file.json')).toThrow('Scenario file not found');
  });

  it('should throw for invalid JSON', () => {
    const filePath = writeFixture('bad.json', 'not json at all');
    expect(() => loadScenarioFile(filePath)).toThrow('not valid JSON');
  });

  it('should throw for non-array JSON', () => {
    const filePath = writeFixture('object.json', '{"key": "value"}');
    expect(() => loadScenarioFile(filePath)).toThrow('must contain a JSON array');
  });

  it('should throw for entry without content', () => {
    const filePath = writeFixture('no-content.json', '[{"status": "done"}]');
    expect(() => loadScenarioFile(filePath)).toThrow('must have a "content" string');
  });

  it('should throw for invalid status', () => {
    const filePath = writeFixture('bad-status.json', '[{"content": "test", "status": "invalid"}]');
    expect(() => loadScenarioFile(filePath)).toThrow('invalid status');
  });
});
describe('setMockScenario / getScenarioQueue / resetScenario', () => {
  // Module-level singleton state must never leak between tests.
  afterEach(() => {
    resetScenario();
  });

  it('should set and retrieve scenario queue', () => {
    setMockScenario([{ status: 'done', content: 'test' }]);
    const q = getScenarioQueue();
    expect(q).not.toBeNull();
    expect(q!.remaining).toBe(1);
  });

  it('should return null when no scenario is set', () => {
    expect(getScenarioQueue()).toBeNull();
  });

  it('should clear scenario when null is passed', () => {
    setMockScenario([{ status: 'done', content: 'test' }]);
    setMockScenario(null);
    expect(getScenarioQueue()).toBeNull();
  });

  it('should reset scenario state', () => {
    setMockScenario([{ status: 'done', content: 'test' }]);
    resetScenario();
    expect(getScenarioQueue()).toBeNull();
  });
});

View File

@ -0,0 +1,299 @@
/**
* Pipeline integration tests.
*
* Uses mock provider + scenario queue for end-to-end testing
* of the pipeline execution flow. Git operations are skipped via --skip-git.
*
* Mocked: git operations (child_process), GitHub API, UI output, notifications, session
* Not mocked: executeTask, executeWorkflow, WorkflowEngine, runAgent, rule evaluation
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { mkdtempSync, mkdirSync, writeFileSync, rmSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import { setMockScenario, resetScenario } from '../mock/scenario.js';
// --- Mocks ---
// Safety net: prevent callAiJudge from calling real Claude CLI.
vi.mock('../claude/client.js', async (importOriginal) => {
  const original = await importOriginal<typeof import('../claude/client.js')>();
  return {
    ...original,
    // NOTE(review): -1 presumably signals "no rule matched" — confirm against the rule evaluator.
    callAiJudge: vi.fn().mockResolvedValue(-1),
  };
});

// Git operations (even with --skip-git, some imports need to be available)
vi.mock('node:child_process', () => ({
  execFileSync: vi.fn(),
}));

// GitHub issue helpers are never exercised by these tests; stub them so imports resolve.
vi.mock('../github/issue.js', () => ({
  fetchIssue: vi.fn(),
  formatIssueAsTask: vi.fn(),
  checkGhCli: vi.fn(),
}));

// PR creation is disabled via autoPr: false below, but the module is still imported.
vi.mock('../github/pr.js', () => ({
  createPullRequest: vi.fn(),
  pushBranch: vi.fn(),
  buildPrBody: vi.fn().mockReturnValue('PR body'),
}));

// Replace all terminal-output helpers with no-ops.
vi.mock('../utils/ui.js', () => ({
  header: vi.fn(),
  info: vi.fn(),
  warn: vi.fn(),
  error: vi.fn(),
  success: vi.fn(),
  status: vi.fn(),
  StreamDisplay: vi.fn().mockImplementation(() => ({
    createHandler: () => vi.fn(),
    flush: vi.fn(),
  })),
}));

// Desktop notifications are irrelevant in an automated run.
vi.mock('../utils/notification.js', () => ({
  notifySuccess: vi.fn(),
  notifyError: vi.fn(),
}));

// Session/report bookkeeping: fixed ids and paths so the temp-dir layout
// ('.takt/reports/test-report-dir') created by the test helper matches.
vi.mock('../utils/session.js', () => ({
  generateSessionId: vi.fn().mockReturnValue('test-session-id'),
  createSessionLog: vi.fn().mockReturnValue({
    startTime: new Date().toISOString(),
    iterations: 0,
  }),
  finalizeSessionLog: vi.fn().mockImplementation((log, status) => ({ ...log, status })),
  updateLatestPointer: vi.fn(),
  initNdjsonLog: vi.fn().mockReturnValue('/tmp/test.ndjson'),
  appendNdjsonLine: vi.fn(),
  generateReportDir: vi.fn().mockReturnValue('test-report-dir'),
}));

// Keep the real path helpers but pin per-project session/config lookups to empty state.
vi.mock('../config/paths.js', async (importOriginal) => {
  const original = await importOriginal<typeof import('../config/paths.js')>();
  return {
    ...original,
    loadAgentSessions: vi.fn().mockReturnValue({}),
    updateAgentSession: vi.fn(),
    loadWorktreeSessions: vi.fn().mockReturnValue({}),
    updateWorktreeSession: vi.fn(),
    getCurrentWorkflow: vi.fn().mockReturnValue('default'),
    getProjectConfigDir: vi.fn().mockImplementation((cwd: string) => join(cwd, '.takt')),
  };
});

// Empty global config; English messages.
vi.mock('../config/globalConfig.js', async (importOriginal) => {
  const original = await importOriginal<typeof import('../config/globalConfig.js')>();
  return {
    ...original,
    loadGlobalConfig: vi.fn().mockReturnValue({}),
    getLanguage: vi.fn().mockReturnValue('en'),
  };
});

// Empty project config.
vi.mock('../config/projectConfig.js', async (importOriginal) => {
  const original = await importOriginal<typeof import('../config/projectConfig.js')>();
  return {
    ...original,
    loadProjectConfig: vi.fn().mockReturnValue({}),
  };
});

// Quiet mode suppresses interactive output paths in the CLI entry point.
vi.mock('../cli.js', () => ({
  isQuietMode: vi.fn().mockReturnValue(true),
}));

// Any unexpected interactive prompt resolves to 'stop' instead of hanging the run.
vi.mock('../prompt/index.js', () => ({
  selectOption: vi.fn().mockResolvedValue('stop'),
  promptInput: vi.fn().mockResolvedValue(null),
}));

// Report/status-judgment phases are out of scope for these tests.
vi.mock('../workflow/phase-runner.js', () => ({
  needsStatusJudgmentPhase: vi.fn().mockReturnValue(false),
  runReportPhase: vi.fn().mockResolvedValue(undefined),
  runStatusJudgmentPhase: vi.fn().mockResolvedValue(''),
}));
// --- Imports (after mocks) ---
import { executePipeline } from '../commands/pipelineExecution.js';
// --- Test helpers ---
/** Create a minimal test workflow YAML + agent files in a temp directory */
function createTestWorkflowDir(): { dir: string; workflowPath: string } {
  const dir = mkdtempSync(join(tmpdir(), 'takt-it-pipeline-'));

  // Report directory expected by the mocked session module ('test-report-dir').
  mkdirSync(join(dir, '.takt', 'reports', 'test-report-dir'), { recursive: true });

  // Agent prompt files referenced from the workflow YAML below.
  const agentsDir = join(dir, 'agents');
  mkdirSync(agentsDir, { recursive: true });
  const prompts: Record<string, string> = {
    'planner.md': 'You are a planner. Analyze the task.',
    'coder.md': 'You are a coder. Implement the task.',
    'reviewer.md': 'You are a reviewer. Review the code.',
  };
  for (const [fileName, prompt] of Object.entries(prompts)) {
    writeFileSync(join(agentsDir, fileName), prompt);
  }

  // Three-step workflow: plan -> implement -> review -> COMPLETE, with ABORT/backtrack rules.
  const workflowYaml = `
name: it-simple
description: Integration test workflow
max_iterations: 10
initial_step: plan
steps:
  - name: plan
    agent: ./agents/planner.md
    rules:
      - condition: Requirements are clear
        next: implement
      - condition: Requirements unclear
        next: ABORT
    instruction: "{task}"
  - name: implement
    agent: ./agents/coder.md
    rules:
      - condition: Implementation complete
        next: review
      - condition: Cannot proceed
        next: plan
    instruction: "{task}"
  - name: review
    agent: ./agents/reviewer.md
    rules:
      - condition: All checks passed
        next: COMPLETE
      - condition: Issues found
        next: implement
    instruction: "{task}"
`;
  const workflowPath = join(dir, 'workflow.yaml');
  writeFileSync(workflowPath, workflowYaml);
  return { dir, workflowPath };
}
describe('Pipeline Integration Tests', () => {
  let testDir: string;
  let workflowPath: string;

  /** Run executePipeline with the options shared by every test case. */
  const runPipeline = (task: string, workflow: string) =>
    executePipeline({
      task,
      workflow,
      autoPr: false,
      skipGit: true,
      cwd: testDir,
      provider: 'mock',
    });

  beforeEach(() => {
    vi.clearAllMocks();
    const setup = createTestWorkflowDir();
    testDir = setup.dir;
    workflowPath = setup.workflowPath;
  });

  afterEach(() => {
    resetScenario();
    rmSync(testDir, { recursive: true, force: true });
  });

  it('should complete pipeline with workflow path + skip-git + mock scenario', async () => {
    // Scenario: plan -> implement -> review -> COMPLETE.
    // agent field must match extractAgentName(step.agent), i.e. the .md filename without extension.
    setMockScenario([
      { agent: 'planner', status: 'done', content: '[PLAN:1]\n\nPlan completed. Requirements are clear.' },
      { agent: 'coder', status: 'done', content: '[IMPLEMENT:1]\n\nImplementation complete.' },
      { agent: 'reviewer', status: 'done', content: '[REVIEW:1]\n\nAll checks passed.' },
    ]);
    const exitCode = await runPipeline('Add a hello world function', workflowPath);
    expect(exitCode).toBe(0);
  });

  it('should complete pipeline with workflow name + skip-git + mock scenario', async () => {
    // Uses the builtin 'simple' workflow.
    // agent field: extractAgentName result (from .md filename);
    // tag in content: [STEP_NAME:N] where STEP_NAME is the step name uppercased.
    setMockScenario([
      { agent: 'planner', status: 'done', content: '[PLAN:1]\n\nRequirements are clear and implementable.' },
      { agent: 'coder', status: 'done', content: '[IMPLEMENT:1]\n\nImplementation complete.' },
      { agent: 'ai-antipattern-reviewer', status: 'done', content: '[AI_REVIEW:1]\n\nNo AI-specific issues.' },
      { agent: 'architecture-reviewer', status: 'done', content: '[REVIEW:1]\n\nNo issues found.' },
      { agent: 'supervisor', status: 'done', content: '[SUPERVISE:1]\n\nAll checks passed.' },
    ]);
    const exitCode = await runPipeline('Add a hello world function', 'simple');
    expect(exitCode).toBe(0);
  });

  it('should return EXIT_WORKFLOW_FAILED for non-existent workflow', async () => {
    // executeTask returns false when the workflow is not found,
    // so executePipeline reports EXIT_WORKFLOW_FAILED (3).
    const exitCode = await runPipeline('Test task', 'non-existent-workflow-xyz');
    expect(exitCode).toBe(3);
  });

  it('should handle ABORT transition from workflow', async () => {
    // plan matches its second rule -> ABORT -> workflow failed -> EXIT_WORKFLOW_FAILED (3).
    setMockScenario([
      { agent: 'planner', status: 'done', content: '[PLAN:2]\n\nRequirements unclear, insufficient info.' },
    ]);
    const exitCode = await runPipeline('Vague task with no details', workflowPath);
    expect(exitCode).toBe(3);
  });

  it('should handle review reject → implement → review loop', async () => {
    setMockScenario([
      // First pass.
      { agent: 'planner', status: 'done', content: '[PLAN:1]\n\nRequirements are clear.' },
      { agent: 'coder', status: 'done', content: '[IMPLEMENT:1]\n\nDone.' },
      { agent: 'reviewer', status: 'done', content: '[REVIEW:2]\n\nIssues found.' },
      // Fix loop.
      { agent: 'coder', status: 'done', content: '[IMPLEMENT:1]\n\nFixed.' },
      { agent: 'reviewer', status: 'done', content: '[REVIEW:1]\n\nAll checks passed.' },
    ]);
    const exitCode = await runPipeline('Task needing a fix', workflowPath);
    expect(exitCode).toBe(0);
  });
});

View File

@ -0,0 +1,333 @@
/**
* Workflow execution integration tests.
*
* Tests WorkflowEngine with real runAgent + MockProvider + ScenarioQueue.
* No vi.mock on runAgent or detectMatchedRule rules are matched via
* [STEP_NAME:N] tags in scenario content (tag-based detection).
*
* Mocked: UI, session, phase-runner (report/judgment phases), notifications, config
* Not mocked: WorkflowEngine, runAgent, detectMatchedRule, rule-evaluator
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { mkdtempSync, mkdirSync, writeFileSync, rmSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import { setMockScenario, resetScenario } from '../mock/scenario.js';
import type { WorkflowConfig, WorkflowStep, WorkflowRule } from '../models/types.js';
// --- Mocks (minimal — only infrastructure, not core logic) ---
// Safety net: prevent callAiJudge from calling real Claude CLI.
// Tag-based detection should always match in these tests; if it doesn't,
// this mock surfaces the failure immediately instead of timing out.
vi.mock('../claude/client.js', async (importOriginal) => {
  const original = await importOriginal<typeof import('../claude/client.js')>();
  return {
    ...original,
    // NOTE(review): -1 presumably signals "no rule matched" — confirm against the rule evaluator.
    callAiJudge: vi.fn().mockResolvedValue(-1),
  };
});

// Report/status-judgment phases are infrastructure, not engine logic — stub them out.
vi.mock('../workflow/phase-runner.js', () => ({
  needsStatusJudgmentPhase: vi.fn().mockReturnValue(false),
  runReportPhase: vi.fn().mockResolvedValue(undefined),
  runStatusJudgmentPhase: vi.fn().mockResolvedValue(''),
}));

// Fixed ids so the '.takt/reports/test-report-dir' layout created by createTestEnv matches.
vi.mock('../utils/session.js', () => ({
  generateReportDir: vi.fn().mockReturnValue('test-report-dir'),
  generateSessionId: vi.fn().mockReturnValue('test-session-id'),
}));

// Empty global config; English messages.
vi.mock('../config/globalConfig.js', () => ({
  loadGlobalConfig: vi.fn().mockReturnValue({}),
  getLanguage: vi.fn().mockReturnValue('en'),
}));

// Empty project config.
vi.mock('../config/projectConfig.js', () => ({
  loadProjectConfig: vi.fn().mockReturnValue({}),
}));
// --- Imports (after mocks) ---
import { WorkflowEngine } from '../workflow/engine.js';
// --- Test helpers ---
/** Build a workflow transition rule: when `condition` holds, go to step `next`. */
function makeRule(condition: string, next: string): WorkflowRule {
  const rule: WorkflowRule = { condition, next };
  return rule;
}
/** Build a workflow step whose agent prompt file lives at `promptPath`. */
function makeStep(name: string, promptPath: string, rules: WorkflowRule[]): WorkflowStep {
  return {
    name,
    agentDisplayName: name,
    agent: `./agents/${name}.md`,
    agentPath: promptPath,
    rules,
    instructionTemplate: '{task}',
    passPreviousResponse: true,
  };
}
/** Create a temp project dir with a report directory and five agent prompt files. */
function createTestEnv(): { dir: string; agentPaths: Record<string, string> } {
  const dir = mkdtempSync(join(tmpdir(), 'takt-it-wf-'));
  // Report directory matching the mocked generateReportDir() value.
  mkdirSync(join(dir, '.takt', 'reports', 'test-report-dir'), { recursive: true });
  const agentsDir = join(dir, 'agents');
  mkdirSync(agentsDir, { recursive: true });
  const agentPaths: Record<string, string> = {};
  for (const name of ['planner', 'coder', 'reviewer', 'fixer', 'supervisor']) {
    const promptPath = join(agentsDir, `${name}.md`);
    writeFileSync(promptPath, `You are a ${name}.`);
    agentPaths[name] = promptPath;
  }
  return { dir, agentPaths };
}
/** plan → implement → review workflow; review approves to COMPLETE or loops back. */
function buildSimpleWorkflow(agentPaths: Record<string, string>): WorkflowConfig {
  const plan = makeStep('plan', agentPaths.planner, [
    makeRule('Requirements are clear', 'implement'),
    makeRule('Requirements unclear', 'ABORT'),
  ]);
  const implement = makeStep('implement', agentPaths.coder, [
    makeRule('Implementation complete', 'review'),
    makeRule('Cannot proceed', 'plan'),
  ]);
  const review = makeStep('review', agentPaths.reviewer, [
    makeRule('All checks passed', 'COMPLETE'),
    makeRule('Issues found', 'implement'),
  ]);
  return {
    name: 'it-simple',
    description: 'IT simple workflow',
    maxIterations: 15,
    initialStep: 'plan',
    steps: [plan, implement, review],
  };
}
/** plan → implement → review → (fix ⇄ review) → supervise workflow with a fix loop. */
function buildLoopWorkflow(agentPaths: Record<string, string>): WorkflowConfig {
  const steps = [
    makeStep('plan', agentPaths.planner, [
      makeRule('Requirements are clear', 'implement'),
      makeRule('Requirements unclear', 'ABORT'),
    ]),
    makeStep('implement', agentPaths.coder, [
      makeRule('Implementation complete', 'review'),
      makeRule('Cannot proceed', 'plan'),
    ]),
    makeStep('review', agentPaths.reviewer, [
      makeRule('Approved', 'supervise'),
      makeRule('Needs fix', 'fix'),
    ]),
    makeStep('fix', agentPaths.fixer, [
      makeRule('Fix complete', 'review'),
      makeRule('Cannot fix', 'ABORT'),
    ]),
    makeStep('supervise', agentPaths.supervisor, [
      makeRule('All checks passed', 'COMPLETE'),
      makeRule('Requirements unmet', 'plan'),
    ]),
  ];
  return {
    name: 'it-loop',
    description: 'IT workflow with fix loop',
    maxIterations: 20,
    initialStep: 'plan',
    steps,
  };
}
describe('Workflow Engine IT: Happy Path', () => {
  let dir: string;
  let agents: Record<string, string>;

  beforeEach(() => {
    vi.clearAllMocks();
    ({ dir, agentPaths: agents } = createTestEnv());
  });

  afterEach(() => {
    resetScenario();
    rmSync(dir, { recursive: true, force: true });
  });

  it('should complete: plan → implement → review → COMPLETE', async () => {
    setMockScenario([
      { agent: 'plan', status: 'done', content: '[PLAN:1]\n\nRequirements are clear.' },
      { agent: 'implement', status: 'done', content: '[IMPLEMENT:1]\n\nImplementation complete.' },
      { agent: 'review', status: 'done', content: '[REVIEW:1]\n\nAll checks passed.' },
    ]);
    const engine = new WorkflowEngine(buildSimpleWorkflow(agents), dir, 'Test task', {
      provider: 'mock',
    });
    const result = await engine.run();
    expect(result.status).toBe('completed');
    // One iteration per executed step.
    expect(result.iteration).toBe(3);
  });

  it('should ABORT when plan returns rule 2', async () => {
    setMockScenario([
      { agent: 'plan', status: 'done', content: '[PLAN:2]\n\nRequirements unclear.' },
    ]);
    const engine = new WorkflowEngine(buildSimpleWorkflow(agents), dir, 'Vague task', {
      provider: 'mock',
    });
    const result = await engine.run();
    expect(result.status).toBe('aborted');
    expect(result.iteration).toBe(1);
  });
});
describe('Workflow Engine IT: Fix Loop', () => {
  let dir: string;
  let agents: Record<string, string>;

  beforeEach(() => {
    vi.clearAllMocks();
    ({ dir, agentPaths: agents } = createTestEnv());
  });

  afterEach(() => {
    resetScenario();
    rmSync(dir, { recursive: true, force: true });
  });

  it('should handle review → fix → review → supervise → COMPLETE', async () => {
    setMockScenario([
      { agent: 'plan', status: 'done', content: '[PLAN:1]\n\nClear.' },
      { agent: 'implement', status: 'done', content: '[IMPLEMENT:1]\n\nDone.' },
      // First review requests a fix.
      { agent: 'review', status: 'done', content: '[REVIEW:2]\n\nNeeds fix.' },
      { agent: 'fix', status: 'done', content: '[FIX:1]\n\nFix complete.' },
      // Second review approves.
      { agent: 'review', status: 'done', content: '[REVIEW:1]\n\nApproved.' },
      { agent: 'supervise', status: 'done', content: '[SUPERVISE:1]\n\nAll checks passed.' },
    ]);
    const engine = new WorkflowEngine(buildLoopWorkflow(agents), dir, 'Task needing fix', {
      provider: 'mock',
    });
    const result = await engine.run();
    expect(result.status).toBe('completed');
    expect(result.iteration).toBe(6);
  });

  it('should ABORT if fix fails', async () => {
    setMockScenario([
      { agent: 'plan', status: 'done', content: '[PLAN:1]\n\nClear.' },
      { agent: 'implement', status: 'done', content: '[IMPLEMENT:1]\n\nDone.' },
      { agent: 'review', status: 'done', content: '[REVIEW:2]\n\nNeeds fix.' },
      { agent: 'fix', status: 'done', content: '[FIX:2]\n\nCannot fix.' },
    ]);
    const engine = new WorkflowEngine(buildLoopWorkflow(agents), dir, 'Unfixable task', {
      provider: 'mock',
    });
    const result = await engine.run();
    expect(result.status).toBe('aborted');
  });
});
describe('Workflow Engine IT: Max Iterations', () => {
  let dir: string;
  let agents: Record<string, string>;

  beforeEach(() => {
    vi.clearAllMocks();
    ({ dir, agentPaths: agents } = createTestEnv());
  });

  afterEach(() => {
    resetScenario();
    rmSync(dir, { recursive: true, force: true });
  });

  it('should abort when maxIterations exceeded in infinite loop', async () => {
    // plan and implement bounce the workflow between each other forever:
    // plan rule 1 -> implement, implement rule 2 -> plan.
    const bouncing = Array.from({ length: 10 }, (_, i) => ({
      status: 'done' as const,
      content: i % 2 === 0 ? '[PLAN:1]\n\nClear.' : '[IMPLEMENT:2]\n\nCannot proceed.',
    }));
    setMockScenario(bouncing);
    const workflow = buildSimpleWorkflow(agents);
    workflow.maxIterations = 5;
    const engine = new WorkflowEngine(workflow, dir, 'Looping task', {
      provider: 'mock',
    });
    const result = await engine.run();
    expect(result.status).toBe('aborted');
    expect(result.iteration).toBeLessThanOrEqual(5);
  });
});
describe('Workflow Engine IT: Step Output Tracking', () => {
  let dir: string;
  let agents: Record<string, string>;

  beforeEach(() => {
    vi.clearAllMocks();
    ({ dir, agentPaths: agents } = createTestEnv());
  });

  afterEach(() => {
    resetScenario();
    rmSync(dir, { recursive: true, force: true });
  });

  it('should track step outputs through events', async () => {
    setMockScenario([
      { agent: 'plan', status: 'done', content: '[PLAN:1]\n\nPlan output.' },
      { agent: 'implement', status: 'done', content: '[IMPLEMENT:1]\n\nImplement output.' },
      { agent: 'review', status: 'done', content: '[REVIEW:1]\n\nReview output.' },
    ]);
    const engine = new WorkflowEngine(buildSimpleWorkflow(agents), dir, 'Track outputs', {
      provider: 'mock',
    });
    // Record the order in which steps report completion.
    const finished: string[] = [];
    engine.on('step:complete', (step) => {
      finished.push(step.name);
    });
    const result = await engine.run();
    expect(result.status).toBe('completed');
    expect(finished).toEqual(['plan', 'implement', 'review']);
  });
});

View File

@ -8,6 +8,7 @@
import { randomUUID } from 'node:crypto';
import type { StreamCallback, StreamEvent } from '../claude/process.js';
import type { AgentResponse } from '../models/types.js';
import { getScenarioQueue } from './scenario.js';
/** Options for mock calls */
export interface MockCallOptions {
@ -36,9 +37,13 @@ export async function callMock(
options: MockCallOptions
): Promise<AgentResponse> {
const sessionId = options.sessionId ?? generateMockSessionId();
const status = options.mockStatus ?? 'done';
// Scenario queue takes priority over explicit options
const scenarioEntry = getScenarioQueue()?.consume(agentName);
const status = scenarioEntry?.status ?? options.mockStatus ?? 'done';
const statusMarker = `[MOCK:${status.toUpperCase()}]`;
const content = options.mockResponse ??
const content = scenarioEntry?.content ?? options.mockResponse ??
`${statusMarker}\n\nMock response for agent "${agentName}".\nPrompt: ${prompt.slice(0, 100)}${prompt.length > 100 ? '...' : ''}`;
// Emit stream events if callback is provided

158
src/mock/scenario.ts Normal file
View File

@ -0,0 +1,158 @@
/**
* Mock scenario support for integration testing.
*
* Provides a queue-based mechanism to control mock provider responses
* per agent or by call order. Scenarios can be loaded from JSON files
* (via TAKT_MOCK_SCENARIO env var) or set programmatically in tests.
*/
import { readFileSync, existsSync } from 'node:fs';
/** A single entry in a mock scenario */
export interface ScenarioEntry {
  /** Agent name to match (optional — if omitted, consumed by call order) */
  agent?: string;
  /** Response status */
  status: 'done' | 'blocked' | 'approved' | 'rejected' | 'improve';
  /** Response content body */
  content: string;
}

/**
 * Queue that dispenses scenario entries.
 *
 * Matching rules:
 * 1. If an entry has `agent` set, it only matches calls for that agent name.
 * 2. Entries without `agent` match any call (consumed in order).
 * 3. First matching entry is removed from the queue and returned.
 * 4. Returns undefined when no matching entry remains.
 */
export class ScenarioQueue {
  private pending: ScenarioEntry[];

  constructor(entries: ScenarioEntry[]) {
    // Copy so consuming never mutates the caller's array.
    this.pending = entries.slice();
  }

  /**
   * Consume the next matching entry for the given agent.
   */
  consume(agentName: string): ScenarioEntry | undefined {
    // Remove and return the entry at idx, or undefined when idx is -1.
    const take = (idx: number): ScenarioEntry | undefined =>
      idx < 0 ? undefined : this.pending.splice(idx, 1)[0];

    // Agent-tagged entries take precedence over queue position.
    const tagged = this.pending.findIndex((e) => e.agent === agentName);
    if (tagged >= 0) {
      return take(tagged);
    }
    // Otherwise fall back to the first untagged entry.
    return take(this.pending.findIndex((e) => e.agent === undefined));
  }

  /** Number of remaining entries */
  get remaining(): number {
    return this.pending.length;
  }
}
// --- Global singleton (module-level state) ---

let activeQueue: ScenarioQueue | null = null;

/**
 * Set mock scenario programmatically (for tests).
 * Pass null to clear.
 */
export function setMockScenario(entries: ScenarioEntry[] | null): void {
  activeQueue = entries ? new ScenarioQueue(entries) : null;
}

/**
 * Get the current global scenario queue.
 * When none is set, attempts a lazy load from the TAKT_MOCK_SCENARIO env var;
 * a successful load is cached as the active queue.
 */
export function getScenarioQueue(): ScenarioQueue | null {
  if (!activeQueue) {
    const envPath = process.env.TAKT_MOCK_SCENARIO;
    if (!envPath) {
      return null;
    }
    activeQueue = new ScenarioQueue(loadScenarioFile(envPath));
  }
  return activeQueue;
}

/**
 * Reset global scenario state (for test cleanup).
 */
export function resetScenario(): void {
  activeQueue = null;
}
/**
 * Load and validate a scenario JSON file.
 *
 * @param filePath Absolute or relative path to scenario JSON
 * @throws Error if file not found or JSON invalid
 */
export function loadScenarioFile(filePath: string): ScenarioEntry[] {
  if (!existsSync(filePath)) {
    throw new Error(`Scenario file not found: ${filePath}`);
  }
  // Read outside the try so filesystem errors propagate as-is.
  const raw = readFileSync(filePath, 'utf-8');
  let parsed: unknown;
  try {
    parsed = JSON.parse(raw);
  } catch {
    throw new Error(`Scenario file is not valid JSON: ${filePath}`);
  }
  if (!Array.isArray(parsed)) {
    throw new Error(`Scenario file must contain a JSON array: ${filePath}`);
  }
  // validateEntry receives (entry, index) from map; the array argument is ignored.
  return parsed.map(validateEntry);
}
/**
 * Validate one raw JSON value as a ScenarioEntry.
 *
 * Required: "content" (string). Optional: "agent" (string),
 * "status" (one of the allowed values, defaulting to 'done').
 *
 * @throws Error naming the offending entry index on any violation
 */
function validateEntry(entry: unknown, index: number): ScenarioEntry {
  if (entry === null || typeof entry !== 'object') {
    throw new Error(`Scenario entry [${index}] must be an object`);
  }
  const record = entry as Record<string, unknown>;

  // content is required
  if (typeof record.content !== 'string') {
    throw new Error(`Scenario entry [${index}] must have a "content" string`);
  }

  // status defaults to 'done'
  const allowed = ['done', 'blocked', 'approved', 'rejected', 'improve'] as const;
  const status = record.status ?? 'done';
  const statusOk =
    typeof status === 'string' && (allowed as readonly string[]).includes(status);
  if (!statusOk) {
    throw new Error(
      `Scenario entry [${index}] has invalid status "${String(status)}". Valid: ${allowed.join(', ')}`,
    );
  }

  // agent is optional
  if (record.agent !== undefined && typeof record.agent !== 'string') {
    throw new Error(`Scenario entry [${index}] "agent" must be a string if provided`);
  }

  return {
    agent: record.agent as string | undefined,
    status: status as ScenarioEntry['status'],
    content: record.content,
  };
}