diff --git a/src/__tests__/prompts.test.ts b/src/__tests__/prompts.test.ts
index f6af374..55fb8bf 100644
--- a/src/__tests__/prompts.test.ts
+++ b/src/__tests__/prompts.test.ts
@@ -130,6 +130,7 @@ describe('template file existence', () => {
     'score_interactive_policy',
     'score_summary_system_prompt',
     'score_slug_system_prompt',
+    'score_slug_user_prompt',
     'perform_phase1_message',
     'perform_phase2_message',
     'perform_phase3_message',
diff --git a/src/__tests__/summarize.test.ts b/src/__tests__/summarize.test.ts
index 20ba865..db39382 100644
--- a/src/__tests__/summarize.test.ts
+++ b/src/__tests__/summarize.test.ts
@@ -57,20 +57,25 @@ describe('summarizeTaskName', () => {
       timestamp: new Date(),
     });
 
-    // When
-    const result = await summarizeTaskName('long task name for testing', { cwd: '/project' });
-
-    // Then
-    expect(result).toBe('add-auth');
-    expect(mockGetProvider).toHaveBeenCalledWith('claude');
-    expect(mockProviderCall).toHaveBeenCalledWith(
-      'long task name for testing',
-      expect.objectContaining({
-        cwd: '/project',
-        allowedTools: [],
-      })
-    );
-  });
+    // When
+    const result = await summarizeTaskName('long task name for testing', { cwd: '/project' });
+
+    // Then
+    expect(result).toBe('add-auth');
+    expect(mockGetProvider).toHaveBeenCalledWith('claude');
+    const callPrompt = mockProviderCall.mock.calls[0]?.[0];
+    expect(callPrompt).toContain('Generate a slug from the task description below.');
+    expect(callPrompt).toContain('<task_description>');
+    expect(callPrompt).toContain('long task name for testing');
+    expect(callPrompt).toContain('</task_description>');
+    expect(mockProviderCall).toHaveBeenCalledWith(
+      expect.any(String),
+      expect.objectContaining({
+        cwd: '/project',
+        permissionMode: 'readonly',
+      })
+    );
+  });
 
   it('should return AI-generated slug for English task name', async () => {
     // Given
diff --git a/src/infra/task/summarize.ts b/src/infra/task/summarize.ts
index 185bd13..a8c8041 100644
--- a/src/infra/task/summarize.ts
+++ b/src/infra/task/summarize.ts
@@ -70,10 +70,11 @@ export class TaskSummarizer {
       name: 'summarizer',
       systemPrompt: loadTemplate('score_slug_system_prompt', 'en'),
     });
-    const response = await agent.call(taskName, {
+    const prompt = loadTemplate('score_slug_user_prompt', 'en', { taskDescription: taskName });
+    const response = await agent.call(prompt, {
       cwd: options.cwd,
       model,
-      allowedTools: [],
+      permissionMode: 'readonly',
     });
 
     const slug = sanitizeSlug(response.content);
diff --git a/src/shared/prompts/en/score_slug_user_prompt.md b/src/shared/prompts/en/score_slug_user_prompt.md
new file mode 100644
index 0000000..bcdd39d
--- /dev/null
+++ b/src/shared/prompts/en/score_slug_user_prompt.md
@@ -0,0 +1,7 @@
+<instructions>
+Generate a slug from the task description below.
+Output ONLY the slug text.
+</instructions>
+<task_description>
+{{taskDescription}}
+</task_description>
diff --git a/src/shared/prompts/ja/score_slug_user_prompt.md b/src/shared/prompts/ja/score_slug_user_prompt.md
new file mode 100644
index 0000000..bcdd39d
--- /dev/null
+++ b/src/shared/prompts/ja/score_slug_user_prompt.md
@@ -0,0 +1,7 @@
+<instructions>
+Generate a slug from the task description below.
+Output ONLY the slug text.
+</instructions>
+<task_description>
+{{taskDescription}}
+</task_description>