diff --git a/opencti-platform/opencti-graphql/src/database/ai-llm.ts b/opencti-platform/opencti-graphql/src/database/ai-llm.ts
index cd7a506978e5..2b5864637d36 100644
--- a/opencti-platform/opencti-graphql/src/database/ai-llm.ts
+++ b/opencti-platform/opencti-graphql/src/database/ai-llm.ts
@@ -20,6 +20,11 @@ if (AI_ENABLED && AI_TOKEN) {
       client = new Mistral({
         serverURL: isEmptyField(AI_ENDPOINT) ? undefined : AI_ENDPOINT,
         apiKey: AI_TOKEN,
+        debugLogger: {
+          log: (message, args) => logApp.info(`[AI] log ${message}`, { message }),
+          group: (label) => logApp.info(`[AI] group ${label} start.`),
+          groupEnd: () => logApp.info('[AI] group end.'),
+        }
       });
       break;
     case 'openai':
@@ -38,15 +43,17 @@ export const queryMistralAi = async (busId: string | null, question: string, use
     throw UnsupportedError('Incorrect AI configuration', { enabled: AI_ENABLED, type: AI_TYPE, endpoint: AI_ENDPOINT, model: AI_MODEL });
   }
   try {
-    logApp.debug('[AI] Querying MistralAI with prompt', { questionStart: question.substring(0, 100) });
+    logApp.info('[AI] Querying MistralAI with prompt', { questionStart: question });
     const response = await (client as Mistral)?.chat.stream({
       model: AI_MODEL,
       messages: [{ role: 'user', content: question }],
+      safePrompt: undefined,
     });
     let content = '';
     if (response) {
       // eslint-disable-next-line no-restricted-syntax
       for await (const chunk of response) {
+        logApp.info('[AI] Querying MistralAI response chunk', { chunk });
         if (chunk.data.choices[0].delta.content !== undefined) {
           const streamText = chunk.data.choices[0].delta.content;
           content += streamText;