feat: add system prompt support to ReplicateLLM and OpenAI
igorMSoares committed Jan 25, 2025
1 parent 34faf48 commit 2976782
Showing 2 changed files with 18 additions and 2 deletions.
10 changes: 9 additions & 1 deletion packages/providers/openai/src/llm.ts
@@ -155,6 +155,7 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
   topP: number;
   maxTokens?: number | undefined;
   additionalChatOptions?: OpenAIAdditionalChatOptions | undefined;
+  systemPrompt?: string | undefined;
 
   // OpenAI session params
   apiKey?: string | undefined = undefined;
@@ -185,6 +186,7 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
     this.temperature = init?.temperature ?? 0.1;
     this.topP = init?.topP ?? 1;
     this.maxTokens = init?.maxTokens ?? undefined;
+    this.systemPrompt = init?.systemPrompt ?? undefined;
 
     this.maxRetries = init?.maxRetries ?? 10;
     this.timeout = init?.timeout ?? 60 * 1000; // Default is 60 seconds
@@ -337,12 +339,18 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
     | AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>
   > {
     const { messages, stream, tools, additionalChatOptions } = params;
+    const systemMessage: ChatMessage<ToolCallLLMMessageOptions> | undefined =
+      this.systemPrompt
+        ? { role: "system", content: this.systemPrompt }
+        : undefined;
     const baseRequestParams = <OpenAILLM.Chat.ChatCompletionCreateParams>{
       model: this.model,
       temperature: this.temperature,
       max_tokens: this.maxTokens,
       tools: tools?.map(OpenAI.toTool),
-      messages: OpenAI.toOpenAIMessage(messages),
+      messages: OpenAI.toOpenAIMessage(
+        systemMessage ? [systemMessage].concat(messages) : messages,
+      ),
       top_p: this.topP,
       ...Object.assign({}, this.additionalChatOptions, additionalChatOptions),
     };
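With this change a system prompt can be set once on the client and is prepended to every chat call, so callers no longer have to inject a system message into each messages array themselves. A minimal usage sketch, assuming the published package name and a model name for illustration (neither is part of this diff):

import { OpenAI } from "@llamaindex/openai"; // assumed package name

const llm = new OpenAI({
  model: "gpt-4o-mini", // assumed model, for illustration only
  systemPrompt: "You are a concise assistant. Answer in one sentence.",
});

// The system message is prepended internally, so only user turns are passed here.
const response = await llm.chat({
  messages: [{ role: "user", content: "What is a llama?" }],
});
console.log(response.message.content);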
10 changes: 9 additions & 1 deletion packages/providers/replicate/src/llm.ts
@@ -111,6 +111,7 @@ export class ReplicateLLM extends BaseLLM {
   topP: number;
   maxTokens?: number;
   replicateSession: ReplicateSession;
+  systemPrompt?: string | undefined;
 
   constructor(init?: Partial<ReplicateLLM> & { noWarn?: boolean }) {
     super();
@@ -135,6 +136,7 @@ export class ReplicateLLM extends BaseLLM {
       init?.maxTokens ??
       ALL_AVAILABLE_REPLICATE_MODELS[this.model].contextWindow; // For Replicate, the default is 500 tokens which is too low.
     this.replicateSession = init?.replicateSession ?? new ReplicateSession();
+    this.systemPrompt = init?.systemPrompt ?? undefined;
   }
 
   get metadata() {
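The Replicate provider follows the same pattern: the prompt is set once in the constructor and defaults to undefined. A construction sketch, assuming the package name and a model key (ALL_AVAILABLE_REPLICATE_MODELS in the file above holds the real keys):

import { ReplicateLLM } from "@llamaindex/replicate"; // assumed package name

const llm = new ReplicateLLM({
  model: "llama-3-70b-instruct", // assumed key in ALL_AVAILABLE_REPLICATE_MODELS
  systemPrompt: "You are a helpful assistant that answers in haiku.",
});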

get metadata() {
@@ -175,6 +177,12 @@ }
   }
 
   mapMessagesToPromptLlama3(messages: ChatMessage[]) {
+    const systemPrompt = this.systemPrompt
+      ? "<|begin_of_text|><|start_header_id|>system<|end_header_id|>" +
+        this.systemPrompt +
+        "<|eot_id|>"
+      : undefined;
+
     return {
       prompt:
         "<|begin_of_text|>" +
@@ -196,7 +204,7 @@
             );
           }, "") +
         "<|start_header_id|>assistant<|end_header_id|>\n\n",
-      systemPrompt: undefined,
+      systemPrompt,
     };
   }
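Note that the system header is returned as a separate systemPrompt field rather than spliced into prompt, presumably so it can be passed to Replicate's system_prompt input alongside prompt. Continuing the sketch above, the mapper would yield roughly the following (the per-message mapping is elided from this diff, so prompt is approximated from the standard Llama 3 chat template):

const { prompt, systemPrompt } = llm.mapMessagesToPromptLlama3([
  { role: "user", content: "Hi" },
]);

// systemPrompt:
//   "<|begin_of_text|><|start_header_id|>system<|end_header_id|>" +
//   "You are a helpful assistant that answers in haiku." +
//   "<|eot_id|>"
//
// prompt (approximate):
//   "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHi<|eot_id|>" +
//   "<|start_header_id|>assistant<|end_header_id|>\n\n"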

