Merge pull request #252 from learningpro/main
Add OpenAI Compatible Provider
hkdeman authored Jan 8, 2025
2 parents 1411fcf + 15fa9ca commit b994967
Showing 4 changed files with 41 additions and 5 deletions.
7 changes: 6 additions & 1 deletion README.md
@@ -68,12 +68,17 @@ Get up and running with WhoDB quickly using Docker:
docker run -it -p 8080:8080 clidey/whodb
```


To run WhoDB with an OpenAI-compatible service, set the following environment variables:
```sh
docker run -it -e USE_CUSTOM_MODELS=1 -e CUSTOM_MODELS=gpt-4o,gpt-3.5,others -e OPENAI_BASE_URL=http://your_base_url/v1 -p 8080:8080 clidey/whodb
```
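
For example, assuming you run Ollama locally, which also serves an OpenAI-compatible API under `/v1`, the same variables can point WhoDB at it (the model names below are placeholders, and `host.docker.internal` reaches the host from the container on Docker Desktop):
```sh
docker run -it -e USE_CUSTOM_MODELS=1 -e CUSTOM_MODELS=llama3,mistral -e OPENAI_BASE_URL=http://host.docker.internal:11434/v1 -p 8080:8080 clidey/whodb
```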

If you are using a remote Ollama server, start the container with the Ollama host and port environment variables set:
```sh
docker run -it -e WHODB_OLLAMA_HOST=YOUR_OLLAMA_HOST -e WHODB_OLLAMA_PORT=YOUR_OLLAMA_PORT -p 8080:8080 clidey/whodb
```


Or, use Docker Compose:
```sh
version: "3.8"
5 changes: 2 additions & 3 deletions core/src/llm/chatgpt_client.go
@@ -9,7 +9,6 @@ import (
"strings"
)

const chatGPTEndpoint = "https://api.openai.com/v1"

func prepareChatGPTRequest(c *LLMClient, prompt string, model LLMModel, receiverChan *chan string) (string, []byte, map[string]string, error) {
	requestBody, err := json.Marshal(map[string]interface{}{
@@ -20,7 +19,7 @@ func prepareChatGPTRequest(c *LLMClient, prompt string, model LLMModel, receiver
	if err != nil {
		return "", nil, nil, err
	}
	url := fmt.Sprintf("%v/chat/completions", chatGPTEndpoint)
	url := fmt.Sprintf("%v/chat/completions", getOpenAICompatibleBaseURL())
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", c.APIKey),
		"Content-Type":  "application/json",
@@ -29,7 +28,7 @@
}

func prepareChatGPTModelsRequest(apiKey string) (string, map[string]string) {
	url := fmt.Sprintf("%v/models", chatGPTEndpoint)
	url := fmt.Sprintf("%v/models", getOpenAICompatibleBaseURL())
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", apiKey),
		"Content-Type":  "application/json",
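With the hard-coded `chatGPTEndpoint` constant gone, both the chat-completions and model-list URLs are composed from the configurable base URL; the `Authorization` header is still sent, and OpenAI-compatible servers that require no key will typically ignore it. A minimal sketch of the URL composition, assuming a local OpenAI-compatible server:

```go
package main

import "fmt"

func main() {
	base := "http://localhost:11434/v1" // e.g. the value of OPENAI_BASE_URL
	fmt.Println(fmt.Sprintf("%v/chat/completions", base)) // http://localhost:11434/v1/chat/completions
	fmt.Println(fmt.Sprintf("%v/models", base))           // http://localhost:11434/v1/models
}
```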
31 changes: 30 additions & 1 deletion core/src/llm/env.go
@@ -2,7 +2,8 @@ package llm

import (
	"fmt"
	"os"
	"strings"

	"github.com/clidey/whodb/core/src/common"
	"github.com/clidey/whodb/core/src/env"
)
@@ -24,3 +25,31 @@ func getOllamaEndpoint() string {

	return fmt.Sprintf("http://%v:%v/api", host, port)
}

// getOpenAICompatibleBaseURL returns the base URL of the OpenAI-compatible
// API, preferring the OPENAI_BASE_URL environment variable over the OpenAI
// default.
func getOpenAICompatibleBaseURL() string {
	defaultBaseURL := "https://api.openai.com/v1"
	baseURL := os.Getenv("OPENAI_BASE_URL")
	if baseURL == "" {
		baseURL = defaultBaseURL
	}
	return baseURL
}

// getCustomModels parses the comma-separated CUSTOM_MODELS environment
// variable into a list of model names, trimming whitespace around each entry.
func getCustomModels() ([]string, error) {
	modelsStr := os.Getenv("CUSTOM_MODELS")
	if modelsStr == "" {
		return []string{}, nil
	}

	models := strings.Split(modelsStr, ",")

	for i := range models {
		models[i] = strings.TrimSpace(models[i])
	}
	return models, nil
}

// ShouldUseCustomModels reports whether the CUSTOM_MODELS list should be used
// instead of querying the provider, enabled by setting USE_CUSTOM_MODELS=1.
func ShouldUseCustomModels() bool {
	useCustomModels := os.Getenv("USE_CUSTOM_MODELS")
	return useCustomModels == "1"
}
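
As a quick check of how the new helpers parse their input, here is a self-contained sketch (a standalone reimplementation for illustration, not code from this commit):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseCustomModels mirrors getCustomModels above: split the comma-separated
// CUSTOM_MODELS value and trim the whitespace around each model name.
func parseCustomModels() []string {
	modelsStr := os.Getenv("CUSTOM_MODELS")
	if modelsStr == "" {
		return []string{}
	}
	models := strings.Split(modelsStr, ",")
	for i := range models {
		models[i] = strings.TrimSpace(models[i])
	}
	return models
}

func main() {
	os.Setenv("CUSTOM_MODELS", "gpt-4o, gpt-3.5, my-local-model")
	fmt.Println(parseCustomModels()) // [gpt-4o gpt-3.5 my-local-model]
}
```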
3 changes: 3 additions & 0 deletions core/src/llm/llm_client.go
@@ -66,6 +66,9 @@ func (c *LLMClient) GetSupportedModels() ([]string, error) {
		url, headers = prepareOllamaModelsRequest()
	case ChatGPT_LLMType:
		url, headers = prepareChatGPTModelsRequest(c.APIKey)
		if ShouldUseCustomModels() {
			return getCustomModels()
		}
	case Anthropic_LLMType:
		return getAnthropicModels(c.APIKey)
	default:
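With this branch in place, setting `USE_CUSTOM_MODELS=1` short-circuits the `/models` request for the ChatGPT provider and returns the `CUSTOM_MODELS` list instead. A condensed, hypothetical version of that decision (illustrative names, not the commit's API):

```go
package main

import (
	"fmt"
	"os"
)

// useCustomModels mirrors ShouldUseCustomModels above.
func useCustomModels() bool {
	return os.Getenv("USE_CUSTOM_MODELS") == "1"
}

func main() {
	os.Setenv("USE_CUSTOM_MODELS", "1")
	if useCustomModels() {
		fmt.Println("returning CUSTOM_MODELS list; skipping GET /models")
	} else {
		fmt.Println("querying", os.Getenv("OPENAI_BASE_URL")+"/models")
	}
}
```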
