
Commit 336665f
Use gpt-3.5-turbo for completion
bakks committed Mar 2, 2023
1 parent 46e1ada commit 336665f
Showing 5 changed files with 158 additions and 34 deletions.
52 changes: 36 additions & 16 deletions butterfish/butterfish.go
@@ -29,14 +29,23 @@ import (
 // for using AI capabilities on the command line.
 
 type ButterfishConfig struct {
-	Verbose     bool
-	OpenAIToken string
-	LLMClient   LLM
-	ColorScheme *ColorScheme
-	Styles      *styles
-
+	Verbose           bool
+	OpenAIToken       string
+	LLMClient         LLM
+	ColorScheme       *ColorScheme
+	Styles            *styles
 	PromptLibraryPath string
 	PromptLibrary     PromptLibrary
+
+	GencmdModel          string
+	GencmdTemperature    float32
+	GencmdMaxTokens      int
+	ExeccheckModel       string
+	ExeccheckTemperature float32
+	ExeccheckMaxTokens   int
+	SummarizeModel       string
+	SummarizeTemperature float32
+	SummarizeMaxTokens   int
 }
 
 type PromptLibrary interface {
@@ -107,6 +116,27 @@ var GruvboxLight = ColorScheme{
 	Grey: "#928374",
 }
 
+const BestCompletionModel = "gpt-3.5-turbo"
+
+func MakeButterfishConfig() *ButterfishConfig {
+	colorScheme := &GruvboxDark
+
+	return &ButterfishConfig{
+		Verbose:              false,
+		ColorScheme:          colorScheme,
+		Styles:               ColorSchemeToStyles(colorScheme),
+		GencmdModel:          BestCompletionModel,
+		GencmdTemperature:    0.6,
+		GencmdMaxTokens:      512,
+		ExeccheckModel:       BestCompletionModel,
+		ExeccheckTemperature: 0.6,
+		ExeccheckMaxTokens:   512,
+		SummarizeModel:       BestCompletionModel,
+		SummarizeTemperature: 0.7,
+		SummarizeMaxTokens:   1024,
+	}
+}
+
 // Data type for passing byte chunks from a wrapped command around
 type byteMsg struct {
 	Data []byte
@@ -326,16 +356,6 @@ func ColorSchemeToStyles(colorScheme *ColorScheme) *styles {
 	}
 }
 
-func MakeButterfishConfig() *ButterfishConfig {
-	colorScheme := &GruvboxDark
-
-	return &ButterfishConfig{
-		Verbose:     false,
-		ColorScheme: colorScheme,
-		Styles:      ColorSchemeToStyles(colorScheme),
-	}
-}
-
 // Let's initialize our prompts. If we have a prompt library file, we'll load it.
 // Either way, we'll then add the default prompts to the library, replacing
 // loaded prompts only if OkToReplace is set on them. Then we save the library
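For context, a minimal sketch (not part of the commit) of how these new defaults behave from a caller's perspective. The import path follows the module path in go.mod below; the environment variable name is a placeholder for this sketch.

package main

import (
	"fmt"
	"os"

	"github.com/bakks/butterfish/butterfish"
)

func main() {
	// Every per-command model now defaults to BestCompletionModel.
	cfg := butterfish.MakeButterfishConfig()
	cfg.OpenAIToken = os.Getenv("OPENAI_API_KEY") // placeholder env var name

	fmt.Println(cfg.GencmdModel, cfg.ExeccheckModel, cfg.SummarizeModel)
	// Output: gpt-3.5-turbo gpt-3.5-turbo gpt-3.5-turbo

	// Overriding a model back to the davinci engine would make the routing
	// added in gpt.go (below) take the legacy completion path instead.
	cfg.SummarizeModel = "text-davinci-003"
}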
26 changes: 13 additions & 13 deletions butterfish/commands.go
@@ -49,10 +49,10 @@ func (this *ButterfishCtx) ParseCommand(cmd string) (*kong.Context, *CliCommandConfig, error) {
 type CliCommandConfig struct {
 	Prompt struct {
 		Prompt []string `arg:"" help:"Prompt to use." optional:""`
-		Model string `short:"m" default:"text-davinci-003" help:"GPT model to use for the prompt."`
+		Model string `short:"m" default:"gpt-3.5-turbo" help:"GPT model to use for the prompt."`
 		NumTokens int `short:"n" default:"1024" help:"Maximum number of tokens to generate."`
 		Temperature float32 `short:"T" default:"0.7" help:"Temperature to use for the prompt, higher temperature indicates more freedom/randomness when generating each token."`
-	} `cmd:"" help:"Run an LLM prompt without wrapping, stream results back. This is a straight-through call to the LLM from the command line with a given prompt. This accepts piped input, if there is both piped input and a prompt then they will be concatenated together (prompt first). It is recommended that you wrap the prompt with quotes. The default GPT model is text-davinci-003."`
+	} `cmd:"" help:"Run an LLM prompt without wrapping, stream results back. This is a straight-through call to the LLM from the command line with a given prompt. This accepts piped input, if there is both piped input and a prompt then they will be concatenated together (prompt first). It is recommended that you wrap the prompt with quotes. The default GPT model is gpt-3.5-turbo."`
 
 	Summarize struct {
 		Files []string `arg:"" help:"File paths to summarize." optional:""`
@@ -110,7 +110,7 @@ type CliCommandConfig struct {
 
 	Indexquestion struct {
 		Question string `arg:"" help:"Question to ask."`
-		Model string `short:"m" default:"text-davinci-003" help:"GPT model to use for the prompt."`
+		Model string `short:"m" default:"gpt-3.5-turbo" help:"GPT model to use for the prompt."`
 		NumTokens int `short:"n" default:"1024" help:"Maximum number of tokens to generate."`
 		Temperature float32 `short:"T" default:"0.7" help:"Temperature to use for the prompt."`
 	} `cmd:"" help:"Ask a question using the embeddings index. This fetches text snippets from the index and passes them to the LLM to generate an answer, thus you need to run the index command first."`
@@ -557,9 +557,9 @@ func (this *ButterfishCtx) gencmdCommand(description string) (string, error) {
 	req := &util.CompletionRequest{
 		Ctx:         this.Ctx,
 		Prompt:      prompt,
-		Model:       "text-davinci-003",
-		MaxTokens:   512,
-		Temperature: 0.6,
+		Model:       this.Config.GencmdModel,
+		MaxTokens:   this.Config.GencmdMaxTokens,
+		Temperature: this.Config.GencmdTemperature,
 	}
 
 	resp, err := this.LLMClient.Completion(req)
@@ -596,9 +596,9 @@ func (this *ButterfishCtx) execAndCheck(ctx context.Context, cmd string) error {
 	req := &util.CompletionRequest{
 		Ctx:         this.Ctx,
 		Prompt:      prompt,
-		Model:       "code-davinci-003",
-		MaxTokens:   512,
-		Temperature: 0.6,
+		Model:       this.Config.ExeccheckModel,
+		MaxTokens:   this.Config.ExeccheckMaxTokens,
+		Temperature: this.Config.ExeccheckTemperature,
 	}
 
 	response, err := this.LLMClient.CompletionStream(req, styleWriter)
@@ -749,9 +749,9 @@ func (this *ButterfishCtx) SummarizeChunks(chunks [][]byte) error {
 	writer := util.NewStyledWriter(this.Out, this.Config.Styles.Foreground)
 	req := &util.CompletionRequest{
 		Ctx:         this.Ctx,
-		Model:       "text-davinci-003",
-		MaxTokens:   1024,
-		Temperature: 0.7,
+		Model:       this.Config.SummarizeModel,
+		MaxTokens:   this.Config.SummarizeMaxTokens,
+		Temperature: this.Config.SummarizeTemperature,
 	}
 
 	if len(chunks) == 1 {
@@ -825,7 +825,7 @@ func (this *ButterfishCtx) checkClientOutputForError(client int, openCmd string,
 	req := &util.CompletionRequest{
 		Ctx:         this.Ctx,
 		Prompt:      prompt,
-		Model:       "text-davinci-003",
+		Model:       "gpt-3.5-turbo",
 		MaxTokens:   1024,
 		Temperature: 0.7,
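The updated call sites above share one shape: model, token budget, and temperature now come from ButterfishConfig instead of hardcoded literals. A hypothetical helper (not in the commit) makes the pattern explicit; the field names match the diff, while the helper itself, and the assumption that Ctx holds a context.Context as its use suggests, are illustrative only.

package main

import (
	"context"

	"github.com/bakks/butterfish/butterfish"
	"github.com/bakks/butterfish/util"
)

// summarizeRequest builds the same request shape SummarizeChunks now builds,
// pulling every tunable from the config struct.
func summarizeRequest(ctx context.Context, cfg *butterfish.ButterfishConfig, prompt string) *util.CompletionRequest {
	return &util.CompletionRequest{
		Ctx:         ctx,
		Prompt:      prompt,
		Model:       cfg.SummarizeModel,
		MaxTokens:   cfg.SummarizeMaxTokens,
		Temperature: cfg.SummarizeTemperature,
	}
}

func main() {
	cfg := butterfish.MakeButterfishConfig()
	req := summarizeRequest(context.Background(), cfg, "Summarize this text.")
	_ = req // would be passed to an LLM client's Completion method
}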
105 changes: 103 additions & 2 deletions butterfish/gpt.go
@@ -8,8 +8,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/PullRequestInc/go-gpt3"
 	"github.com/bakks/butterfish/util"
+	"github.com/bakks/go-gpt3"
 )
 
 type GPT struct {
@@ -40,7 +40,27 @@ func printResponse(writer io.Writer, response string) {
 	fmt.Fprintf(writer, "↓ ---\n%s\n-----\n", response)
 }
 
-func (this *GPT) CompletionStream(request *util.CompletionRequest, writer io.Writer) (string, error) {
+// We're doing completions through the chat API by default; this routes
+// to the legacy completion API if the model is the legacy model.
+func (this *GPT) Completion(request *util.CompletionRequest) (string, error) {
+	if request.Model == gpt3.TextDavinci003Engine {
+		return this.LegacyCompletion(request)
+	}
+
+	return this.SimpleChatCompletion(request)
+}
+
+// We're doing completions through the chat API by default; this routes
+// to the legacy completion API if the model is the legacy model.
+func (this *GPT) CompletionStream(request *util.CompletionRequest, writer io.Writer) (string, error) {
+	if request.Model == gpt3.TextDavinci003Engine {
+		return this.LegacyCompletionStream(request, writer)
+	}
+
+	return this.SimpleChatCompletionStream(request, writer)
+}
+
+func (this *GPT) LegacyCompletionStream(request *util.CompletionRequest, writer io.Writer) (string, error) {
 	engine := request.Model
 	req := gpt3.CompletionRequest{
 		Prompt: []string{request.Prompt},
Expand Down Expand Up @@ -69,8 +89,55 @@ func (this *GPT) CompletionStream(request *util.CompletionRequest, writer io.Wri
return strBuilder.String(), err
}

const chatbotSystemMessage = "You are a helpful assistant that gives people technical advince about the unix command line and writing software. Respond only in commands or code, do not wrap code in quotes."

func (this *GPT) SimpleChatCompletionStream(request *util.CompletionRequest, writer io.Writer) (string, error) {
req := gpt3.ChatCompletionRequest{
Model: request.Model,
Messages: []gpt3.ChatCompletionRequestMessage{
{
Role: "system",
Content: chatbotSystemMessage,
},
{
Role: "user",
Content: request.Prompt,
},
},
MaxTokens: request.MaxTokens,
Temperature: request.Temperature,
N: 1,
}

strBuilder := strings.Builder{}

callback := func(resp *gpt3.ChatCompletionStreamResponse) {
//fmt.Fprintf(writer, "__%s\n", resp.Choices[0].Message.Content)
if resp.Choices == nil || len(resp.Choices) == 0 {
return
}

text := resp.Choices[0].Delta.Content
if text == "" {
return
}

writer.Write([]byte(text))
strBuilder.WriteString(text)
}

if this.verbose {
printPrompt(this.verboseWriter, request.Prompt)
}
err := this.client.ChatCompletionStream(request.Ctx, req, callback)
fmt.Fprintf(writer, "\n") // GPT doesn't finish with a newline

return strBuilder.String(), err
}

// Run a GPT completion request and return the response
func (this *GPT) Completion(request *util.CompletionRequest) (string, error) {
func (this *GPT) LegacyCompletion(request *util.CompletionRequest) (string, error) {
panic("old")
engine := request.Model
req := gpt3.CompletionRequest{
Prompt: []string{request.Prompt},
@@ -92,6 +159,40 @@ func (this *GPT) Completion(request *util.CompletionRequest) (string, error) {
 	return resp.Choices[0].Text, nil
 }
 
+func (this *GPT) SimpleChatCompletion(request *util.CompletionRequest) (string, error) {
+	req := gpt3.ChatCompletionRequest{
+		Model: request.Model,
+		Messages: []gpt3.ChatCompletionRequestMessage{
+			{
+				Role:    "system",
+				Content: chatbotSystemMessage,
+			},
+			{
+				Role:    "user",
+				Content: request.Prompt,
+			},
+		},
+		MaxTokens:   request.MaxTokens,
+		Temperature: request.Temperature,
+		N:           1,
+	}
+
+	if this.verbose {
+		printPrompt(this.verboseWriter, request.Prompt)
+	}
+	resp, err := this.client.ChatCompletion(request.Ctx, req)
+	if err != nil {
+		return "", err
+	}
+
+	responseText := resp.Choices[0].Message.Content
+
+	if this.verbose {
+		printResponse(this.verboseWriter, responseText)
+	}
+	return responseText, nil
+}
+
 func (this *GPT) SetVerbose(verbose bool) {
 	this.verbose = verbose
 }
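The routing rule above, shown in isolation as a standalone sketch (not part of the commit): every model goes through the chat API unless it is the legacy davinci engine. The local constant mirrors gpt3.TextDavinci003Engine from the client library.

package main

import "fmt"

// legacyEngine mirrors the value of gpt3.TextDavinci003Engine.
const legacyEngine = "text-davinci-003"

// endpointFor sketches the dispatch Completion and CompletionStream now apply.
func endpointFor(model string) string {
	if model == legacyEngine {
		return "legacy completion API"
	}
	return "chat completion API"
}

func main() {
	fmt.Println(endpointFor("gpt-3.5-turbo"))    // chat completion API
	fmt.Println(endpointFor("text-davinci-003")) // legacy completion API
}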
3 changes: 2 additions & 1 deletion go.mod
@@ -3,8 +3,8 @@ module github.com/bakks/butterfish
 go 1.19
 
 require (
-	github.com/PullRequestInc/go-gpt3 v1.1.10
 	github.com/alecthomas/kong v0.7.1
+	github.com/bakks/go-gpt3 v1.1.10-turbo-2
 	github.com/charmbracelet/bubbles v0.14.0
 	github.com/charmbracelet/bubbletea v0.23.1
 	github.com/charmbracelet/lipgloss v0.6.0
@@ -25,6 +25,7 @@ require (
 )
 
 require (
+	github.com/PullRequestInc/go-gpt3 v1.1.11 // indirect
 	github.com/atotto/clipboard v0.1.4 // indirect
 	github.com/aymanbagabas/go-osc52 v1.0.3 // indirect
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
6 changes: 4 additions & 2 deletions go.sum
@@ -38,8 +38,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/PullRequestInc/go-gpt3 v1.1.10 h1:Z2fYKq7oGrr4Cs0yDmuq25FYR4hgvCowIR9sENHmSZ0=
-github.com/PullRequestInc/go-gpt3 v1.1.10/go.mod h1:F9yzAy070LhkqHS2154/IH0HVj5xq5g83gLTj7xzyfw=
+github.com/PullRequestInc/go-gpt3 v1.1.11 h1:kZtCbAnUEKfUS50a+0TR2p9rJtz4t57THf5cxN3Ye/o=
+github.com/PullRequestInc/go-gpt3 v1.1.11/go.mod h1:F9yzAy070LhkqHS2154/IH0HVj5xq5g83gLTj7xzyfw=
 github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
 github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
 github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
@@ -48,6 +48,8 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z
 github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
 github.com/aymanbagabas/go-osc52 v1.0.3 h1:DTwqENW7X9arYimJrPeGZcV0ln14sGMt3pHZspWD+Mg=
 github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4=
+github.com/bakks/go-gpt3 v1.1.10-turbo-2 h1:limZbEic7l5gg6Z8+qMZy+4OBegF7pupFVHXGEc7g60=
+github.com/bakks/go-gpt3 v1.1.10-turbo-2/go.mod h1:7DANcTjjael9W6oPGBrYheOnGiv1hUSdJE0CcBP6zwc=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
