stream.go
package openai

import (
	"context"
	"errors"
	"net/http"
)

var (
	ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages")
)

type CompletionStream struct {
	*streamReader[CompletionResponse]
}
// CreateCompletionStream is an API call that creates a completion with streaming
// support. It forces request.Stream to true so that partial progress is streamed
// back: tokens are sent as data-only server-sent events as they become available,
// with the stream terminated by a data: [DONE] message.
func (c *Client) CreateCompletionStream(
	ctx context.Context,
	request CompletionRequest,
) (stream *CompletionStream, err error) {
	urlSuffix := "/completions"
	if !checkEndpointSupportsModel(urlSuffix, request.Model) {
		err = ErrCompletionUnsupportedModel
		return
	}

	if !checkPromptType(request.Prompt) {
		err = ErrCompletionRequestPromptTypeNotSupported
		return
	}

	request.Stream = true
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix, withModel(request.Model)),
		withBody(request),
	)
	if err != nil {
		return nil, err
	}

	resp, err := sendRequestStream[CompletionResponse](c, req)
	if err != nil {
		return
	}
	stream = &CompletionStream{
		streamReader: resp,
	}
	return
}
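
// Usage sketch: a minimal example of consuming CreateCompletionStream. The
// client value, context, model constant, prompt, and token limit below are
// illustrative assumptions; Recv and Close are provided by the embedded
// streamReader, and Recv returns io.EOF once the data: [DONE] message arrives.
//
//	// stream the completion and print tokens as they arrive
//	stream, err := client.CreateCompletionStream(ctx, CompletionRequest{
//		Model:     GPT3TextDavinci003, // assumed model for illustration
//		Prompt:    "Once upon a time",
//		MaxTokens: 16,
//	})
//	if err != nil {
//		return err
//	}
//	defer stream.Close()
//
//	for {
//		response, err := stream.Recv()
//		if errors.Is(err, io.EOF) {
//			break // server sent data: [DONE]
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Print(response.Choices[0].Text)
//	}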