How to Implement OpenAI Server Push (Streaming) in Go
I have been experimenting with OpenAI recently. A completion can take a long time to finish, and the browser cannot be left waiting indefinitely for the full response; the text OpenAI returns has to be pushed to the client in real time so the user gets a responsive, satisfying interaction. Here is the source code:
package chat

import (
	"context"
	"errors"
	"io"
	"os"

	v1 "SmartPaper/api/chat/v1"

	"github.com/gogf/gf/v2/frame/g"
	"github.com/joho/godotenv"
	openai "github.com/sashabaranov/go-openai"
)

// Chat streams the OpenAI chat completion back to the client chunk by chunk,
// so the browser does not have to wait for the whole answer.
func (c *ChatV1) Chat(ctx context.Context, req *v1.ChatReq) (res *v1.ChatRes, err error) {
	// Load the API key and base URL from the .env file.
	err = godotenv.Load(".env")
	if err != nil {
		return res, err
	}
	OPENAI_API_KEY := os.Getenv("OPENAI_API_KEY")
	OPENAI_BASE_URL := os.Getenv("OPENAI_BASE_URL")

	// Build a client with a custom base URL (useful for proxies or compatible gateways).
	config := openai.DefaultConfig(OPENAI_API_KEY)
	config.BaseURL = OPENAI_BASE_URL
	clientai := openai.NewClientWithConfig(config) // or openai.NewClient(OPENAI_API_KEY)

	// Stream: true asks the API to return the answer incrementally.
	reqai := openai.ChatCompletionRequest{
		Model: openai.GPT4o,
		// MaxTokens: 128*1024 - 1,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: req.Prompt,
			},
		},
		Stream: true,
	}

	stream, err := clientai.CreateChatCompletionStream(ctx, reqai)
	if err != nil {
		return res, err
	}
	defer stream.Close()

	// Write every delta to the GoFrame response and flush immediately,
	// so the client receives the text as soon as it is generated.
	response := g.RequestFromCtx(ctx).Response
	for {
		responseai, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// The model has finished; end the stream normally.
			return res, nil
		}
		if err != nil {
			return res, err
		}
		if len(responseai.Choices) > 0 {
			response.Write(responseai.Choices[0].Delta.Content)
			response.Flush()
		}
	}
}
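To see the push from the consumer side, here is a minimal Go client sketch. The endpoint URL and the JSON field name are assumptions made for illustration (they depend on how the GoFrame route for ChatReq is registered); adjust them to your actual service. The point is that the body is read and printed chunk by chunk as it arrives, rather than after the whole response is complete.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical route and payload; adjust to how the Chat handler is registered.
	url := "http://localhost:8000/chat"
	payload := bytes.NewBufferString(`{"prompt":"Introduce yourself in one sentence."}`)

	resp, err := http.Post(url, "application/json", payload)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Read the streamed body in small chunks and print each one immediately.
	buf := make([]byte, 512)
	for {
		n, err := resp.Body.Read(buf)
		if n > 0 {
			fmt.Print(string(buf[:n]))
		}
		if err != nil {
			break // io.EOF once the server finishes the stream
		}
	}
	fmt.Println()
}

The raw Write/Flush approach in the handler works fine with a fetch- or HTTP-client-based reader like the one above. If the browser is meant to consume the stream through EventSource instead, the handler would additionally need to set the Content-Type: text/event-stream header and wrap each chunk in the "data: ...\n\n" SSE framing.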