From 7519846e20a5c37c40d4ec21406255396e10f9f9 Mon Sep 17 00:00:00 2001 From: Sakurasan <26715255+Sakurasan@users.noreply.github.com> Date: Sun, 17 Nov 2024 18:17:21 +0800 Subject: [PATCH] fix: make StreamOptions a pointer so stream_options is omitted from request JSON when unset --- pkg/google/chat.go | 3 +++ pkg/openai/chat.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/google/chat.go b/pkg/google/chat.go index 034bdea..ad18ce0 100644 --- a/pkg/google/chat.go +++ b/pkg/google/chat.go @@ -1,3 +1,6 @@ +// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/grounding-search-entry-points?authuser=2&hl=zh-cn +// +// https://cloud.google.com/vertex-ai/docs/generative-ai/quotas-genai package google import ( diff --git a/pkg/openai/chat.go b/pkg/openai/chat.go index d8fb5bd..f6abfec 100644 --- a/pkg/openai/chat.go +++ b/pkg/openai/chat.go @@ -88,7 +88,7 @@ type ChatCompletionRequest struct { Tools []Tool `json:"tools,omitempty"` ParallelToolCalls bool `json:"parallel_tool_calls,omitempty"` // ToolChoice any `json:"tool_choice,omitempty"` - StreamOptions StreamOption `json:"stream_options,omitempty"` + StreamOptions *StreamOption `json:"stream_options,omitempty"` } func (c ChatCompletionRequest) ToByteJson() []byte { @@ -206,7 +206,7 @@ func ChatProxy(c *gin.Context, chatReq *ChatCompletionRequest) { chatReq.MaxTokens = 16384 } if chatReq.Stream { - chatReq.StreamOptions.IncludeUsage = true + chatReq.StreamOptions = &StreamOption{IncludeUsage: true} } usagelog.PromptCount = tokenizer.NumTokensFromStr(prompt, chatReq.Model)