From a9ff7e1c94ba2197e4c5f5bbdf7ff13508e9f857 Mon Sep 17 00:00:00 2001
From: Sakurasan <26715255+Sakurasan@users.noreply.github.com>
Date: Mon, 21 Apr 2025 21:50:29 +0800
Subject: [PATCH] add log

---
 internal/controller/proxy/chat_proxy.go | 31 ++++++++++++-------------
 llm/llm.go                              |  2 +-
 llm/openai_compatible/chat.go           |  1 +
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/internal/controller/proxy/chat_proxy.go b/internal/controller/proxy/chat_proxy.go
index abbc78b..2e9220c 100644
--- a/internal/controller/proxy/chat_proxy.go
+++ b/internal/controller/proxy/chat_proxy.go
@@ -60,22 +60,21 @@ func (h *Proxy) ChatHandler(c *gin.Context) {
 			c.SSEvent("", data)
 		}
 	}
 
-	go func() {
-		llmusage := llm.GetTokenUsage()
-		cost := tokenizer.Cost(llmusage.Model, llmusage.PromptTokens+llmusage.ToolsTokens, llmusage.CompletionTokens)
-		userid, _ := strconv.ParseInt(c.GetString("user_id"), 10, 64)
-		usage := model.Usage{
-			UserID:           userid,
-			Model:            llmusage.Model,
-			Stream:           chatreq.Stream,
-			PromptTokens:     llmusage.PromptTokens + llmusage.ToolsTokens,
-			CompletionTokens: llmusage.CompletionTokens,
-			TotalTokens:      llmusage.TotalTokens,
-			Cost:             fmt.Sprintf("%f", cost),
-		}
-		h.SendUsage(&usage)
-		defer fmt.Println("cost:", cost, "prompt_tokens:", llmusage.PromptTokens, "completion_tokens:", llmusage.CompletionTokens, "total_tokens:", llmusage.TotalTokens)
-	}()
+	llmusage := llm.GetTokenUsage()
+
+	cost := tokenizer.Cost(llmusage.Model, llmusage.PromptTokens+llmusage.ToolsTokens, llmusage.CompletionTokens)
+	userid, _ := strconv.ParseInt(c.GetString("user_id"), 10, 64)
+	usage := model.Usage{
+		UserID:           userid,
+		Model:            llmusage.Model,
+		Stream:           chatreq.Stream,
+		PromptTokens:     llmusage.PromptTokens + llmusage.ToolsTokens,
+		CompletionTokens: llmusage.CompletionTokens,
+		TotalTokens:      llmusage.TotalTokens,
+		Cost:             fmt.Sprintf("%f", cost),
+	}
+	h.SendUsage(&usage)
+	defer fmt.Println("cost:", cost, "prompt_tokens:", llmusage.PromptTokens, "completion_tokens:", llmusage.CompletionTokens, "total_tokens:", llmusage.TotalTokens)
 }
 
diff --git a/llm/llm.go b/llm/llm.go
index 0bf889e..7004458 100644
--- a/llm/llm.go
+++ b/llm/llm.go
@@ -13,7 +13,7 @@ type LLM interface {
 
 type llm struct {
 	ApiKey   *model.ApiKey
-	Usage    *model.Usage
+	Usage    *TokenUsage
 	tools    any   // TODO
 	Messages []any // TODO
 	llm      LLM
diff --git a/llm/openai_compatible/chat.go b/llm/openai_compatible/chat.go
index 5a760ec..b062182 100644
--- a/llm/openai_compatible/chat.go
+++ b/llm/openai_compatible/chat.go
@@ -218,6 +218,7 @@ func (o *OpenAICompatible) StreamChat(ctx context.Context, chatReq llm.ChatReque
 			// case output <- &streamResp:
 			// }
 		}
+		fmt.Println("llm usage:", o.tokenUsage.Model, o.tokenUsage.PromptTokens, o.tokenUsage.CompletionTokens, o.tokenUsage.TotalTokens)
 	}()
 	return output, nil
 }
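
Reviewer note, not part of the patch: dropping the go func() wrapper means the usage accounting now runs synchronously, after the SSE loop above it has drained, so the token counts are read only once the stream has finished. One nit remains in the new block: it ends with "defer fmt.Println(...)" as the final statement of ChatHandler, and a defer placed there fires immediately at return, so it behaves exactly like a plain call. Below is a minimal, self-contained sketch of that behavior plus a log.Printf alternative; the tokenUsage type and all values are stand-ins invented for illustration, not code from this repo.

    package main

    import (
    	"fmt"
    	"log"
    )

    // tokenUsage is a stand-in for the repo's TokenUsage type; only the
    // fields the patch prints are mirrored here.
    type tokenUsage struct {
    	Model            string
    	PromptTokens     int
    	CompletionTokens int
    	TotalTokens      int
    }

    func main() {
    	// Dummy values, for illustration only.
    	u := tokenUsage{Model: "demo-model", PromptTokens: 12, CompletionTokens: 34, TotalTokens: 46}

    	// Mirrors the patch's trailing statement: a defer that is the last
    	// statement before return fires immediately at return, so it prints
    	// the same output a plain fmt.Println at this point would.
    	defer fmt.Println("cost:", 0.000123, "prompt_tokens:", u.PromptTokens,
    		"completion_tokens:", u.CompletionTokens, "total_tokens:", u.TotalTokens)

    	// A log.Printf one-liner carries a timestamp and is easier to grep;
    	// a possible follow-up, not something this patch does.
    	log.Printf("llm usage model=%s prompt=%d completion=%d total=%d",
    		u.Model, u.PromptTokens, u.CompletionTokens, u.TotalTokens)
    }

Running this prints the log.Printf line first; the deferred fmt.Println fires as main returns, just as it will at the end of ChatHandler.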