add log
@@ -60,22 +60,21 @@ func (h *Proxy) ChatHandler(c *gin.Context) {
 			c.SSEvent("", data)
 		}
 	}
-	go func() {
-		llmusage := llm.GetTokenUsage()
-
-		cost := tokenizer.Cost(llmusage.Model, llmusage.PromptTokens+llmusage.ToolsTokens, llmusage.CompletionTokens)
-		userid, _ := strconv.ParseInt(c.GetString("user_id"), 10, 64)
-		usage := model.Usage{
-			UserID:           userid,
-			Model:            llmusage.Model,
-			Stream:           chatreq.Stream,
-			PromptTokens:     llmusage.PromptTokens + llmusage.ToolsTokens,
-			CompletionTokens: llmusage.CompletionTokens,
-			TotalTokens:      llmusage.TotalTokens,
-			Cost:             fmt.Sprintf("%f", cost),
-		}
-		h.SendUsage(&usage)
-		defer fmt.Println("cost:", cost, "prompt_tokens:", llmusage.PromptTokens, "completion_tokens:", llmusage.CompletionTokens, "total_tokens:", llmusage.TotalTokens)
-	}()
+	llmusage := llm.GetTokenUsage()
+
+	cost := tokenizer.Cost(llmusage.Model, llmusage.PromptTokens+llmusage.ToolsTokens, llmusage.CompletionTokens)
+	userid, _ := strconv.ParseInt(c.GetString("user_id"), 10, 64)
+	usage := model.Usage{
+		UserID:           userid,
+		Model:            llmusage.Model,
+		Stream:           chatreq.Stream,
+		PromptTokens:     llmusage.PromptTokens + llmusage.ToolsTokens,
+		CompletionTokens: llmusage.CompletionTokens,
+		TotalTokens:      llmusage.TotalTokens,
+		Cost:             fmt.Sprintf("%f", cost),
+	}
+	h.SendUsage(&usage)
+	defer fmt.Println("cost:", cost, "prompt_tokens:", llmusage.PromptTokens, "completion_tokens:", llmusage.CompletionTokens, "total_tokens:", llmusage.TotalTokens)
+
 
 }
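Two things are worth noting in the hunk above. First, the usage accounting moves out of the go func() { ... }() wrapper and now runs inline in ChatHandler instead of on a separate goroutine, so h.SendUsage completes before the handler returns. Second, the deferred fmt.Println evaluates its arguments at the defer statement, so it prints the same final counters a plain call at that point would. For orientation, here is a minimal sketch of a cost helper with the call shape used above; only the signature is taken from the call site, while the package layout, the per-million-token prices, and the int token type are assumptions, not this repository's actual tokenizer package:

package tokenizer

// Illustrative price table, in dollars per million tokens.
// Real prices vary by provider and model; these numbers are placeholders.
var pricePerMTok = map[string]struct{ In, Out float64 }{
	"example-model": {In: 2.50, Out: 10.00},
}

// Cost estimates the dollar cost of a request from its token counts.
// Matches the call shape in ChatHandler: Cost(model, prompt+tools, completion).
func Cost(model string, promptTokens, completionTokens int) float64 {
	p, ok := pricePerMTok[model]
	if !ok {
		return 0 // unknown model: report zero instead of guessing a price
	}
	return float64(promptTokens)/1e6*p.In + float64(completionTokens)/1e6*p.Out
}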
@@ -13,7 +13,7 @@ type LLM interface {
 
 type llm struct {
 	ApiKey   *model.ApiKey
-	Usage    *model.Usage
+	Usage    *TokenUsage
 	tools    any   // TODO
 	Messages []any // TODO
 	llm      LLM
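The new field type above refers to the TokenUsage value returned by llm.GetTokenUsage(). Its definition is not part of this diff; judging from the fields read in ChatHandler and StreamChat, it plausibly looks like the sketch below, with the field types assumed:

// TokenUsage accumulates per-request token counts reported by the provider.
// Inferred from the fields accessed elsewhere in this commit; types assumed.
type TokenUsage struct {
	Model            string
	PromptTokens     int
	ToolsTokens      int
	CompletionTokens int
	TotalTokens      int
}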
@@ -218,6 +218,7 @@ func (o *OpenAICompatible) StreamChat(ctx context.Context, chatReq llm.ChatReque
 			// case output <- &streamResp:
 			// }
 		}
+		fmt.Println("llm usage:", o.tokenUsage.Model, o.tokenUsage.PromptTokens, o.tokenUsage.CompletionTokens, o.tokenUsage.TotalTokens)
 	}()
 	return output, nil
 }
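The added fmt.Println fires after the stream is fully drained, so the counters are final when printed, and the fields come out space-separated, e.g. llm usage: some-model 128 56 184 (values illustrative). If the line is meant to outlive debugging, a structured variant via the standard log/slog package keeps the fields machine-readable; a sketch under that assumption, reusing the TokenUsage shape sketched above (logUsage is hypothetical, not part of this repo):

// Requires import "log/slog"; emits the same fields as the added Println,
// but as key/value pairs that a log collector can parse.
func logUsage(u TokenUsage) {
	slog.Info("llm usage",
		"model", u.Model,
		"prompt_tokens", u.PromptTokens,
		"completion_tokens", u.CompletionTokens,
		"total_tokens", u.TotalTokens,
	)
}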