Files
opencatd-open/llm/claude/v2/chat.go
2025-05-04 02:52:04 +08:00

253 lines
7.3 KiB
Go

// https://docs.anthropic.com/en/docs/about-claude/models/all-models
package claude
import (
"context"
"encoding/base64"
"net/http"
"net/url"
"opencatd-open/internal/model"
"opencatd-open/llm"
"os"
"strings"
"github.com/liushuangls/go-anthropic/v2"
"github.com/sashabaranov/go-openai"
)
// Claude adapts the Anthropic Messages API to this project's llm provider
// interface, translating OpenAI-shaped requests and responses.
type Claude struct {
	// Ctx is a background context created in NewClaude.
	// NOTE(review): Chat/StreamChat take their own ctx parameter; this field
	// looks unused within this file — confirm external use before removing.
	Ctx context.Context
	// ApiKey is the stored key record; ApiKey.ApiKey holds the raw Anthropic key.
	ApiKey *model.ApiKey
	// tokenUsage accumulates prompt/completion token counts across all
	// Chat and StreamChat calls made through this instance.
	tokenUsage *llm.TokenUsage
	// Done is created in NewClaude.
	// NOTE(review): never closed or received from in this file — verify callers.
	Done chan struct{}
	// Client is the underlying go-anthropic API client.
	Client *anthropic.Client
}
// NewClaude builds a Claude client for the given API key. If the LOCAL_PROXY
// environment variable contains a parseable URL, all HTTP traffic for this
// client is routed through that proxy; an unparseable value is silently
// ignored and the default transport is used.
func NewClaude(apiKey *model.ApiKey) (*Claude, error) {
	opts := []anthropic.ClientOption{}
	if proxy := os.Getenv("LOCAL_PROXY"); proxy != "" {
		proxyURL, err := url.Parse(proxy)
		if err == nil {
			// Build a dedicated client. The previous code assigned
			// http.DefaultClient to a local and mutated its Transport,
			// which silently routed EVERY package using the default
			// client through the proxy.
			client := &http.Client{
				Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
			}
			opts = append(opts, anthropic.WithHTTPClient(client))
		}
	}
	return &Claude{
		Ctx:        context.Background(),
		ApiKey:     apiKey,
		tokenUsage: &llm.TokenUsage{},
		Done:       make(chan struct{}),
		Client:     anthropic.NewClient(*apiKey.ApiKey, opts...),
	}, nil
}
// Chat performs a single non-streaming completion against the Anthropic
// Messages API and maps the result onto the OpenAI-shaped llm.ChatResponse.
// Token usage from the response is accumulated on c.tokenUsage.
// Remote (http/https) image URLs are not supported and are skipped; only
// inline base64 data URIs are forwarded.
func (c *Claude) Chat(ctx context.Context, chatReq llm.ChatRequest) (*llm.ChatResponse, error) {
	var messages []anthropic.Message
	for _, msg := range chatReq.Messages {
		// Anthropic only distinguishes "user" and "assistant"; any other
		// incoming role is folded into the user role.
		role := anthropic.RoleUser
		if msg.Role == "assistant" {
			role = anthropic.RoleAssistant
		}
		var content []anthropic.MessageContent
		if len(msg.MultiContent) > 0 {
			for _, mc := range msg.MultiContent {
				switch mc.Type {
				case "text":
					// Copy before taking the address: &mc.Text aliases the
					// loop variable on Go < 1.22.
					text := mc.Text
					content = append(content, anthropic.MessageContent{Type: anthropic.MessagesContentTypeText, Text: &text})
				case "image_url":
					if !strings.HasPrefix(mc.ImageURL.URL, "data:image") {
						// Remote URLs (and anything else) are skipped.
						continue
					}
					header, payload, ok := strings.Cut(mc.ImageURL.URL, ",")
					if !ok {
						continue
					}
					// header is "data:image/<subtype>;base64" — derive the media
					// type generically instead of special-casing jpeg/png only.
					mediaType := strings.TrimSuffix(strings.TrimPrefix(header, "data:"), ";base64")
					imageBytes, err := base64.StdEncoding.DecodeString(payload)
					if err != nil {
						// Skip undecodable payloads rather than sending garbage
						// (the old code ignored this error).
						continue
					}
					content = append(content, anthropic.MessageContent{Type: "image", Source: &anthropic.MessageContentSource{Type: "base64", MediaType: mediaType, Data: imageBytes}})
				}
			}
		} else if len(msg.Content) > 0 {
			text := msg.Content
			content = append(content, anthropic.MessageContent{Type: "text", Text: &text})
		}
		// Append each source message exactly once. The previous code also
		// appended a partially-built copy per multi-content part, producing
		// duplicated messages in the request.
		messages = append(messages, anthropic.Message{Role: role, Content: content})
	}
	// Default output budget by model family when the caller doesn't set one.
	maxTokens := chatReq.MaxTokens
	if maxTokens <= 0 {
		switch {
		case strings.Contains(chatReq.Model, "3-7"):
			maxTokens = 64000
		case strings.Contains(chatReq.Model, "3-5"):
			maxTokens = 8192
		default:
			maxTokens = 4096
		}
	}
	resp, err := c.Client.CreateMessages(ctx, anthropic.MessagesRequest{
		Model:     anthropic.Model(chatReq.Model),
		Messages:  messages,
		MaxTokens: maxTokens,
		Stream:    false,
	})
	if err != nil {
		return nil, err
	}
	if c.tokenUsage.Model == "" && resp.Model != "" {
		c.tokenUsage.Model = string(resp.Model)
	}
	c.tokenUsage.PromptTokens += resp.Usage.InputTokens
	c.tokenUsage.CompletionTokens += resp.Usage.OutputTokens
	c.tokenUsage.TotalTokens += resp.Usage.InputTokens + resp.Usage.OutputTokens
	// Guard against empty content / nil text — the old code panicked on
	// resp.Content[0] when the model returned no content blocks.
	var answer string
	if len(resp.Content) > 0 && resp.Content[0].Text != nil {
		answer = *resp.Content[0].Text
	}
	return &llm.ChatResponse{
		Model: string(resp.Model),
		Choices: []openai.ChatCompletionChoice{
			{
				FinishReason: openai.FinishReason(resp.StopReason),
				Message: openai.ChatCompletionMessage{
					Role:    openai.ChatMessageRoleAssistant,
					Content: answer,
				},
			},
		},
	}, nil
}
// StreamChat starts a streaming completion against the Anthropic Messages
// API. It returns a channel that yields incremental deltas (and a final
// chunk carrying the finish reason) and is closed when the stream ends or
// ctx is cancelled. Usage counters are accumulated on c.tokenUsage from the
// message_start and message_delta events.
func (c *Claude) StreamChat(ctx context.Context, chatReq llm.ChatRequest) (chan *llm.StreamChatResponse, error) {
	var messages []anthropic.Message
	for _, msg := range chatReq.Messages {
		// Anthropic only distinguishes "user" and "assistant".
		role := anthropic.RoleUser
		if msg.Role == "assistant" {
			role = anthropic.RoleAssistant
		}
		var content []anthropic.MessageContent
		if len(msg.MultiContent) > 0 {
			for _, mc := range msg.MultiContent {
				switch mc.Type {
				case "text":
					// Copy before taking the address: &mc.Text aliases the
					// loop variable on Go < 1.22.
					text := mc.Text
					content = append(content, anthropic.MessageContent{Type: anthropic.MessagesContentTypeText, Text: &text})
				case "image_url":
					// Only inline base64 data URIs are supported.
					if !strings.HasPrefix(mc.ImageURL.URL, "data:image") {
						continue
					}
					header, payload, ok := strings.Cut(mc.ImageURL.URL, ",")
					if !ok {
						continue
					}
					// header is "data:image/<subtype>;base64".
					mediaType := strings.TrimSuffix(strings.TrimPrefix(header, "data:"), ";base64")
					imageBytes, err := base64.StdEncoding.DecodeString(payload)
					if err != nil {
						continue
					}
					content = append(content, anthropic.MessageContent{Type: "image", Source: &anthropic.MessageContentSource{Type: "base64", MediaType: mediaType, Data: imageBytes}})
				}
			}
		} else if len(msg.Content) > 0 {
			text := msg.Content
			content = append(content, anthropic.MessageContent{Type: "text", Text: &text})
		}
		// Append each source message exactly once (the old code also appended
		// a partially-built copy per multi-content part).
		messages = append(messages, anthropic.Message{Role: role, Content: content})
	}
	maxTokens := chatReq.MaxTokens
	if maxTokens <= 0 {
		if strings.Contains(chatReq.Model, "sonnet") || strings.Contains(chatReq.Model, "haiku") {
			maxTokens = 8192
		} else {
			maxTokens = 4096
		}
	}
	datachan := make(chan *llm.StreamChatResponse)
	// send delivers a chunk unless the caller has gone away; without the
	// ctx.Done case an abandoned consumer would block the callbacks forever
	// and leak this goroutine.
	send := func(resp *llm.StreamChatResponse) {
		select {
		case datachan <- resp:
		case <-ctx.Done():
		}
	}
	go func() {
		defer close(datachan)
		_, err := c.Client.CreateMessagesStream(ctx, anthropic.MessagesStreamRequest{
			MessagesRequest: anthropic.MessagesRequest{
				Model:     anthropic.Model(chatReq.Model),
				Messages:  messages,
				MaxTokens: maxTokens,
			},
			OnContentBlockDelta: func(data anthropic.MessagesEventContentBlockDeltaData) {
				// Non-text deltas carry a nil Text; the old code would panic.
				if data.Delta.Text == nil {
					return
				}
				send(&llm.StreamChatResponse{
					Model: chatReq.Model,
					Choices: []openai.ChatCompletionStreamChoice{
						{Delta: openai.ChatCompletionStreamChoiceDelta{Content: *data.Delta.Text}},
					},
				})
			},
			OnMessageStart: func(start anthropic.MessagesEventMessageStartData) {
				c.tokenUsage.PromptTokens += start.Message.Usage.InputTokens
				c.tokenUsage.CompletionTokens += start.Message.Usage.OutputTokens
				c.tokenUsage.TotalTokens += start.Message.Usage.InputTokens + start.Message.Usage.OutputTokens
			},
			OnMessageDelta: func(delta anthropic.MessagesEventMessageDeltaData) {
				c.tokenUsage.PromptTokens += delta.Usage.InputTokens
				c.tokenUsage.CompletionTokens += delta.Usage.OutputTokens
				c.tokenUsage.TotalTokens += delta.Usage.InputTokens + delta.Usage.OutputTokens
				send(&llm.StreamChatResponse{
					Model: chatReq.Model,
					Choices: []openai.ChatCompletionStreamChoice{
						{FinishReason: openai.FinishReason(delta.Delta.StopReason)},
					},
				})
			},
		})
		// The old code wrote err here and read it from the calling goroutine —
		// a data race whose check was effectively always nil. A stream failure
		// now simply closes the channel.
		// NOTE(review): consider extending llm.StreamChatResponse so the error
		// can be surfaced to the consumer instead of being dropped.
		_ = err
	}()
	return datachan, nil
}
// GetTokenUsage reports the usage totals accumulated by Chat and StreamChat
// calls made through this client.
func (c *Claude) GetTokenUsage() *llm.TokenUsage {
	usage := c.tokenUsage
	return usage
}