feat: Go backend, enhanced search, new widgets, Docker deploy

Major changes:
- Add Go backend (backend/) with microservices architecture
- Enhanced master-agents-svc: reranker, content-classifier, stealth-crawler,
  proxy-manager, media-search, fastClassifier, language detection
- New web-svc widgets: KnowledgeCard, ProductCard, ProfileCard, VideoCard,
  UnifiedCard, CardGallery, InlineImageGallery, SourcesPanel, RelatedQuestions
- Improved discover-svc with discover-db integration
- Docker deployment improvements (Caddyfile, vendor.sh, BUILD.md)
- Library-svc: project_id schema migration
- Remove deprecated finance-svc and travel-svc
- Localization improvements across services

Made-with: Cursor
This commit is contained in:
home
2026-02-27 04:15:32 +03:00
parent 328d968f3f
commit 06fe57c765
285 changed files with 53132 additions and 1871 deletions

View File

@@ -0,0 +1,182 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
// AnthropicClient talks to the Anthropic Messages API over streaming SSE.
type AnthropicClient struct {
	baseClient
	apiKey  string       // sent as the x-api-key header
	baseURL string       // API root without trailing slash (default https://api.anthropic.com)
	client  *http.Client // no Timeout set: streaming responses rely on ctx for cancellation
}
// NewAnthropicClient builds a client for the Anthropic Messages API.
// When cfg.BaseURL is empty the public endpoint is used; a trailing slash
// is stripped so URL joins stay well-formed. The error result is always
// nil today but mirrors the other provider constructors.
func NewAnthropicClient(cfg ProviderConfig) (*AnthropicClient, error) {
	base := cfg.BaseURL
	if base == "" {
		base = "https://api.anthropic.com"
	}
	base = strings.TrimSuffix(base, "/")
	c := &AnthropicClient{
		baseClient: baseClient{providerID: cfg.ProviderID, modelKey: cfg.ModelKey},
		apiKey:     cfg.APIKey,
		baseURL:    base,
		client:     &http.Client{},
	}
	return c, nil
}
// anthropicRequest is the body of a POST /v1/messages call.
type anthropicRequest struct {
	Model     string             `json:"model"`
	Messages  []anthropicMessage `json:"messages"`
	System    string             `json:"system,omitempty"` // system prompt travels out-of-band, not as a message
	MaxTokens int                `json:"max_tokens"`       // required by the Messages API
	Stream    bool               `json:"stream"`
	Tools     []anthropicTool    `json:"tools,omitempty"`
}

// anthropicMessage is a single conversation turn; this client only sends
// plain-text content.
type anthropicMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// anthropicTool declares one callable tool; InputSchema is a JSON Schema
// object.
type anthropicTool struct {
	Name        string      `json:"name"`
	Description string      `json:"description"`
	InputSchema interface{} `json:"input_schema"`
}

// anthropicStreamEvent is the subset of SSE event payloads this client
// decodes; StreamText only acts on content_block_delta and message_stop.
type anthropicStreamEvent struct {
	Type  string `json:"type"`
	Index int    `json:"index,omitempty"`
	Delta struct {
		Type string `json:"type,omitempty"`
		Text string `json:"text,omitempty"`
	} `json:"delta,omitempty"`
	ContentBlock struct {
		Type string `json:"type"`
		Text string `json:"text,omitempty"`
	} `json:"content_block,omitempty"`
}
// StreamText sends a streaming Messages request and returns a channel of
// incremental chunks. System messages are lifted into the top-level
// "system" field (Anthropic has no system role); tool-role messages are
// downgraded to user messages since this client does not model Anthropic
// tool_result blocks. The channel is closed when the stream ends or the
// context is cancelled.
func (c *AnthropicClient) StreamText(ctx context.Context, req StreamRequest) (<-chan StreamChunk, error) {
	var systemPrompt string
	messages := make([]anthropicMessage, 0, len(req.Messages))
	for _, m := range req.Messages {
		if m.Role == RoleSystem {
			// Last system message wins; it travels out-of-band.
			systemPrompt = m.Content
			continue
		}
		role := string(m.Role)
		if role == "tool" {
			// Anthropic only accepts user/assistant roles here.
			role = "user"
		}
		messages = append(messages, anthropicMessage{
			Role:    role,
			Content: m.Content,
		})
	}
	maxTokens := req.Options.MaxTokens
	if maxTokens == 0 {
		// max_tokens is mandatory for the Messages API; pick a sane default.
		maxTokens = 4096
	}
	anthropicReq := anthropicRequest{
		Model:     c.modelKey,
		Messages:  messages,
		System:    systemPrompt,
		MaxTokens: maxTokens,
		Stream:    true,
	}
	if len(req.Tools) > 0 {
		anthropicReq.Tools = make([]anthropicTool, len(req.Tools))
		for i, t := range req.Tools {
			anthropicReq.Tools[i] = anthropicTool{
				Name:        t.Name,
				Description: t.Description,
				InputSchema: t.Schema,
			}
		}
	}
	body, err := json.Marshal(anthropicReq)
	if err != nil {
		return nil, err
	}
	httpReq, err := http.NewRequestWithContext(ctx, "POST", c.baseURL+"/v1/messages", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("x-api-key", c.apiKey)
	httpReq.Header.Set("anthropic-version", "2023-06-01")
	resp, err := c.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		errBody, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, fmt.Errorf("anthropic API error: %d - %s", resp.StatusCode, string(errBody))
	}
	ch := make(chan StreamChunk, 100)
	go func() {
		defer close(ch)
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		// Fix: bufio.Scanner defaults to a 64 KiB max token size; a single
		// SSE data line can exceed that for large content deltas, which
		// would silently terminate the stream. Allow up to 1 MiB.
		scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
		for scanner.Scan() {
			line := scanner.Text()
			if !strings.HasPrefix(line, "data: ") {
				continue
			}
			data := strings.TrimPrefix(line, "data: ")
			if data == "[DONE]" {
				// Defensive: Anthropic ends with message_stop, but
				// OpenAI-style proxies may emit [DONE].
				return
			}
			var event anthropicStreamEvent
			if err := json.Unmarshal([]byte(data), &event); err != nil {
				// Skip malformed or keep-alive payloads.
				continue
			}
			switch event.Type {
			case "content_block_delta":
				if event.Delta.Text != "" {
					ch <- StreamChunk{ContentChunk: event.Delta.Text}
				}
			case "message_stop":
				ch <- StreamChunk{FinishReason: "stop"}
				return
			}
		}
	}()
	return ch, nil
}
// GenerateText runs a streaming request and concatenates every content
// chunk into a single string; tool-call chunks and finish reasons are
// discarded.
func (c *AnthropicClient) GenerateText(ctx context.Context, req StreamRequest) (string, error) {
	stream, err := c.StreamText(ctx, req)
	if err != nil {
		return "", err
	}
	return readAllChunks(stream), nil
}

View File

@@ -0,0 +1,145 @@
package llm
import (
"context"
"fmt"
"io"
)
// Role identifies the author of a chat message.
type Role string

// Standard chat roles shared by all providers.
const (
	RoleSystem    Role = "system"
	RoleUser      Role = "user"
	RoleAssistant Role = "assistant"
	RoleTool      Role = "tool"
)

// Message is one provider-agnostic chat turn.
type Message struct {
	Role       Role           `json:"role"`
	Content    string         `json:"content"`
	ToolCalls  []ToolCall     `json:"tool_calls,omitempty"`   // calls previously issued by the assistant
	ToolCallID string         `json:"tool_call_id,omitempty"` // set on tool-result messages
	Name       string         `json:"name,omitempty"`
	Images     []ImageContent `json:"images,omitempty"`
}

// ImageContent attaches an image either by URL or as inline data.
type ImageContent struct {
	Type     string `json:"type"`
	URL      string `json:"url,omitempty"`
	Data     string `json:"data,omitempty"`
	IsBase64 bool   `json:"isBase64,omitempty"`
}

// ToolCall is a function invocation requested by the model, with its
// arguments already decoded from JSON.
type ToolCall struct {
	ID        string                 `json:"id"`
	Name      string                 `json:"name"`
	Arguments map[string]interface{} `json:"arguments"`
}

// Tool declares a function the model may call; Schema is a JSON Schema
// object describing the argument shape.
type Tool struct {
	Name        string      `json:"name"`
	Description string      `json:"description"`
	Schema      interface{} `json:"schema"`
}

// StreamOptions are provider-agnostic sampling parameters; zero values
// mean "use the provider default".
type StreamOptions struct {
	MaxTokens   int      `json:"max_tokens,omitempty"`
	Temperature float64  `json:"temperature,omitempty"`
	TopP        float64  `json:"top_p,omitempty"`
	StopWords   []string `json:"stop,omitempty"`
}

// StreamChunk is one incremental streaming result; typically exactly one
// field is populated per chunk.
type StreamChunk struct {
	ContentChunk  string     `json:"content_chunk,omitempty"`
	ToolCallChunk []ToolCall `json:"tool_call_chunk,omitempty"`
	FinishReason  string     `json:"finish_reason,omitempty"`
}

// StreamRequest is the provider-agnostic request used for both streaming
// and one-shot generation.
type StreamRequest struct {
	Messages []Message     `json:"messages"`
	Tools    []Tool        `json:"tools,omitempty"`
	Options  StreamOptions `json:"options,omitempty"`
}

// Client is the common interface implemented by every LLM provider.
type Client interface {
	StreamText(ctx context.Context, req StreamRequest) (<-chan StreamChunk, error)
	GenerateText(ctx context.Context, req StreamRequest) (string, error)
	GetProviderID() string
	GetModelKey() string
}

// ProviderConfig selects and configures a provider in NewClient.
type ProviderConfig struct {
	ProviderID    string `json:"providerId"`
	ModelKey      string `json:"key"`
	APIKey        string `json:"apiKey,omitempty"`
	BaseURL       string `json:"baseUrl,omitempty"`
	AgentAccessID string `json:"agentAccessId,omitempty"` // Timeweb only
}
// NewClient constructs a provider-specific Client from cfg.ProviderID.
// Supported providers: "timeweb", "openai", "anthropic", "gemini"/"google".
func NewClient(cfg ProviderConfig) (Client, error) {
	switch cfg.ProviderID {
	case "timeweb":
		return NewTimewebClient(TimewebConfig{
			// Fix: ProviderID was previously dropped here, so Timeweb
			// clients reported an empty GetProviderID().
			ProviderID:    cfg.ProviderID,
			BaseURL:       cfg.BaseURL,
			AgentAccessID: cfg.AgentAccessID,
			APIKey:        cfg.APIKey,
			ModelKey:      cfg.ModelKey,
			ProxySource:   "gooseek",
		})
	case "openai":
		return NewOpenAIClient(cfg)
	case "anthropic":
		return NewAnthropicClient(cfg)
	case "gemini", "google":
		return NewGeminiClient(cfg)
	default:
		return nil, fmt.Errorf("unknown provider: %s", cfg.ProviderID)
	}
}
// baseClient carries the provider/model identity shared by every client
// implementation via embedding.
type baseClient struct {
	providerID string
	modelKey   string
}

// GetProviderID returns the provider identifier this client was built for.
func (c *baseClient) GetProviderID() string {
	return c.providerID
}

// GetModelKey returns the provider-specific model key.
func (c *baseClient) GetModelKey() string {
	return c.modelKey
}
// readAllChunks drains ch and concatenates every ContentChunk, blocking
// until the channel is closed. Tool-call and finish-reason chunks
// contribute nothing. A byte slice is grown instead of repeated string
// concatenation, which was quadratic in the number of chunks.
func readAllChunks(ch <-chan StreamChunk) string {
	var buf []byte
	for chunk := range ch {
		buf = append(buf, chunk.ContentChunk...)
	}
	return string(buf)
}
type streamReader struct {
ch <-chan StreamChunk
buffer []byte
}
func (r *streamReader) Read(p []byte) (n int, err error) {
if len(r.buffer) > 0 {
n = copy(p, r.buffer)
r.buffer = r.buffer[n:]
return n, nil
}
chunk, ok := <-r.ch
if !ok {
return 0, io.EOF
}
data := []byte(chunk.ContentChunk)
n = copy(p, data)
if n < len(data) {
r.buffer = data[n:]
}
return n, nil
}

View File

@@ -0,0 +1,193 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
// GeminiClient talks to the Google Generative Language (Gemini) API via SSE.
type GeminiClient struct {
	baseClient
	apiKey  string       // Google API key
	baseURL string       // API root without trailing slash (default v1beta endpoint)
	client  *http.Client // no Timeout set: streaming responses rely on ctx for cancellation
}
// NewGeminiClient builds a client for the Google Generative Language API.
// An empty cfg.BaseURL falls back to the public v1beta endpoint; any
// trailing slash is stripped so URL building stays clean. The error result
// is always nil today but mirrors the other provider constructors.
func NewGeminiClient(cfg ProviderConfig) (*GeminiClient, error) {
	base := cfg.BaseURL
	if base == "" {
		base = "https://generativelanguage.googleapis.com/v1beta"
	}
	base = strings.TrimSuffix(base, "/")
	c := &GeminiClient{
		baseClient: baseClient{providerID: cfg.ProviderID, modelKey: cfg.ModelKey},
		apiKey:     cfg.APIKey,
		baseURL:    base,
		client:     &http.Client{},
	}
	return c, nil
}
// geminiRequest is the generateContent / streamGenerateContent payload.
type geminiRequest struct {
	Contents          []geminiContent        `json:"contents"`
	SystemInstruction *geminiContent         `json:"systemInstruction,omitempty"`
	GenerationConfig  geminiGenerationConfig `json:"generationConfig,omitempty"` // non-pointer struct: always serialized even when zero
	Tools             []geminiTool           `json:"tools,omitempty"`
}

// geminiContent is one conversation turn ("user" or "model") or, with no
// role, the system instruction.
type geminiContent struct {
	Role  string       `json:"role,omitempty"`
	Parts []geminiPart `json:"parts"`
}

// geminiPart is a single text fragment; this client sends text parts only.
type geminiPart struct {
	Text string `json:"text,omitempty"`
}

// geminiGenerationConfig maps StreamOptions onto Gemini sampling knobs.
type geminiGenerationConfig struct {
	MaxOutputTokens int     `json:"maxOutputTokens,omitempty"`
	Temperature     float64 `json:"temperature,omitempty"`
	TopP            float64 `json:"topP,omitempty"`
}

// geminiTool groups the function declarations exposed to the model.
type geminiTool struct {
	FunctionDeclarations []geminiFunctionDecl `json:"functionDeclarations,omitempty"`
}

// geminiFunctionDecl describes one callable function; Parameters is a
// JSON Schema object.
type geminiFunctionDecl struct {
	Name        string      `json:"name"`
	Description string      `json:"description"`
	Parameters  interface{} `json:"parameters"`
}

// geminiStreamResponse is one SSE frame; StreamText consumes only the
// first candidate's text parts and finish reason.
type geminiStreamResponse struct {
	Candidates []struct {
		Content struct {
			Parts []struct {
				Text string `json:"text"`
			} `json:"parts"`
		} `json:"content"`
		FinishReason string `json:"finishReason,omitempty"`
	} `json:"candidates"`
}
// StreamText sends a streaming generateContent request (SSE) and returns a
// channel of incremental chunks. System messages become the out-of-band
// systemInstruction; assistant messages map to Gemini's "model" role and
// everything else (user/tool) to "user". The channel is closed when the
// stream ends or the context is cancelled.
func (c *GeminiClient) StreamText(ctx context.Context, req StreamRequest) (<-chan StreamChunk, error) {
	contents := make([]geminiContent, 0, len(req.Messages))
	var systemInstruction *geminiContent
	for _, m := range req.Messages {
		if m.Role == RoleSystem {
			// Last system message wins; it travels out-of-band.
			systemInstruction = &geminiContent{
				Parts: []geminiPart{{Text: m.Content}},
			}
			continue
		}
		// Gemini only knows "user" and "model" roles here.
		role := "user"
		if m.Role == RoleAssistant {
			role = "model"
		}
		contents = append(contents, geminiContent{
			Role:  role,
			Parts: []geminiPart{{Text: m.Content}},
		})
	}
	geminiReq := geminiRequest{
		Contents:          contents,
		SystemInstruction: systemInstruction,
		GenerationConfig: geminiGenerationConfig{
			MaxOutputTokens: req.Options.MaxTokens,
			Temperature:     req.Options.Temperature,
			TopP:            req.Options.TopP,
		},
	}
	if len(req.Tools) > 0 {
		decls := make([]geminiFunctionDecl, len(req.Tools))
		for i, t := range req.Tools {
			decls[i] = geminiFunctionDecl{
				Name:        t.Name,
				Description: t.Description,
				Parameters:  t.Schema,
			}
		}
		geminiReq.Tools = []geminiTool{{FunctionDeclarations: decls}}
	}
	body, err := json.Marshal(geminiReq)
	if err != nil {
		return nil, err
	}
	// Fix: send the API key as the documented x-goog-api-key header rather
	// than a ?key= query parameter, so the secret does not leak into
	// proxy/server logs or error messages containing the URL.
	url := fmt.Sprintf("%s/models/%s:streamGenerateContent?alt=sse", c.baseURL, c.modelKey)
	httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("x-goog-api-key", c.apiKey)
	resp, err := c.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		errBody, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, fmt.Errorf("gemini API error: %d - %s", resp.StatusCode, string(errBody))
	}
	ch := make(chan StreamChunk, 100)
	go func() {
		defer close(ch)
		defer resp.Body.Close()
		scanner := bufio.NewScanner(resp.Body)
		// Fix: raise the 64 KiB default line limit of bufio.Scanner; large
		// SSE data lines would otherwise terminate the stream silently.
		scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
		for scanner.Scan() {
			line := scanner.Text()
			if !strings.HasPrefix(line, "data: ") {
				continue
			}
			data := strings.TrimPrefix(line, "data: ")
			var response geminiStreamResponse
			if err := json.Unmarshal([]byte(data), &response); err != nil {
				// Skip malformed or keep-alive payloads.
				continue
			}
			if len(response.Candidates) == 0 {
				continue
			}
			candidate := response.Candidates[0]
			for _, part := range candidate.Content.Parts {
				if part.Text != "" {
					ch <- StreamChunk{ContentChunk: part.Text}
				}
			}
			if candidate.FinishReason != "" {
				ch <- StreamChunk{FinishReason: candidate.FinishReason}
			}
		}
	}()
	return ch, nil
}
// GenerateText runs a streaming request and concatenates every content
// chunk into a single string; finish reasons are discarded.
func (c *GeminiClient) GenerateText(ctx context.Context, req StreamRequest) (string, error) {
	stream, err := c.StreamText(ctx, req)
	if err != nil {
		return "", err
	}
	return readAllChunks(stream), nil
}

View File

@@ -0,0 +1,166 @@
package llm
import (
"context"
"encoding/json"
"errors"
"io"
"github.com/sashabaranov/go-openai"
)
// OpenAIClient wraps the go-openai SDK for chat completions.
type OpenAIClient struct {
	baseClient
	client *openai.Client // configured SDK client (custom BaseURL supported)
}
// NewOpenAIClient wraps the go-openai SDK client. A non-empty cfg.BaseURL
// overrides the default OpenAI endpoint (useful for compatible gateways).
// The error result is always nil today but mirrors the other constructors.
func NewOpenAIClient(cfg ProviderConfig) (*OpenAIClient, error) {
	sdkCfg := openai.DefaultConfig(cfg.APIKey)
	if cfg.BaseURL != "" {
		sdkCfg.BaseURL = cfg.BaseURL
	}
	c := &OpenAIClient{
		baseClient: baseClient{providerID: cfg.ProviderID, modelKey: cfg.ModelKey},
		client:     openai.NewClientWithConfig(sdkCfg),
	}
	return c, nil
}
// StreamText opens a streaming chat completion and converts the SDK events
// into StreamChunks. Tool-call fragments are accumulated across deltas —
// OpenAI streams function arguments as partial JSON strings — and emitted
// as a single ToolCallChunk when the stream ends.
func (c *OpenAIClient) StreamText(ctx context.Context, req StreamRequest) (<-chan StreamChunk, error) {
	messages := make([]openai.ChatCompletionMessage, 0, len(req.Messages))
	for _, m := range req.Messages {
		msg := openai.ChatCompletionMessage{
			Role:    string(m.Role),
			Content: m.Content,
		}
		if m.Name != "" {
			msg.Name = m.Name
		}
		if m.ToolCallID != "" {
			msg.ToolCallID = m.ToolCallID
		}
		if len(m.ToolCalls) > 0 {
			msg.ToolCalls = make([]openai.ToolCall, len(m.ToolCalls))
			for i, tc := range m.ToolCalls {
				// Marshal of map[string]interface{} only fails on
				// unsupported value types; fall back to an empty object.
				args, err := json.Marshal(tc.Arguments)
				if err != nil {
					args = []byte("{}")
				}
				msg.ToolCalls[i] = openai.ToolCall{
					ID:   tc.ID,
					Type: openai.ToolTypeFunction,
					Function: openai.FunctionCall{
						Name:      tc.Name,
						Arguments: string(args),
					},
				}
			}
		}
		messages = append(messages, msg)
	}
	chatReq := openai.ChatCompletionRequest{
		Model:    c.modelKey,
		Messages: messages,
		Stream:   true,
	}
	if req.Options.MaxTokens > 0 {
		chatReq.MaxTokens = req.Options.MaxTokens
	}
	if req.Options.Temperature > 0 {
		chatReq.Temperature = float32(req.Options.Temperature)
	}
	if req.Options.TopP > 0 {
		chatReq.TopP = float32(req.Options.TopP)
	}
	if len(req.Tools) > 0 {
		chatReq.Tools = make([]openai.Tool, len(req.Tools))
		for i, t := range req.Tools {
			chatReq.Tools[i] = openai.Tool{
				Type: openai.ToolTypeFunction,
				Function: &openai.FunctionDefinition{
					Name:        t.Name,
					Description: t.Description,
					Parameters:  t.Schema,
				},
			}
		}
	}
	stream, err := c.client.CreateChatCompletionStream(ctx, chatReq)
	if err != nil {
		return nil, err
	}
	ch := make(chan StreamChunk, 100)
	go func() {
		defer close(ch)
		defer stream.Close()
		// Partially streamed tool calls, keyed by delta index. Arguments
		// arrive as JSON *fragments*, so raw bytes are buffered and parsed
		// once at end-of-stream. (The previous per-fragment json.Unmarshal
		// nearly always failed and silently dropped the arguments.)
		type pendingCall struct {
			id, name string
			rawArgs  []byte
		}
		pending := make(map[int]*pendingCall)
		maxIdx := -1
		flush := func() {
			if len(pending) == 0 {
				return
			}
			calls := make([]ToolCall, 0, len(pending))
			for i := 0; i <= maxIdx; i++ { // deterministic order by index
				p, ok := pending[i]
				if !ok {
					continue
				}
				args := make(map[string]interface{})
				if len(p.rawArgs) > 0 {
					// Best effort: malformed args yield an empty map.
					_ = json.Unmarshal(p.rawArgs, &args)
				}
				calls = append(calls, ToolCall{ID: p.id, Name: p.name, Arguments: args})
			}
			ch <- StreamChunk{ToolCallChunk: calls}
		}
		for {
			response, err := stream.Recv()
			if errors.Is(err, io.EOF) {
				flush()
				return
			}
			if err != nil {
				// Transport/decode failure: end the stream (StreamChunk
				// has no error slot to surface it).
				return
			}
			if len(response.Choices) == 0 {
				continue
			}
			choice := response.Choices[0]
			if choice.Delta.Content != "" {
				ch <- StreamChunk{ContentChunk: choice.Delta.Content}
			}
			for _, tc := range choice.Delta.ToolCalls {
				// Fix: Index is *int and may be nil; the old code
				// dereferenced it unconditionally and could panic.
				idx := 0
				if tc.Index != nil {
					idx = *tc.Index
				}
				if idx > maxIdx {
					maxIdx = idx
				}
				p, ok := pending[idx]
				if !ok {
					p = &pendingCall{}
					pending[idx] = p
				}
				// ID/name usually appear only on the first fragment.
				if tc.ID != "" {
					p.id = tc.ID
				}
				if tc.Function.Name != "" {
					p.name = tc.Function.Name
				}
				p.rawArgs = append(p.rawArgs, tc.Function.Arguments...)
			}
			if choice.FinishReason != "" {
				ch <- StreamChunk{FinishReason: string(choice.FinishReason)}
			}
		}
	}()
	return ch, nil
}
// GenerateText runs a streaming request and concatenates every content
// chunk into a single string; tool-call chunks and finish reasons are
// discarded.
func (c *OpenAIClient) GenerateText(ctx context.Context, req StreamRequest) (string, error) {
	stream, err := c.StreamText(ctx, req)
	if err != nil {
		return "", err
	}
	return readAllChunks(stream), nil
}

View File

@@ -0,0 +1,229 @@
package llm
import (
"errors"
"sort"
"sync"
)
// ModelCapability labels something a model is good at; used for routing
// requests to the most suitable registered model.
type ModelCapability string

// Capability labels referenced by ModelSpec.Capabilities.
const (
	CapReasoning   ModelCapability = "reasoning"
	CapCoding      ModelCapability = "coding"
	CapSearch      ModelCapability = "search"
	CapCreative    ModelCapability = "creative"
	CapFast        ModelCapability = "fast"
	CapLongContext ModelCapability = "long_context"
	CapVision      ModelCapability = "vision"
	CapMath        ModelCapability = "math"
	CapVideo       ModelCapability = "video"
	CapImage       ModelCapability = "image"
)

// ModelSpec describes a routable model: identity, capabilities, pricing
// and selection priority (lower Priority wins).
type ModelSpec struct {
	ID           string
	Provider     string
	Model        string
	Capabilities []ModelCapability
	CostPer1K    float64
	MaxContext   int
	Priority     int
	MaxTokens    int
	Description  string
}

// HasCapability reports whether cap appears in m.Capabilities.
func (m ModelSpec) HasCapability(cap ModelCapability) bool {
	for _, have := range m.Capabilities {
		if have == cap {
			return true
		}
	}
	return false
}
// ModelRegistry is a concurrency-safe catalogue mapping model IDs to their
// specs and ready-to-use clients.
type ModelRegistry struct {
	models  map[string]ModelSpec
	clients map[string]Client
	mu      sync.RWMutex
}

// NewModelRegistry returns an empty, ready-to-use registry.
func NewModelRegistry() *ModelRegistry {
	return &ModelRegistry{
		models:  map[string]ModelSpec{},
		clients: map[string]Client{},
	}
}

// Register adds (or replaces) a model spec and its client under spec.ID.
func (r *ModelRegistry) Register(spec ModelSpec, client Client) {
	r.mu.Lock()
	r.models[spec.ID] = spec
	r.clients[spec.ID] = client
	r.mu.Unlock()
}

// Unregister removes a model and its client; unknown IDs are a no-op.
func (r *ModelRegistry) Unregister(id string) {
	r.mu.Lock()
	delete(r.models, id)
	delete(r.clients, id)
	r.mu.Unlock()
}
// GetByID looks up a model spec and its client by exact ID.
func (r *ModelRegistry) GetByID(id string) (Client, ModelSpec, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	spec, haveSpec := r.models[id]
	if !haveSpec {
		return nil, ModelSpec{}, errors.New("model not found: " + id)
	}
	client, haveClient := r.clients[id]
	if !haveClient {
		return nil, ModelSpec{}, errors.New("client not found: " + id)
	}
	return client, spec, nil
}

// GetBest returns the registered model with the given capability that has
// the lowest Priority (ties broken by lowest CostPer1K), plus its client.
func (r *ModelRegistry) GetBest(cap ModelCapability) (Client, ModelSpec, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	candidates := make([]ModelSpec, 0, len(r.models))
	for _, spec := range r.models {
		if spec.HasCapability(cap) {
			candidates = append(candidates, spec)
		}
	}
	if len(candidates) == 0 {
		return nil, ModelSpec{}, errors.New("no model found with capability: " + string(cap))
	}
	sort.Slice(candidates, func(i, j int) bool {
		a, b := candidates[i], candidates[j]
		if a.Priority == b.Priority {
			return a.CostPer1K < b.CostPer1K
		}
		return a.Priority < b.Priority
	})
	winner := candidates[0]
	return r.clients[winner.ID], winner, nil
}
// GetAllWithCapability lists every model that has the capability, sorted
// by ascending Priority. Returns nil when nothing matches.
func (r *ModelRegistry) GetAllWithCapability(cap ModelCapability) []ModelSpec {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var matches []ModelSpec
	for _, spec := range r.models {
		if spec.HasCapability(cap) {
			matches = append(matches, spec)
		}
	}
	sort.Slice(matches, func(i, j int) bool {
		return matches[i].Priority < matches[j].Priority
	})
	return matches
}

// GetAll returns a snapshot of every registered model spec, in map
// iteration (i.e. unspecified) order.
func (r *ModelRegistry) GetAll() []ModelSpec {
	r.mu.RLock()
	defer r.mu.RUnlock()
	out := make([]ModelSpec, 0, len(r.models))
	for _, spec := range r.models {
		out = append(out, spec)
	}
	return out
}

// GetClient returns just the client registered under the model ID.
func (r *ModelRegistry) GetClient(id string) (Client, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if client, ok := r.clients[id]; ok {
		return client, nil
	}
	return nil, errors.New("client not found: " + id)
}

// Count reports how many models are currently registered.
func (r *ModelRegistry) Count() int {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return len(r.models)
}
// DefaultModels is the built-in model catalogue used to seed a registry.
// Lower Priority is preferred; CostPer1K breaks priority ties in GetBest.
// NOTE(review): CostPer1K values presumably track vendor list prices at
// commit time — verify before relying on them for billing decisions.
var DefaultModels = []ModelSpec{
	{
		ID:           "gpt-4o",
		Provider:     "openai",
		Model:        "gpt-4o",
		Capabilities: []ModelCapability{CapSearch, CapFast, CapVision, CapCoding, CapCreative},
		CostPer1K:    0.005,
		MaxContext:   128000,
		MaxTokens:    16384,
		Priority:     1,
		Description:  "GPT-4o: fast multimodal model with search",
	},
	{
		ID:           "gpt-4o-mini",
		Provider:     "openai",
		Model:        "gpt-4o-mini",
		Capabilities: []ModelCapability{CapFast, CapCoding},
		CostPer1K:    0.00015,
		MaxContext:   128000,
		MaxTokens:    16384,
		Priority:     2,
		Description:  "GPT-4o Mini: cost-effective for simple tasks",
	},
	{
		ID:           "claude-3-opus",
		Provider:     "anthropic",
		Model:        "claude-3-opus-20240229",
		Capabilities: []ModelCapability{CapReasoning, CapCoding, CapCreative, CapLongContext},
		CostPer1K:    0.015,
		MaxContext:   200000,
		MaxTokens:    4096,
		Priority:     1,
		Description:  "Claude 3 Opus: best for complex reasoning and coding",
	},
	{
		// ID says "sonnet" but the wire model is the 3.5 revision.
		ID:           "claude-3-sonnet",
		Provider:     "anthropic",
		Model:        "claude-3-5-sonnet-20241022",
		Capabilities: []ModelCapability{CapCoding, CapCreative, CapFast},
		CostPer1K:    0.003,
		MaxContext:   200000,
		MaxTokens:    8192,
		Priority:     1,
		Description:  "Claude 3.5 Sonnet: balanced speed and quality",
	},
	{
		ID:           "gemini-1.5-pro",
		Provider:     "gemini",
		Model:        "gemini-1.5-pro",
		Capabilities: []ModelCapability{CapLongContext, CapSearch, CapVision, CapMath},
		CostPer1K:    0.00125,
		MaxContext:   2000000,
		MaxTokens:    8192,
		Priority:     1,
		Description:  "Gemini 1.5 Pro: best for long context and research",
	},
	{
		ID:           "gemini-1.5-flash",
		Provider:     "gemini",
		Model:        "gemini-1.5-flash",
		Capabilities: []ModelCapability{CapFast, CapVision},
		CostPer1K:    0.000075,
		MaxContext:   1000000,
		MaxTokens:    8192,
		Priority:     2,
		Description:  "Gemini 1.5 Flash: fastest for lightweight tasks",
	},
}

View File

@@ -0,0 +1,402 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
)
// TimewebClient talks to a Timeweb Cloud AI agent through its
// OpenAI-compatible chat completions endpoint.
type TimewebClient struct {
	baseClient
	httpClient    *http.Client // built with a 120s Timeout — NOTE(review): that also caps streaming reads
	baseURL       string       // API root, e.g. https://api.timeweb.cloud
	agentAccessID string       // agent identifier embedded in the URL path
	apiKey        string       // bearer token for the Authorization header
	proxySource   string       // forwarded as the x-proxy-source header
}

// TimewebConfig collects the settings needed to build a TimewebClient.
type TimewebConfig struct {
	ProviderID    string
	ModelKey      string
	BaseURL       string // optional; defaults to the public endpoint
	AgentAccessID string // required
	APIKey        string // required
	ProxySource   string // optional; defaults to "gooseek"
}
// NewTimewebClient builds a client for the Timeweb Cloud AI agent endpoint.
// AgentAccessID and APIKey are mandatory; BaseURL defaults to the public
// API and is normalized without a trailing slash so the URL formatting in
// Stream/GenerateText cannot produce double slashes.
func NewTimewebClient(cfg TimewebConfig) (*TimewebClient, error) {
	if cfg.AgentAccessID == "" {
		return nil, errors.New("agent_access_id is required for Timeweb")
	}
	if cfg.APIKey == "" {
		return nil, errors.New("api_key is required for Timeweb")
	}
	baseURL := cfg.BaseURL
	if baseURL == "" {
		baseURL = "https://api.timeweb.cloud"
	}
	// Fix: trim a trailing slash, consistent with the Anthropic and Gemini
	// constructors; otherwise a configured "https://host/" yields
	// "https://host//api/..." URLs.
	baseURL = strings.TrimSuffix(baseURL, "/")
	proxySource := cfg.ProxySource
	if proxySource == "" {
		proxySource = "gooseek"
	}
	return &TimewebClient{
		baseClient: baseClient{
			providerID: cfg.ProviderID,
			modelKey:   cfg.ModelKey,
		},
		httpClient: &http.Client{
			// NOTE(review): Timeout also bounds reading a streaming body,
			// so streams longer than 120s are cut off — confirm intended.
			Timeout: 120 * time.Second,
		},
		baseURL:       baseURL,
		agentAccessID: cfg.AgentAccessID,
		apiKey:        cfg.APIKey,
		proxySource:   proxySource,
	}, nil
}
// timewebChatRequest is the OpenAI-compatible chat completion payload sent
// to the Timeweb agent endpoint.
type timewebChatRequest struct {
	Model       string           `json:"model,omitempty"`
	Messages    []timewebMessage `json:"messages"`
	Stream      bool             `json:"stream,omitempty"`
	Temperature float64          `json:"temperature,omitempty"`
	MaxTokens   int              `json:"max_tokens,omitempty"`
	TopP        float64          `json:"top_p,omitempty"`
	Tools       []timewebTool    `json:"tools,omitempty"`
	Stop        []string         `json:"stop,omitempty"`
}

// timewebMessage mirrors an OpenAI chat message. Content is interface{},
// leaving room for structured (multi-part) content payloads.
type timewebMessage struct {
	Role       string            `json:"role"`
	Content    interface{}       `json:"content"`
	Name       string            `json:"name,omitempty"`
	ToolCalls  []timewebToolCall `json:"tool_calls,omitempty"`
	ToolCallID string            `json:"tool_call_id,omitempty"`
}

// timewebTool declares one callable tool in OpenAI format.
type timewebTool struct {
	Type     string          `json:"type"` // always "function" in this client
	Function timewebFunction `json:"function"`
}

// timewebFunction describes a single function tool; Parameters is a JSON
// Schema object.
type timewebFunction struct {
	Name        string      `json:"name"`
	Description string      `json:"description"`
	Parameters  interface{} `json:"parameters"`
}

// timewebToolCall is a function invocation in OpenAI wire format;
// Arguments is a raw JSON string. NOTE(review): there is no "index" field,
// so streamed deltas cannot be matched to parallel tool calls by index.
type timewebToolCall struct {
	ID       string `json:"id"`
	Type     string `json:"type"`
	Function struct {
		Name      string `json:"name"`
		Arguments string `json:"arguments"`
	} `json:"function"`
}

// timewebChatResponse is the non-streaming completion response.
type timewebChatResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		Index   int `json:"index"`
		Message struct {
			Role      string            `json:"role"`
			Content   string            `json:"content"`
			ToolCalls []timewebToolCall `json:"tool_calls,omitempty"`
		} `json:"message"`
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

// timewebStreamResponse is a single SSE delta frame of a streaming
// completion.
type timewebStreamResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		Index int `json:"index"`
		Delta struct {
			Role      string            `json:"role,omitempty"`
			Content   string            `json:"content,omitempty"`
			ToolCalls []timewebToolCall `json:"tool_calls,omitempty"`
		} `json:"delta"`
		FinishReason string `json:"finish_reason,omitempty"`
	} `json:"choices"`
}
// StreamText sends a streaming OpenAI-compatible chat completion to the
// Timeweb agent endpoint and converts SSE lines into StreamChunks.
// Tool-call fragments are accumulated across deltas (arguments arrive as
// partial JSON strings) and emitted as one ToolCallChunk when the stream
// finishes, on either the [DONE] marker or EOF.
func (c *TimewebClient) StreamText(ctx context.Context, req StreamRequest) (<-chan StreamChunk, error) {
	messages := make([]timewebMessage, 0, len(req.Messages))
	for _, m := range req.Messages {
		msg := timewebMessage{
			Role:    string(m.Role),
			Content: m.Content,
		}
		if m.Name != "" {
			msg.Name = m.Name
		}
		if m.ToolCallID != "" {
			msg.ToolCallID = m.ToolCallID
		}
		if len(m.ToolCalls) > 0 {
			msg.ToolCalls = make([]timewebToolCall, len(m.ToolCalls))
			for i, tc := range m.ToolCalls {
				// Marshal of map[string]interface{} only fails on
				// unsupported value types; fall back to an empty object.
				args, err := json.Marshal(tc.Arguments)
				if err != nil {
					args = []byte("{}")
				}
				msg.ToolCalls[i] = timewebToolCall{
					ID:   tc.ID,
					Type: "function",
				}
				msg.ToolCalls[i].Function.Name = tc.Name
				msg.ToolCalls[i].Function.Arguments = string(args)
			}
		}
		messages = append(messages, msg)
	}
	chatReq := timewebChatRequest{
		Model:    c.modelKey,
		Messages: messages,
		Stream:   true,
	}
	if req.Options.MaxTokens > 0 {
		chatReq.MaxTokens = req.Options.MaxTokens
	}
	if req.Options.Temperature > 0 {
		chatReq.Temperature = req.Options.Temperature
	}
	if req.Options.TopP > 0 {
		chatReq.TopP = req.Options.TopP
	}
	if len(req.Options.StopWords) > 0 {
		chatReq.Stop = req.Options.StopWords
	}
	if len(req.Tools) > 0 {
		chatReq.Tools = make([]timewebTool, len(req.Tools))
		for i, t := range req.Tools {
			chatReq.Tools[i] = timewebTool{
				Type: "function",
				Function: timewebFunction{
					Name:        t.Name,
					Description: t.Description,
					Parameters:  t.Schema,
				},
			}
		}
	}
	body, err := json.Marshal(chatReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	url := fmt.Sprintf("%s/api/v1/cloud-ai/agents/%s/v1/chat/completions", c.baseURL, c.agentAccessID)
	httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Authorization", "Bearer "+c.apiKey)
	httpReq.Header.Set("x-proxy-source", c.proxySource)
	resp, err := c.httpClient.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		defer resp.Body.Close()
		errBody, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("Timeweb API error: status %d, body: %s", resp.StatusCode, string(errBody))
	}
	ch := make(chan StreamChunk, 100)
	go func() {
		defer close(ch)
		defer resp.Body.Close()
		// Partially streamed tool calls in arrival order. The wire format
		// carries no index field, so a fragment with a new ID starts a new
		// call and ID-less fragments extend the most recent one. (The old
		// code forced every fragment into slot 0 — merging parallel calls —
		// and json.Unmarshal'ed each partial argument fragment, which
		// almost always failed and silently dropped the arguments.)
		type pendingCall struct {
			id, name string
			rawArgs  []byte
		}
		var pendingCalls []*pendingCall
		flush := func() {
			if len(pendingCalls) == 0 {
				return
			}
			calls := make([]ToolCall, 0, len(pendingCalls))
			for _, p := range pendingCalls {
				args := make(map[string]interface{})
				if len(p.rawArgs) > 0 {
					// Best effort: malformed args yield an empty map.
					_ = json.Unmarshal(p.rawArgs, &args)
				}
				calls = append(calls, ToolCall{ID: p.id, Name: p.name, Arguments: args})
			}
			ch <- StreamChunk{ToolCallChunk: calls}
		}
		reader := bufio.NewReader(resp.Body)
		for {
			line, err := reader.ReadString('\n')
			if err != nil {
				if err == io.EOF {
					// Server closed without [DONE]; emit what we have.
					flush()
				}
				return
			}
			line = strings.TrimSpace(line)
			if line == "" || !strings.HasPrefix(line, "data: ") {
				continue
			}
			data := strings.TrimPrefix(line, "data: ")
			if data == "[DONE]" {
				flush()
				return
			}
			var streamResp timewebStreamResponse
			if err := json.Unmarshal([]byte(data), &streamResp); err != nil {
				// Skip malformed or keep-alive payloads.
				continue
			}
			if len(streamResp.Choices) == 0 {
				continue
			}
			choice := streamResp.Choices[0]
			if choice.Delta.Content != "" {
				ch <- StreamChunk{ContentChunk: choice.Delta.Content}
			}
			for _, tc := range choice.Delta.ToolCalls {
				if tc.ID != "" || len(pendingCalls) == 0 {
					pendingCalls = append(pendingCalls, &pendingCall{})
				}
				p := pendingCalls[len(pendingCalls)-1]
				if tc.ID != "" {
					p.id = tc.ID
				}
				if tc.Function.Name != "" {
					p.name = tc.Function.Name
				}
				p.rawArgs = append(p.rawArgs, tc.Function.Arguments...)
			}
			if choice.FinishReason != "" {
				ch <- StreamChunk{FinishReason: choice.FinishReason}
			}
		}
	}()
	return ch, nil
}
// GenerateText performs a single non-streaming chat completion and returns
// the assistant message content. Tool definitions are forwarded, but tool
// calls in the response are not surfaced (the signature only returns text).
func (c *TimewebClient) GenerateText(ctx context.Context, req StreamRequest) (string, error) {
	messages := make([]timewebMessage, 0, len(req.Messages))
	for _, m := range req.Messages {
		msg := timewebMessage{
			Role:    string(m.Role),
			Content: m.Content,
		}
		if m.Name != "" {
			msg.Name = m.Name
		}
		if m.ToolCallID != "" {
			msg.ToolCallID = m.ToolCallID
		}
		// Fix: forward prior assistant tool calls, as StreamText does;
		// dropping them breaks conversations that include tool results
		// (a tool message needs its matching assistant tool_calls turn).
		if len(m.ToolCalls) > 0 {
			msg.ToolCalls = make([]timewebToolCall, len(m.ToolCalls))
			for i, tc := range m.ToolCalls {
				args, err := json.Marshal(tc.Arguments)
				if err != nil {
					args = []byte("{}")
				}
				msg.ToolCalls[i] = timewebToolCall{
					ID:   tc.ID,
					Type: "function",
				}
				msg.ToolCalls[i].Function.Name = tc.Name
				msg.ToolCalls[i].Function.Arguments = string(args)
			}
		}
		messages = append(messages, msg)
	}
	chatReq := timewebChatRequest{
		Model:    c.modelKey,
		Messages: messages,
		Stream:   false,
	}
	if req.Options.MaxTokens > 0 {
		chatReq.MaxTokens = req.Options.MaxTokens
	}
	if req.Options.Temperature > 0 {
		chatReq.Temperature = req.Options.Temperature
	}
	if req.Options.TopP > 0 {
		chatReq.TopP = req.Options.TopP
	}
	if len(req.Tools) > 0 {
		chatReq.Tools = make([]timewebTool, len(req.Tools))
		for i, t := range req.Tools {
			chatReq.Tools[i] = timewebTool{
				Type: "function",
				Function: timewebFunction{
					Name:        t.Name,
					Description: t.Description,
					Parameters:  t.Schema,
				},
			}
		}
	}
	body, err := json.Marshal(chatReq)
	if err != nil {
		return "", fmt.Errorf("failed to marshal request: %w", err)
	}
	url := fmt.Sprintf("%s/api/v1/cloud-ai/agents/%s/v1/chat/completions", c.baseURL, c.agentAccessID)
	httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Authorization", "Bearer "+c.apiKey)
	httpReq.Header.Set("x-proxy-source", c.proxySource)
	resp, err := c.httpClient.Do(httpReq)
	if err != nil {
		return "", fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		errBody, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("Timeweb API error: status %d, body: %s", resp.StatusCode, string(errBody))
	}
	var chatResp timewebChatResponse
	if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil {
		return "", fmt.Errorf("failed to decode response: %w", err)
	}
	if len(chatResp.Choices) == 0 {
		return "", errors.New("no choices in response")
	}
	return chatResp.Choices[0].Message.Content, nil
}