Files
gooseek/backend/internal/computer/planner.go
home 06fe57c765 feat: Go backend, enhanced search, new widgets, Docker deploy
Major changes:
- Add Go backend (backend/) with microservices architecture
- Enhanced master-agents-svc: reranker, content-classifier, stealth-crawler,
  proxy-manager, media-search, fastClassifier, language detection
- New web-svc widgets: KnowledgeCard, ProductCard, ProfileCard, VideoCard,
  UnifiedCard, CardGallery, InlineImageGallery, SourcesPanel, RelatedQuestions
- Improved discover-svc with discover-db integration
- Docker deployment improvements (Caddyfile, vendor.sh, BUILD.md)
- Library-svc: project_id schema migration
- Remove deprecated finance-svc and travel-svc
- Localization improvements across services

Made-with: Cursor
2026-02-27 04:15:32 +03:00

372 lines
9.5 KiB
Go

package computer
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"github.com/gooseek/backend/internal/llm"
"github.com/google/uuid"
)
// Planner decomposes user queries into executable task plans, delegating the
// actual decomposition to the best available LLM from a ModelRegistry.
type Planner struct {
	registry *llm.ModelRegistry // model pool used to pick a planning-capable LLM
}
// NewPlanner returns a Planner backed by the given model registry.
func NewPlanner(registry *llm.ModelRegistry) *Planner {
	p := &Planner{registry: registry}
	return p
}
// Plan asks an LLM to decompose query into a structured TaskPlan. A
// reasoning-capable model is preferred, with a coding model as fallback; if
// neither exists an error is returned. Any user memory is serialized to JSON
// and appended to the prompt as extra context. Model-call or parse failures
// are NOT returned as errors: the method degrades to a heuristic default plan
// so the caller always gets something executable (best-effort planning).
func (p *Planner) Plan(ctx context.Context, query string, memory map[string]interface{}) (*TaskPlan, error) {
	// Prefer a reasoning model; fall back to a coding model before giving up.
	client, _, err := p.registry.GetBest(llm.CapReasoning)
	if err != nil {
		client, _, err = p.registry.GetBest(llm.CapCoding)
		if err != nil {
			return nil, fmt.Errorf("no suitable model for planning: %w", err)
		}
	}
	// Marshal error deliberately ignored: memory is an in-process map.
	memoryContext := ""
	if len(memory) > 0 {
		memoryJSON, _ := json.Marshal(memory)
		memoryContext = fmt.Sprintf("\n\nUser context and memory:\n%s", string(memoryJSON))
	}
	// NOTE: the raw-string prompt below defines the JSON schema that
	// parsePlanResponse expects — keep the two in sync.
	prompt := fmt.Sprintf(`You are a task planning AI. Analyze this query and create an execution plan.
Query: %s%s
Break this into subtasks. Each subtask should be:
1. Atomic - one clear action
2. Independent where possible (for parallel execution)
3. Have clear dependencies when needed
Available task types:
- research: Search web, gather information
- code: Write/generate code
- analysis: Analyze data, extract insights
- design: Design architecture, create plans
- deploy: Deploy applications, run code
- monitor: Set up monitoring, tracking
- report: Generate reports, summaries
- communicate: Send emails, messages
- transform: Convert data formats
- validate: Check, verify results
For each subtask specify:
- type: one of the task types above
- description: what to do
- dependencies: list of subtask IDs this depends on (empty if none)
- capabilities: required AI capabilities (reasoning, coding, search, creative, fast, long_context, vision, math)
Respond in JSON format:
{
"summary": "Brief summary of the plan",
"subtasks": [
{
"id": "1",
"type": "research",
"description": "Search for...",
"dependencies": [],
"capabilities": ["search"]
},
{
"id": "2",
"type": "code",
"description": "Write code to...",
"dependencies": ["1"],
"capabilities": ["coding"]
}
],
"estimatedCost": 0.05,
"estimatedTimeSeconds": 120
}
Create 3-10 subtasks. Be specific and actionable.`, query, memoryContext)
	messages := []llm.Message{
		{Role: llm.RoleUser, Content: prompt},
	}
	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: messages,
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		// Generation failed — degrade to a heuristic plan rather than erroring.
		return p.createDefaultPlan(query), nil
	}
	plan, err := p.parsePlanResponse(response)
	if err != nil {
		// Model returned unusable JSON — degrade to the heuristic plan.
		return p.createDefaultPlan(query), nil
	}
	plan.Query = query
	plan.ExecutionOrder = p.calculateExecutionOrder(plan.SubTasks)
	return plan, nil
}
// planJSONRegex grabs everything from the first "{" through the last "}" so
// JSON wrapped in prose or markdown fences is still extracted. Compiled once
// at package scope instead of on every call.
var planJSONRegex = regexp.MustCompile(`\{[\s\S]*\}`)

// parsePlanResponse converts an LLM planning response into a TaskPlan. It
// returns an error when no JSON object is present, the JSON does not
// unmarshal, or the decoded plan contains no subtasks — all three cases let
// the caller fall back to a default plan.
func (p *Planner) parsePlanResponse(response string) (*TaskPlan, error) {
	jsonMatch := planJSONRegex.FindString(response)
	if jsonMatch == "" {
		return nil, fmt.Errorf("no JSON found in response")
	}
	// Anonymous struct mirrors the JSON schema requested by the planning prompt.
	var rawPlan struct {
		Summary              string  `json:"summary"`
		EstimatedCost        float64 `json:"estimatedCost"`
		EstimatedTimeSeconds int     `json:"estimatedTimeSeconds"`
		SubTasks             []struct {
			ID           string   `json:"id"`
			Type         string   `json:"type"`
			Description  string   `json:"description"`
			Dependencies []string `json:"dependencies"`
			Capabilities []string `json:"capabilities"`
		} `json:"subtasks"`
	}
	if err := json.Unmarshal([]byte(jsonMatch), &rawPlan); err != nil {
		return nil, fmt.Errorf("failed to parse plan JSON: %w", err)
	}
	// A plan without subtasks is useless; surface it as a parse failure so the
	// caller falls back to a default plan instead of executing nothing.
	if len(rawPlan.SubTasks) == 0 {
		return nil, fmt.Errorf("plan JSON contains no subtasks")
	}
	plan := &TaskPlan{
		Summary:       rawPlan.Summary,
		EstimatedCost: rawPlan.EstimatedCost,
		EstimatedTime: rawPlan.EstimatedTimeSeconds,
		SubTasks:      make([]SubTask, len(rawPlan.SubTasks)),
	}
	for i, st := range rawPlan.SubTasks {
		caps := make([]llm.ModelCapability, len(st.Capabilities))
		for j, c := range st.Capabilities {
			caps[j] = llm.ModelCapability(c)
		}
		plan.SubTasks[i] = SubTask{
			ID:           st.ID,
			Type:         TaskType(st.Type),
			Description:  st.Description,
			Dependencies: st.Dependencies,
			RequiredCaps: caps,
			Status:       StatusPending,
			MaxRetries:   3, // default retry budget per subtask
		}
	}
	return plan, nil
}
// calculateExecutionOrder groups subtask IDs into sequential "waves": every
// task in a wave has all of its dependencies satisfied by earlier waves, so
// the tasks within one wave can run in parallel. If no task becomes ready
// (a dependency cycle, or a dependency on an ID that does not exist), all
// remaining tasks are forced into a final wave to guarantee termination.
//
// The original implementation also built a taskMap and an in-degree table
// that were never read; that dead code has been removed.
func (p *Planner) calculateExecutionOrder(subTasks []SubTask) [][]string {
	var order [][]string
	completed := make(map[string]bool, len(subTasks))
	for len(completed) < len(subTasks) {
		var wave []string
		for _, st := range subTasks {
			if completed[st.ID] {
				continue
			}
			ready := true
			for _, dep := range st.Dependencies {
				if !completed[dep] {
					ready = false
					break
				}
			}
			if ready {
				wave = append(wave, st.ID)
			}
		}
		// Deadlock guard: with a cycle or an unknown dependency the wave would
		// stay empty forever — dump everything that is left into one wave.
		if len(wave) == 0 {
			for _, st := range subTasks {
				if !completed[st.ID] {
					wave = append(wave, st.ID)
				}
			}
		}
		for _, id := range wave {
			completed[id] = true
		}
		order = append(order, wave)
	}
	return order
}
// containsAnyKeyword reports whether s contains at least one of the given
// substrings. Helper for the keyword heuristics in createDefaultPlan.
func containsAnyKeyword(s string, keywords ...string) bool {
	for _, kw := range keywords {
		if strings.Contains(s, kw) {
			return true
		}
	}
	return false
}

// createDefaultPlan builds a heuristic fallback plan used when LLM-based
// planning fails. Every plan starts with a research subtask; simple Russian/
// English keyword matching on the lowercased query then appends design+code,
// analysis+report, and/or communicate subtasks, each chained onto an earlier
// step via Dependencies.
func (p *Planner) createDefaultPlan(query string) *TaskPlan {
	queryLower := strings.ToLower(query)
	subTasks := []SubTask{
		{
			ID:           uuid.New().String(),
			Type:         TaskResearch,
			Description:  "Research and gather information about: " + query,
			Dependencies: []string{},
			RequiredCaps: []llm.ModelCapability{llm.CapSearch},
			Status:       StatusPending,
			MaxRetries:   3,
		},
	}
	// Coding-style queries: add a design step, then an implementation step.
	if containsAnyKeyword(queryLower, "код", "code", "приложение", "app", "скрипт", "script") {
		subTasks = append(subTasks, SubTask{
			ID:           uuid.New().String(),
			Type:         TaskDesign,
			Description:  "Design architecture and structure",
			Dependencies: []string{subTasks[0].ID}, // after research
			RequiredCaps: []llm.ModelCapability{llm.CapReasoning},
			Status:       StatusPending,
			MaxRetries:   3,
		})
		subTasks = append(subTasks, SubTask{
			ID:           uuid.New().String(),
			Type:         TaskCode,
			Description:  "Generate code implementation",
			Dependencies: []string{subTasks[1].ID}, // after design
			RequiredCaps: []llm.ModelCapability{llm.CapCoding},
			Status:       StatusPending,
			MaxRetries:   3,
		})
	}
	// Report/analysis queries. "отчет" (without ё) is matched as well, since
	// Russian text is commonly written without the letter ё.
	if containsAnyKeyword(queryLower, "отчёт", "отчет", "report", "анализ", "analysis") {
		subTasks = append(subTasks, SubTask{
			ID:           uuid.New().String(),
			Type:         TaskAnalysis,
			Description:  "Analyze gathered information",
			Dependencies: []string{subTasks[0].ID}, // after research
			RequiredCaps: []llm.ModelCapability{llm.CapReasoning},
			Status:       StatusPending,
			MaxRetries:   3,
		})
		subTasks = append(subTasks, SubTask{
			ID:           uuid.New().String(),
			Type:         TaskReport,
			Description:  "Generate comprehensive report",
			Dependencies: []string{subTasks[len(subTasks)-1].ID}, // after analysis
			RequiredCaps: []llm.ModelCapability{llm.CapCreative},
			Status:       StatusPending,
			MaxRetries:   3,
		})
	}
	// Notification queries: send a message after whatever step came last.
	if containsAnyKeyword(queryLower, "email", "письмо", "telegram", "отправ") {
		subTasks = append(subTasks, SubTask{
			ID:           uuid.New().String(),
			Type:         TaskCommunicate,
			Description:  "Send notification/message",
			Dependencies: []string{subTasks[len(subTasks)-1].ID},
			RequiredCaps: []llm.ModelCapability{llm.CapFast},
			Status:       StatusPending,
			MaxRetries:   3,
		})
	}
	plan := &TaskPlan{
		Query:         query,
		Summary:       "Auto-generated plan for: " + query,
		SubTasks:      subTasks,
		EstimatedCost: float64(len(subTasks)) * 0.01, // rough flat cost per subtask
		EstimatedTime: len(subTasks) * 30,            // rough 30s per subtask
	}
	plan.ExecutionOrder = p.calculateExecutionOrder(subTasks)
	return plan
}
// Replan revises an in-flight plan in light of new context or feedback. It
// sends the completed subtasks, the pending/failed subtasks, and the new
// context to a reasoning model and parses a fresh plan from the reply. On any
// failure (no reasoning model, generation error, unparseable response) the
// original plan is returned unchanged — replanning is best-effort and must
// never lose the existing plan.
func (p *Planner) Replan(ctx context.Context, plan *TaskPlan, newContext string) (*TaskPlan, error) {
	// Partition subtasks: completed ones are to be preserved; pending and
	// failed ones are candidates for modification or removal. Other statuses
	// (e.g. currently running) are intentionally sent in neither list.
	completedTasks := make([]SubTask, 0)
	pendingTasks := make([]SubTask, 0)
	for _, st := range plan.SubTasks {
		if st.Status == StatusCompleted {
			completedTasks = append(completedTasks, st)
		} else if st.Status == StatusPending || st.Status == StatusFailed {
			pendingTasks = append(pendingTasks, st)
		}
	}
	// Marshal errors deliberately ignored: inputs are in-memory structs.
	completedJSON, _ := json.Marshal(completedTasks)
	pendingJSON, _ := json.Marshal(pendingTasks)
	client, _, err := p.registry.GetBest(llm.CapReasoning)
	if err != nil {
		// No reasoning model available — keep the current plan.
		return plan, nil
	}
	// NOTE(review): the prompt says "same JSON format as before", but this is
	// a fresh conversation — the model never saw the original format; consider
	// embedding the expected schema explicitly.
	prompt := fmt.Sprintf(`You need to replan a task based on new context.
Original query: %s
Completed subtasks:
%s
Pending subtasks:
%s
New context/feedback:
%s
Adjust the plan. Keep completed tasks, modify or remove pending tasks as needed.
Add new subtasks if the new context requires it.
Respond in the same JSON format as before.`, plan.Query, string(completedJSON), string(pendingJSON), newContext)
	messages := []llm.Message{
		{Role: llm.RoleUser, Content: prompt},
	}
	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: messages,
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		// Generation failed — keep the current plan.
		return plan, nil
	}
	newPlan, err := p.parsePlanResponse(response)
	if err != nil {
		// Unparseable reply — keep the current plan.
		return plan, nil
	}
	// Carry over the original query and recompute the parallel execution waves.
	newPlan.Query = plan.Query
	newPlan.ExecutionOrder = p.calculateExecutionOrder(newPlan.SubTasks)
	return newPlan, nil
}