feat: Go backend, enhanced search, new widgets, Docker deploy
Major changes: - Add Go backend (backend/) with microservices architecture - Enhanced master-agents-svc: reranker, content-classifier, stealth-crawler, proxy-manager, media-search, fastClassifier, language detection - New web-svc widgets: KnowledgeCard, ProductCard, ProfileCard, VideoCard, UnifiedCard, CardGallery, InlineImageGallery, SourcesPanel, RelatedQuestions - Improved discover-svc with discover-db integration - Docker deployment improvements (Caddyfile, vendor.sh, BUILD.md) - Library-svc: project_id schema migration - Remove deprecated finance-svc and travel-svc - Localization improvements across services Made-with: Cursor
This commit is contained in:
587
backend/internal/computer/browser/browser.go
Normal file
587
backend/internal/computer/browser/browser.go
Normal file
@@ -0,0 +1,587 @@
|
||||
package browser
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// PlaywrightBrowser is an HTTP client for a remote Playwright automation
// server. It keeps a local registry of the sessions it opened so screenshots
// and lifetimes can be attributed to them. Safe for concurrent use; the
// sessions map is guarded by mu.
type PlaywrightBrowser struct {
	cmd       *exec.Cmd // optional locally-spawned server process (not started anywhere in this file)
	serverURL string    // base URL of the Playwright server, e.g. "http://localhost:3050"
	client    *http.Client
	sessions  map[string]*BrowserSession // live sessions keyed by session ID; guarded by mu
	mu        sync.RWMutex
	config    Config
}

// Config holds construction-time settings for PlaywrightBrowser.
// Zero values are replaced with defaults by NewPlaywrightBrowser.
type Config struct {
	PlaywrightServerURL string        // defaults to http://localhost:3050
	DefaultTimeout      time.Duration // HTTP client timeout; defaults to 30s
	Headless            bool
	UserAgent           string // browser-wide default UA; per-session option overrides it
	ProxyURL            string // browser-wide default proxy; per-session option overrides it
	ScreenshotsDir      string // defaults to /tmp/gooseek-screenshots
	RecordingsDir       string // defaults to /tmp/gooseek-recordings
}

// BrowserSession is the client-side record of one browser context/page pair
// living on the Playwright server.
type BrowserSession struct {
	ID          string // UUID generated locally, also used as the server-side session key
	ContextID   string // context ID as reported by the server
	PageID      string // page ID as reported by the server
	CreatedAt   time.Time
	LastAction  time.Time
	Screenshots []string // paths of screenshots saved to disk for this session
	Recordings  []string
	Closed      bool
}

// ActionRequest is the wire format for a single browser action request.
type ActionRequest struct {
	SessionID string                 `json:"sessionId"`
	Action    string                 `json:"action"`
	Params    map[string]interface{} `json:"params"`
}

// ActionResponse is the common result envelope returned by the high-level
// action methods (Navigate, Click, Type, ...).
type ActionResponse struct {
	Success    bool        `json:"success"`
	Data       interface{} `json:"data,omitempty"`
	Screenshot string      `json:"screenshot,omitempty"` // base64-encoded image, when requested
	Error      string      `json:"error,omitempty"`
	PageTitle  string      `json:"pageTitle,omitempty"`
	PageURL    string      `json:"pageUrl,omitempty"`
}
|
||||
|
||||
func NewPlaywrightBrowser(cfg Config) *PlaywrightBrowser {
|
||||
if cfg.DefaultTimeout == 0 {
|
||||
cfg.DefaultTimeout = 30 * time.Second
|
||||
}
|
||||
if cfg.PlaywrightServerURL == "" {
|
||||
cfg.PlaywrightServerURL = "http://localhost:3050"
|
||||
}
|
||||
if cfg.ScreenshotsDir == "" {
|
||||
cfg.ScreenshotsDir = "/tmp/gooseek-screenshots"
|
||||
}
|
||||
if cfg.RecordingsDir == "" {
|
||||
cfg.RecordingsDir = "/tmp/gooseek-recordings"
|
||||
}
|
||||
|
||||
os.MkdirAll(cfg.ScreenshotsDir, 0755)
|
||||
os.MkdirAll(cfg.RecordingsDir, 0755)
|
||||
|
||||
return &PlaywrightBrowser{
|
||||
serverURL: cfg.PlaywrightServerURL,
|
||||
client: &http.Client{
|
||||
Timeout: cfg.DefaultTimeout,
|
||||
},
|
||||
sessions: make(map[string]*BrowserSession),
|
||||
config: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) NewSession(ctx context.Context, opts SessionOptions) (*BrowserSession, error) {
|
||||
sessionID := uuid.New().String()
|
||||
|
||||
params := map[string]interface{}{
|
||||
"headless": b.config.Headless,
|
||||
"sessionId": sessionID,
|
||||
}
|
||||
|
||||
if opts.Viewport != nil {
|
||||
params["viewport"] = opts.Viewport
|
||||
}
|
||||
if opts.UserAgent != "" {
|
||||
params["userAgent"] = opts.UserAgent
|
||||
} else if b.config.UserAgent != "" {
|
||||
params["userAgent"] = b.config.UserAgent
|
||||
}
|
||||
if opts.ProxyURL != "" {
|
||||
params["proxy"] = opts.ProxyURL
|
||||
} else if b.config.ProxyURL != "" {
|
||||
params["proxy"] = b.config.ProxyURL
|
||||
}
|
||||
if opts.RecordVideo {
|
||||
params["recordVideo"] = map[string]interface{}{
|
||||
"dir": b.config.RecordingsDir,
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "browser.newContext", params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create browser context: %w", err)
|
||||
}
|
||||
|
||||
contextID, _ := resp["contextId"].(string)
|
||||
pageID, _ := resp["pageId"].(string)
|
||||
|
||||
session := &BrowserSession{
|
||||
ID: sessionID,
|
||||
ContextID: contextID,
|
||||
PageID: pageID,
|
||||
CreatedAt: time.Now(),
|
||||
LastAction: time.Now(),
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
b.sessions[sessionID] = session
|
||||
b.mu.Unlock()
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) CloseSession(ctx context.Context, sessionID string) error {
|
||||
b.mu.Lock()
|
||||
session, ok := b.sessions[sessionID]
|
||||
if !ok {
|
||||
b.mu.Unlock()
|
||||
return errors.New("session not found")
|
||||
}
|
||||
session.Closed = true
|
||||
delete(b.sessions, sessionID)
|
||||
b.mu.Unlock()
|
||||
|
||||
_, err := b.sendCommand(ctx, "browser.closeContext", map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Navigate(ctx context.Context, sessionID, url string, opts NavigateOptions) (*ActionResponse, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"url": url,
|
||||
}
|
||||
if opts.Timeout > 0 {
|
||||
params["timeout"] = opts.Timeout
|
||||
}
|
||||
if opts.WaitUntil != "" {
|
||||
params["waitUntil"] = opts.WaitUntil
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.goto", params)
|
||||
if err != nil {
|
||||
return &ActionResponse{Success: false, Error: err.Error()}, err
|
||||
}
|
||||
|
||||
result := &ActionResponse{
|
||||
Success: true,
|
||||
PageURL: getString(resp, "url"),
|
||||
PageTitle: getString(resp, "title"),
|
||||
}
|
||||
|
||||
if opts.Screenshot {
|
||||
screenshot, _ := b.Screenshot(ctx, sessionID, ScreenshotOptions{FullPage: false})
|
||||
if screenshot != nil {
|
||||
result.Screenshot = screenshot.Data
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Click(ctx context.Context, sessionID, selector string, opts ClickOptions) (*ActionResponse, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
}
|
||||
if opts.Button != "" {
|
||||
params["button"] = opts.Button
|
||||
}
|
||||
if opts.ClickCount > 0 {
|
||||
params["clickCount"] = opts.ClickCount
|
||||
}
|
||||
if opts.Timeout > 0 {
|
||||
params["timeout"] = opts.Timeout
|
||||
}
|
||||
if opts.Force {
|
||||
params["force"] = true
|
||||
}
|
||||
|
||||
_, err := b.sendCommand(ctx, "page.click", params)
|
||||
if err != nil {
|
||||
return &ActionResponse{Success: false, Error: err.Error()}, err
|
||||
}
|
||||
|
||||
result := &ActionResponse{Success: true}
|
||||
|
||||
if opts.WaitAfter > 0 {
|
||||
time.Sleep(time.Duration(opts.WaitAfter) * time.Millisecond)
|
||||
}
|
||||
|
||||
if opts.Screenshot {
|
||||
screenshot, _ := b.Screenshot(ctx, sessionID, ScreenshotOptions{FullPage: false})
|
||||
if screenshot != nil {
|
||||
result.Screenshot = screenshot.Data
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Type(ctx context.Context, sessionID, selector, text string, opts TypeOptions) (*ActionResponse, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
"text": text,
|
||||
}
|
||||
if opts.Delay > 0 {
|
||||
params["delay"] = opts.Delay
|
||||
}
|
||||
if opts.Timeout > 0 {
|
||||
params["timeout"] = opts.Timeout
|
||||
}
|
||||
if opts.Clear {
|
||||
b.sendCommand(ctx, "page.fill", map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
"value": "",
|
||||
})
|
||||
}
|
||||
|
||||
_, err := b.sendCommand(ctx, "page.type", params)
|
||||
if err != nil {
|
||||
return &ActionResponse{Success: false, Error: err.Error()}, err
|
||||
}
|
||||
|
||||
return &ActionResponse{Success: true}, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Fill(ctx context.Context, sessionID, selector, value string) (*ActionResponse, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
"value": value,
|
||||
}
|
||||
|
||||
_, err := b.sendCommand(ctx, "page.fill", params)
|
||||
if err != nil {
|
||||
return &ActionResponse{Success: false, Error: err.Error()}, err
|
||||
}
|
||||
|
||||
return &ActionResponse{Success: true}, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Screenshot(ctx context.Context, sessionID string, opts ScreenshotOptions) (*ScreenshotResult, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"fullPage": opts.FullPage,
|
||||
}
|
||||
if opts.Selector != "" {
|
||||
params["selector"] = opts.Selector
|
||||
}
|
||||
if opts.Quality > 0 {
|
||||
params["quality"] = opts.Quality
|
||||
}
|
||||
params["type"] = "png"
|
||||
if opts.Format != "" {
|
||||
params["type"] = opts.Format
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.screenshot", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, _ := resp["data"].(string)
|
||||
|
||||
filename := fmt.Sprintf("%s/%s-%d.png", b.config.ScreenshotsDir, sessionID, time.Now().UnixNano())
|
||||
if decoded, err := base64.StdEncoding.DecodeString(data); err == nil {
|
||||
os.WriteFile(filename, decoded, 0644)
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
if session, ok := b.sessions[sessionID]; ok {
|
||||
session.Screenshots = append(session.Screenshots, filename)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
return &ScreenshotResult{
|
||||
Data: data,
|
||||
Path: filename,
|
||||
MimeType: "image/png",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) ExtractText(ctx context.Context, sessionID, selector string) (string, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.textContent", params)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return getString(resp, "text"), nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) ExtractHTML(ctx context.Context, sessionID, selector string) (string, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.innerHTML", params)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return getString(resp, "html"), nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) WaitForSelector(ctx context.Context, sessionID, selector string, opts WaitOptions) error {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
}
|
||||
if opts.Timeout > 0 {
|
||||
params["timeout"] = opts.Timeout
|
||||
}
|
||||
if opts.State != "" {
|
||||
params["state"] = opts.State
|
||||
}
|
||||
|
||||
_, err := b.sendCommand(ctx, "page.waitForSelector", params)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) WaitForNavigation(ctx context.Context, sessionID string, opts WaitOptions) error {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
}
|
||||
if opts.Timeout > 0 {
|
||||
params["timeout"] = opts.Timeout
|
||||
}
|
||||
if opts.WaitUntil != "" {
|
||||
params["waitUntil"] = opts.WaitUntil
|
||||
}
|
||||
|
||||
_, err := b.sendCommand(ctx, "page.waitForNavigation", params)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Scroll(ctx context.Context, sessionID string, opts ScrollOptions) (*ActionResponse, error) {
|
||||
script := fmt.Sprintf("window.scrollBy(%d, %d)", opts.X, opts.Y)
|
||||
if opts.Selector != "" {
|
||||
script = fmt.Sprintf(`document.querySelector('%s').scrollBy(%d, %d)`, opts.Selector, opts.X, opts.Y)
|
||||
}
|
||||
if opts.ToBottom {
|
||||
script = "window.scrollTo(0, document.body.scrollHeight)"
|
||||
}
|
||||
if opts.ToTop {
|
||||
script = "window.scrollTo(0, 0)"
|
||||
}
|
||||
|
||||
_, err := b.Evaluate(ctx, sessionID, script)
|
||||
if err != nil {
|
||||
return &ActionResponse{Success: false, Error: err.Error()}, err
|
||||
}
|
||||
|
||||
if opts.WaitAfter > 0 {
|
||||
time.Sleep(time.Duration(opts.WaitAfter) * time.Millisecond)
|
||||
}
|
||||
|
||||
return &ActionResponse{Success: true}, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Evaluate(ctx context.Context, sessionID, script string) (interface{}, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"expression": script,
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.evaluate", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp["result"], nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) Select(ctx context.Context, sessionID, selector string, values []string) (*ActionResponse, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
"selector": selector,
|
||||
"values": values,
|
||||
}
|
||||
|
||||
_, err := b.sendCommand(ctx, "page.selectOption", params)
|
||||
if err != nil {
|
||||
return &ActionResponse{Success: false, Error: err.Error()}, err
|
||||
}
|
||||
|
||||
return &ActionResponse{Success: true}, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) GetPageInfo(ctx context.Context, sessionID string) (*PageInfo, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.info", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PageInfo{
|
||||
URL: getString(resp, "url"),
|
||||
Title: getString(resp, "title"),
|
||||
Content: getString(resp, "content"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) PDF(ctx context.Context, sessionID string, opts PDFOptions) ([]byte, error) {
|
||||
params := map[string]interface{}{
|
||||
"sessionId": sessionID,
|
||||
}
|
||||
if opts.Format != "" {
|
||||
params["format"] = opts.Format
|
||||
}
|
||||
if opts.Landscape {
|
||||
params["landscape"] = true
|
||||
}
|
||||
if opts.PrintBackground {
|
||||
params["printBackground"] = true
|
||||
}
|
||||
|
||||
resp, err := b.sendCommand(ctx, "page.pdf", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, _ := resp["data"].(string)
|
||||
return base64.StdEncoding.DecodeString(data)
|
||||
}
|
||||
|
||||
func (b *PlaywrightBrowser) sendCommand(ctx context.Context, method string, params map[string]interface{}) (map[string]interface{}, error) {
|
||||
body := map[string]interface{}{
|
||||
"method": method,
|
||||
"params": params,
|
||||
}
|
||||
|
||||
jsonBody, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", b.serverURL+"/api/browser", strings.NewReader(string(jsonBody)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := b.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := json.Unmarshal(respBody, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if errMsg, ok := result["error"].(string); ok && errMsg != "" {
|
||||
return result, errors.New(errMsg)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// getString returns m[key] when it holds a string, or "" for a missing key
// or a non-string value.
func getString(m map[string]interface{}, key string) string {
	v, ok := m[key].(string)
	if !ok {
		return ""
	}
	return v
}
|
||||
|
||||
// SessionOptions configures a new browser session. Zero-valued fields fall
// back to the browser-level Config where applicable (UserAgent, ProxyURL).
type SessionOptions struct {
	Headless    bool
	Viewport    *Viewport
	UserAgent   string
	ProxyURL    string
	RecordVideo bool // when set, videos are written under Config.RecordingsDir
	BlockAds    bool // declared but not referenced in this file — TODO confirm it is consumed elsewhere
}

// Viewport is a page viewport size in CSS pixels.
type Viewport struct {
	Width  int `json:"width"`
	Height int `json:"height"`
}

// NavigateOptions tunes Navigate.
type NavigateOptions struct {
	Timeout    int    // ms, forwarded to the server when > 0
	WaitUntil  string // Playwright waitUntil condition (e.g. "load", "networkidle")
	Screenshot bool   // attach a best-effort viewport screenshot to the response
}

// ClickOptions tunes Click.
type ClickOptions struct {
	Button     string // mouse button name; server default when empty
	ClickCount int
	Timeout    int // ms
	Force      bool
	WaitAfter  int  // ms to sleep after the click before screenshotting
	Screenshot bool // attach a best-effort viewport screenshot
}

// TypeOptions tunes Type.
type TypeOptions struct {
	Delay   int  // ms between keystrokes
	Timeout int  // ms
	Clear   bool // empty the field (fill "") before typing
}

// ScreenshotOptions tunes Screenshot.
type ScreenshotOptions struct {
	FullPage bool
	Selector string // capture only this element when non-empty
	Format   string // image type sent to the server; "png" when empty
	Quality  int    // forwarded when > 0 (meaningful for jpeg)
}

// ScreenshotResult is the outcome of Screenshot.
type ScreenshotResult struct {
	Data     string // base64-encoded image bytes as returned by the server
	Path     string // where the decoded image was written on disk
	MimeType string
}

// WaitOptions tunes WaitForSelector and WaitForNavigation.
type WaitOptions struct {
	Timeout   int    // ms
	State     string // element state for WaitForSelector
	WaitUntil string // navigation condition for WaitForNavigation
}

// ScrollOptions tunes Scroll. Later fields override earlier ones:
// ToTop > ToBottom > Selector > X/Y.
type ScrollOptions struct {
	X         int
	Y         int
	Selector  string // scroll this element instead of the window
	ToBottom  bool
	ToTop     bool
	WaitAfter int // ms to sleep after scrolling
}

// PageInfo is the snapshot returned by GetPageInfo.
type PageInfo struct {
	URL     string
	Title   string
	Content string
}

// PDFOptions tunes PDF.
type PDFOptions struct {
	Format          string // paper format, e.g. "A4"
	Landscape       bool
	PrintBackground bool
}
|
||||
555
backend/internal/computer/browser/server.go
Normal file
555
backend/internal/computer/browser/server.go
Normal file
@@ -0,0 +1,555 @@
|
||||
package browser
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/cors"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
)
|
||||
|
||||
// BrowserServer exposes a PlaywrightBrowser over an HTTP API (Fiber). It
// keeps its own session registry, layered on top of the browser's, to track
// activity timestamps and per-session action logs for expiry and inspection.
type BrowserServer struct {
	browser  *PlaywrightBrowser
	sessions map[string]*ManagedSession // keyed by session ID; guarded by mu
	mu       sync.RWMutex
	config   ServerConfig
}

// ServerConfig holds BrowserServer settings. Zero values are replaced with
// defaults by NewBrowserServer (port 3050, 20 sessions, 30m timeout, 5m
// cleanup interval).
type ServerConfig struct {
	Port            int
	MaxSessions     int
	SessionTimeout  time.Duration // idle time after which a session is evicted
	CleanupInterval time.Duration // how often the eviction sweep runs
}

// ManagedSession wraps a BrowserSession with server-side bookkeeping.
type ManagedSession struct {
	*BrowserSession
	LastActive time.Time   // updated on each incoming command for this session
	Actions    []ActionLog // audit trail of commands executed in this session
}

// ActionLog records one executed command for a session's audit trail.
type ActionLog struct {
	Action    string    `json:"action"`
	Params    string    `json:"params"` // JSON-encoded request params
	Success   bool      `json:"success"`
	Error     string    `json:"error,omitempty"`
	Duration  int64     `json:"durationMs"`
	Timestamp time.Time `json:"timestamp"`
}

// BrowserRequest is the envelope accepted by POST /api/browser.
type BrowserRequest struct {
	Method string                 `json:"method"`
	Params map[string]interface{} `json:"params"`
}
|
||||
|
||||
func NewBrowserServer(cfg ServerConfig) *BrowserServer {
|
||||
if cfg.Port == 0 {
|
||||
cfg.Port = 3050
|
||||
}
|
||||
if cfg.MaxSessions == 0 {
|
||||
cfg.MaxSessions = 20
|
||||
}
|
||||
if cfg.SessionTimeout == 0 {
|
||||
cfg.SessionTimeout = 30 * time.Minute
|
||||
}
|
||||
if cfg.CleanupInterval == 0 {
|
||||
cfg.CleanupInterval = 5 * time.Minute
|
||||
}
|
||||
|
||||
return &BrowserServer{
|
||||
browser: NewPlaywrightBrowser(Config{
|
||||
DefaultTimeout: 30 * time.Second,
|
||||
Headless: true,
|
||||
}),
|
||||
sessions: make(map[string]*ManagedSession),
|
||||
config: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BrowserServer) Start(ctx context.Context) error {
|
||||
go s.cleanupLoop(ctx)
|
||||
|
||||
app := fiber.New(fiber.Config{
|
||||
BodyLimit: 50 * 1024 * 1024,
|
||||
ReadTimeout: 2 * time.Minute,
|
||||
WriteTimeout: 2 * time.Minute,
|
||||
})
|
||||
|
||||
app.Use(logger.New())
|
||||
app.Use(cors.New())
|
||||
|
||||
app.Get("/health", func(c *fiber.Ctx) error {
|
||||
return c.JSON(fiber.Map{"status": "ok", "sessions": len(s.sessions)})
|
||||
})
|
||||
|
||||
app.Post("/api/browser", s.handleBrowserCommand)
|
||||
|
||||
app.Post("/api/session/new", s.handleNewSession)
|
||||
app.Delete("/api/session/:id", s.handleCloseSession)
|
||||
app.Get("/api/session/:id", s.handleGetSession)
|
||||
app.Get("/api/sessions", s.handleListSessions)
|
||||
|
||||
app.Post("/api/action", s.handleAction)
|
||||
|
||||
log.Printf("[BrowserServer] Starting on port %d", s.config.Port)
|
||||
return app.Listen(fmt.Sprintf(":%d", s.config.Port))
|
||||
}
|
||||
|
||||
// handleBrowserCommand services POST /api/browser: it parses the
// {method, params} envelope, refreshes the session's LastActive stamp,
// executes the method, appends an audit-log entry (with duration and
// outcome), and returns either the method's result map or an error payload.
func (s *BrowserServer) handleBrowserCommand(c *fiber.Ctx) error {
	var req BrowserRequest
	if err := c.BodyParser(&req); err != nil {
		return c.Status(400).JSON(fiber.Map{"error": "Invalid request"})
	}

	// Commands get their own 60s budget, independent of the HTTP read/write
	// timeouts configured on the Fiber app.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	sessionID, _ := req.Params["sessionId"].(string)

	// Touch the session before executing so slow commands don't get the
	// session evicted mid-flight by the cleanup loop.
	s.mu.Lock()
	if session, ok := s.sessions[sessionID]; ok {
		session.LastActive = time.Now()
	}
	s.mu.Unlock()

	start := time.Now()
	// NOTE: the lock is intentionally NOT held across executeMethod — it can
	// block for up to 60s on the Playwright server.
	result, err := s.executeMethod(ctx, req.Method, req.Params)

	// Record the outcome in the session's audit trail (if the session still
	// exists; browser.closeContext removes it inside executeMethod).
	s.mu.Lock()
	if session, ok := s.sessions[sessionID]; ok {
		paramsJSON, _ := json.Marshal(req.Params)
		session.Actions = append(session.Actions, ActionLog{
			Action:    req.Method,
			Params:    string(paramsJSON),
			Success:   err == nil,
			Error:     errToString(err),
			Duration:  time.Since(start).Milliseconds(),
			Timestamp: time.Now(),
		})
	}
	s.mu.Unlock()

	// Errors are reported with HTTP 200 and success=false — the client
	// (sendCommand) keys off the "error" field, not the status code.
	if err != nil {
		return c.JSON(fiber.Map{
			"success": false,
			"error":   err.Error(),
		})
	}

	return c.JSON(result)
}
|
||||
|
||||
// executeMethod dispatches one wire-protocol method name to the matching
// PlaywrightBrowser call and adapts its result into the generic map shape
// that handleBrowserCommand serializes. Unknown methods are an error.
// Params are the raw JSON-decoded values, so numbers arrive as float64
// (hence the getInt/getBool/getString coercion helpers).
func (s *BrowserServer) executeMethod(ctx context.Context, method string, params map[string]interface{}) (map[string]interface{}, error) {
	sessionID, _ := params["sessionId"].(string)

	switch method {
	case "browser.newContext":
		// Build session options from loosely typed params.
		opts := SessionOptions{
			Headless: getBool(params, "headless"),
		}
		if viewport, ok := params["viewport"].(map[string]interface{}); ok {
			opts.Viewport = &Viewport{
				Width:  getInt(viewport, "width"),
				Height: getInt(viewport, "height"),
			}
		}
		if ua, ok := params["userAgent"].(string); ok {
			opts.UserAgent = ua
		}
		if proxy, ok := params["proxy"].(string); ok {
			opts.ProxyURL = proxy
		}
		if rv, ok := params["recordVideo"].(map[string]interface{}); ok {
			// Only the presence of the option matters here; the directory is
			// taken from the browser config, not from the request.
			_ = rv
			opts.RecordVideo = true
		}

		session, err := s.browser.NewSession(ctx, opts)
		if err != nil {
			return nil, err
		}

		// Register the managed wrapper so activity tracking and audit logs work.
		s.mu.Lock()
		s.sessions[session.ID] = &ManagedSession{
			BrowserSession: session,
			LastActive:     time.Now(),
			Actions:        make([]ActionLog, 0),
		}
		s.mu.Unlock()

		return map[string]interface{}{
			"sessionId": session.ID,
			"contextId": session.ContextID,
			"pageId":    session.PageID,
		}, nil

	case "browser.closeContext":
		err := s.browser.CloseSession(ctx, sessionID)
		// Drop the managed record even if the remote close failed.
		s.mu.Lock()
		delete(s.sessions, sessionID)
		s.mu.Unlock()
		return map[string]interface{}{"success": err == nil}, err

	case "page.goto":
		url, _ := params["url"].(string)
		opts := NavigateOptions{
			Timeout:   getInt(params, "timeout"),
			WaitUntil: getString(params, "waitUntil"),
		}
		result, err := s.browser.Navigate(ctx, sessionID, url, opts)
		if err != nil {
			return nil, err
		}
		return map[string]interface{}{
			"success": result.Success,
			"url":     result.PageURL,
			"title":   result.PageTitle,
		}, nil

	case "page.click":
		selector, _ := params["selector"].(string)
		opts := ClickOptions{
			Button:     getString(params, "button"),
			ClickCount: getInt(params, "clickCount"),
			Timeout:    getInt(params, "timeout"),
			Force:      getBool(params, "force"),
		}
		result, err := s.browser.Click(ctx, sessionID, selector, opts)
		if err != nil {
			return nil, err
		}
		return map[string]interface{}{
			"success":    result.Success,
			"screenshot": result.Screenshot,
		}, nil

	case "page.type":
		selector, _ := params["selector"].(string)
		text, _ := params["text"].(string)
		opts := TypeOptions{
			Delay:   getInt(params, "delay"),
			Timeout: getInt(params, "timeout"),
		}
		_, err := s.browser.Type(ctx, sessionID, selector, text, opts)
		return map[string]interface{}{"success": err == nil}, err

	case "page.fill":
		selector, _ := params["selector"].(string)
		value, _ := params["value"].(string)
		_, err := s.browser.Fill(ctx, sessionID, selector, value)
		return map[string]interface{}{"success": err == nil}, err

	case "page.screenshot":
		opts := ScreenshotOptions{
			FullPage: getBool(params, "fullPage"),
			Selector: getString(params, "selector"),
			Format:   getString(params, "type"),
			Quality:  getInt(params, "quality"),
		}
		result, err := s.browser.Screenshot(ctx, sessionID, opts)
		if err != nil {
			return nil, err
		}
		return map[string]interface{}{
			"data": result.Data, // base64-encoded image
			"path": result.Path, // where the image was saved server-side
		}, nil

	case "page.textContent":
		selector, _ := params["selector"].(string)
		text, err := s.browser.ExtractText(ctx, sessionID, selector)
		return map[string]interface{}{"text": text}, err

	case "page.innerHTML":
		selector, _ := params["selector"].(string)
		html, err := s.browser.ExtractHTML(ctx, sessionID, selector)
		return map[string]interface{}{"html": html}, err

	case "page.waitForSelector":
		selector, _ := params["selector"].(string)
		opts := WaitOptions{
			Timeout: getInt(params, "timeout"),
			State:   getString(params, "state"),
		}
		err := s.browser.WaitForSelector(ctx, sessionID, selector, opts)
		return map[string]interface{}{"success": err == nil}, err

	case "page.waitForNavigation":
		opts := WaitOptions{
			Timeout:   getInt(params, "timeout"),
			WaitUntil: getString(params, "waitUntil"),
		}
		err := s.browser.WaitForNavigation(ctx, sessionID, opts)
		return map[string]interface{}{"success": err == nil}, err

	case "page.evaluate":
		expression, _ := params["expression"].(string)
		result, err := s.browser.Evaluate(ctx, sessionID, expression)
		return map[string]interface{}{"result": result}, err

	case "page.selectOption":
		selector, _ := params["selector"].(string)
		values := getStringArray(params, "values")
		_, err := s.browser.Select(ctx, sessionID, selector, values)
		return map[string]interface{}{"success": err == nil}, err

	case "page.info":
		info, err := s.browser.GetPageInfo(ctx, sessionID)
		if err != nil {
			return nil, err
		}
		return map[string]interface{}{
			"url":     info.URL,
			"title":   info.Title,
			"content": info.Content,
		}, nil

	case "page.pdf":
		opts := PDFOptions{
			Format:          getString(params, "format"),
			Landscape:       getBool(params, "landscape"),
			PrintBackground: getBool(params, "printBackground"),
		}
		data, err := s.browser.PDF(ctx, sessionID, opts)
		if err != nil {
			return nil, err
		}
		// Raw bytes; json.Marshal will base64-encode []byte automatically.
		return map[string]interface{}{
			"data": data,
		}, nil

	default:
		return nil, fmt.Errorf("unknown method: %s", method)
	}
}
|
||||
|
||||
func (s *BrowserServer) handleNewSession(c *fiber.Ctx) error {
|
||||
var req struct {
|
||||
Headless bool `json:"headless"`
|
||||
Viewport *Viewport `json:"viewport,omitempty"`
|
||||
UserAgent string `json:"userAgent,omitempty"`
|
||||
ProxyURL string `json:"proxyUrl,omitempty"`
|
||||
}
|
||||
|
||||
if err := c.BodyParser(&req); err != nil {
|
||||
req.Headless = true
|
||||
}
|
||||
|
||||
s.mu.RLock()
|
||||
if len(s.sessions) >= s.config.MaxSessions {
|
||||
s.mu.RUnlock()
|
||||
return c.Status(http.StatusTooManyRequests).JSON(fiber.Map{
|
||||
"error": "Maximum sessions limit reached",
|
||||
})
|
||||
}
|
||||
s.mu.RUnlock()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
session, err := s.browser.NewSession(ctx, SessionOptions{
|
||||
Headless: req.Headless,
|
||||
Viewport: req.Viewport,
|
||||
UserAgent: req.UserAgent,
|
||||
ProxyURL: req.ProxyURL,
|
||||
})
|
||||
if err != nil {
|
||||
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.sessions[session.ID] = &ManagedSession{
|
||||
BrowserSession: session,
|
||||
LastActive: time.Now(),
|
||||
Actions: make([]ActionLog, 0),
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"sessionId": session.ID,
|
||||
"contextId": session.ContextID,
|
||||
"pageId": session.PageID,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BrowserServer) handleCloseSession(c *fiber.Ctx) error {
|
||||
sessionID := c.Params("id")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err := s.browser.CloseSession(ctx, sessionID)
|
||||
if err != nil {
|
||||
return c.Status(404).JSON(fiber.Map{"error": err.Error()})
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
delete(s.sessions, sessionID)
|
||||
s.mu.Unlock()
|
||||
|
||||
return c.JSON(fiber.Map{"success": true})
|
||||
}
|
||||
|
||||
func (s *BrowserServer) handleGetSession(c *fiber.Ctx) error {
|
||||
sessionID := c.Params("id")
|
||||
|
||||
s.mu.RLock()
|
||||
session, ok := s.sessions[sessionID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return c.Status(404).JSON(fiber.Map{"error": "Session not found"})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"sessionId": session.ID,
|
||||
"createdAt": session.CreatedAt,
|
||||
"lastActive": session.LastActive,
|
||||
"screenshots": session.Screenshots,
|
||||
"actions": len(session.Actions),
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BrowserServer) handleListSessions(c *fiber.Ctx) error {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
sessions := make([]map[string]interface{}, 0, len(s.sessions))
|
||||
for _, session := range s.sessions {
|
||||
sessions = append(sessions, map[string]interface{}{
|
||||
"sessionId": session.ID,
|
||||
"createdAt": session.CreatedAt,
|
||||
"lastActive": session.LastActive,
|
||||
"actions": len(session.Actions),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{"sessions": sessions, "count": len(sessions)})
|
||||
}
|
||||
|
||||
// handleAction services POST /api/action: a simplified, flat-field facade
// over the most common browser operations (navigate/click/type/fill/
// screenshot/extract). It touches the session's LastActive stamp, runs the
// action with a 60s budget, and returns the ActionResponse as JSON.
func (s *BrowserServer) handleAction(c *fiber.Ctx) error {
	var req struct {
		SessionID  string `json:"sessionId"`
		Action     string `json:"action"`
		Selector   string `json:"selector,omitempty"`
		URL        string `json:"url,omitempty"`
		Value      string `json:"value,omitempty"`
		Screenshot bool   `json:"screenshot"`
	}

	if err := c.BodyParser(&req); err != nil {
		return c.Status(400).JSON(fiber.Map{"error": "Invalid request"})
	}

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	// Refresh the activity stamp so the cleanup loop doesn't evict the
	// session while the action runs.
	s.mu.Lock()
	if session, ok := s.sessions[req.SessionID]; ok {
		session.LastActive = time.Now()
	}
	s.mu.Unlock()

	var result *ActionResponse
	var err error

	switch req.Action {
	case "navigate":
		result, err = s.browser.Navigate(ctx, req.SessionID, req.URL, NavigateOptions{Screenshot: req.Screenshot})
	case "click":
		result, err = s.browser.Click(ctx, req.SessionID, req.Selector, ClickOptions{Screenshot: req.Screenshot})
	case "type":
		// Value carries the text to type.
		result, err = s.browser.Type(ctx, req.SessionID, req.Selector, req.Value, TypeOptions{})
	case "fill":
		result, err = s.browser.Fill(ctx, req.SessionID, req.Selector, req.Value)
	case "screenshot":
		var screenshot *ScreenshotResult
		screenshot, err = s.browser.Screenshot(ctx, req.SessionID, ScreenshotOptions{})
		if err == nil {
			result = &ActionResponse{Success: true, Screenshot: screenshot.Data}
		}
	case "extract":
		// Text extraction: the result is returned in the Data field.
		var text string
		text, err = s.browser.ExtractText(ctx, req.SessionID, req.Selector)
		result = &ActionResponse{Success: err == nil, Data: text}
	default:
		return c.Status(400).JSON(fiber.Map{"error": "Unknown action: " + req.Action})
	}

	if err != nil {
		return c.Status(500).JSON(fiber.Map{"error": err.Error(), "success": false})
	}

	return c.JSON(result)
}
|
||||
|
||||
func (s *BrowserServer) cleanupLoop(ctx context.Context) {
|
||||
ticker := time.NewTicker(s.config.CleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.cleanupExpiredSessions()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BrowserServer) cleanupExpiredSessions() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
for sessionID, session := range s.sessions {
|
||||
if now.Sub(session.LastActive) > s.config.SessionTimeout {
|
||||
log.Printf("[BrowserServer] Cleaning up expired session: %s", sessionID)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
s.browser.CloseSession(ctx, sessionID)
|
||||
cancel()
|
||||
delete(s.sessions, sessionID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// errToString returns the error's message, or the empty string for nil.
func errToString(err error) string {
	if err != nil {
		return err.Error()
	}
	return ""
}
|
||||
|
||||
// getBool reads key from m as a bool; any other type (or absence) yields false.
func getBool(m map[string]interface{}, key string) bool {
	b, ok := m[key].(bool)
	return ok && b
}
|
||||
|
||||
// getInt reads key from m as an int. JSON-decoded numbers arrive as float64,
// so both float64 and int are accepted; anything else yields 0.
func getInt(m map[string]interface{}, key string) int {
	switch v := m[key].(type) {
	case float64:
		return int(v)
	case int:
		return v
	default:
		return 0
	}
}
|
||||
|
||||
// getStringArray reads key from m as a []interface{} and converts each element
// to a string; non-string elements become "". Returns nil when the key is
// absent or not a slice.
func getStringArray(m map[string]interface{}, key string) []string {
	raw, ok := m[key].([]interface{})
	if !ok {
		return nil
	}
	out := make([]string, len(raw))
	for i, item := range raw {
		s, _ := item.(string)
		out[i] = s
	}
	return out
}
|
||||
738
backend/internal/computer/computer.go
Normal file
738
backend/internal/computer/computer.go
Normal file
@@ -0,0 +1,738 @@
|
||||
package computer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gooseek/backend/internal/computer/connectors"
|
||||
"github.com/gooseek/backend/internal/llm"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// ComputerConfig holds tunables for the Computer orchestrator: parallelism,
// timeouts, budget, and feature flags for the sandbox, scheduler and browser.
type ComputerConfig struct {
	MaxParallelTasks    int           // max subtasks executed concurrently within one task
	MaxSubTasks         int           // cap on subtasks produced by the planner
	TaskTimeout         time.Duration // overall wall-clock limit for a task
	SubTaskTimeout      time.Duration // wall-clock limit for one subtask
	TotalBudget         float64       // default cost budget (USD) per task
	EnableSandbox       bool          // run subtasks inside the sandbox image
	EnableScheduling    bool          // start the cron-style scheduler
	EnableBrowser       bool          // allow browser-based subtasks
	SandboxImage        string        // container image used by the sandbox
	ArtifactStorageURL  string        // where produced artifacts are uploaded
	BrowserServerURL    string        // endpoint of the browser service
	CheckpointStorePath string        // filesystem path for checkpoint persistence
	MaxConcurrentTasks  int           // cap on simultaneously running tasks
	HeartbeatInterval   time.Duration // cadence of heartbeat events
	CheckpointInterval  time.Duration // cadence of periodic checkpoints
}
|
||||
|
||||
// DefaultConfig returns the production defaults for ComputerConfig.
func DefaultConfig() ComputerConfig {
	return ComputerConfig{
		MaxParallelTasks: 10,
		MaxSubTasks:      100,
		// Effectively unbounded: long-running tasks are governed by their
		// DurationMode, not this outer limit.
		TaskTimeout:         365 * 24 * time.Hour,
		SubTaskTimeout:      2 * time.Hour,
		TotalBudget:         100.0,
		EnableSandbox:       true,
		EnableScheduling:    true,
		EnableBrowser:       true,
		SandboxImage:        "gooseek/sandbox:latest",
		BrowserServerURL:    "http://browser-svc:3050",
		CheckpointStorePath: "/data/checkpoints",
		MaxConcurrentTasks:  50,
		HeartbeatInterval:   30 * time.Second,
		CheckpointInterval:  15 * time.Minute,
	}
}
|
||||
|
||||
func GetDurationConfig(mode DurationMode) (maxDuration, checkpointFreq, heartbeatFreq time.Duration, maxIter int) {
|
||||
cfg, ok := DurationModeConfigs[mode]
|
||||
if !ok {
|
||||
cfg = DurationModeConfigs[DurationMedium]
|
||||
}
|
||||
return cfg.MaxDuration, cfg.CheckpointFreq, cfg.HeartbeatFreq, cfg.MaxIterations
|
||||
}
|
||||
|
||||
// Dependencies bundles the external collaborators a Computer needs.
type Dependencies struct {
	Registry     *llm.ModelRegistry // LLM model catalog used by planner/router
	TaskRepo     TaskRepository     // persistence for tasks
	MemoryRepo   MemoryRepository   // persistence for user/task memory
	ArtifactRepo ArtifactRepository // persistence for produced artifacts
}
|
||||
|
||||
// TaskRepository persists ComputerTask records.
type TaskRepository interface {
	Create(ctx context.Context, task *ComputerTask) error
	Update(ctx context.Context, task *ComputerTask) error
	GetByID(ctx context.Context, id string) (*ComputerTask, error)
	// GetByUserID lists a user's tasks with limit/offset pagination.
	GetByUserID(ctx context.Context, userID string, limit, offset int) ([]ComputerTask, error)
	// GetScheduled returns tasks awaiting their schedule to fire.
	GetScheduled(ctx context.Context) ([]ComputerTask, error)
	Delete(ctx context.Context, id string) error
}
|
||||
|
||||
// MemoryRepository persists MemoryEntry records scoped by user and task.
type MemoryRepository interface {
	Store(ctx context.Context, entry *MemoryEntry) error
	GetByUser(ctx context.Context, userID string, limit int) ([]MemoryEntry, error)
	GetByTask(ctx context.Context, taskID string) ([]MemoryEntry, error)
	Search(ctx context.Context, userID, query string, limit int) ([]MemoryEntry, error)
	Delete(ctx context.Context, id string) error
}
|
||||
|
||||
// ArtifactRepository persists Artifact records produced by task execution.
type ArtifactRepository interface {
	Create(ctx context.Context, artifact *Artifact) error
	GetByID(ctx context.Context, id string) (*Artifact, error)
	GetByTaskID(ctx context.Context, taskID string) ([]Artifact, error)
	Delete(ctx context.Context, id string) error
}
|
||||
|
||||
// Computer orchestrates long-running agent tasks: it plans a query into
// subtasks, executes them in dependency waves, checkpoints progress, and
// publishes lifecycle events.
type Computer struct {
	cfg        ComputerConfig
	planner    *Planner
	router     *Router
	executor   *Executor
	sandbox    *SandboxManager // nil unless cfg.EnableSandbox
	memory     *MemoryStore
	scheduler  *Scheduler // nil unless cfg.EnableScheduling
	connectors *connectors.ConnectorHub
	registry   *llm.ModelRegistry
	taskRepo   TaskRepository
	eventBus   *EventBus
	mu         sync.RWMutex             // guards tasks
	tasks      map[string]*ComputerTask // in-memory cache of live tasks by ID
}
|
||||
|
||||
// NewComputer wires together the planner, router, executor, memory store and
// connector hub, and optionally the sandbox and scheduler per cfg flags.
func NewComputer(cfg ComputerConfig, deps Dependencies) *Computer {
	eventBus := NewEventBus()

	c := &Computer{
		cfg:      cfg,
		registry: deps.Registry,
		taskRepo: deps.TaskRepo,
		eventBus: eventBus,
		tasks:    make(map[string]*ComputerTask),
	}

	c.planner = NewPlanner(deps.Registry)
	c.router = NewRouter(deps.Registry)
	c.executor = NewExecutor(c.router, cfg.MaxParallelTasks)
	c.memory = NewMemoryStore(deps.MemoryRepo)
	c.connectors = connectors.NewConnectorHub()

	if cfg.EnableSandbox {
		c.sandbox = NewSandboxManager(SandboxConfig{
			Image:   cfg.SandboxImage,
			Timeout: cfg.SubTaskTimeout,
		})
		c.executor.SetSandbox(c.sandbox)
	}

	if cfg.EnableScheduling {
		c.scheduler = NewScheduler(deps.TaskRepo, c)
	}

	return c
}
|
||||
|
||||
// Execute creates and runs a ComputerTask for the user's query. When
// opts.ResumeFromID is set it resumes from a checkpoint instead. When
// opts.Async is true the pending task is returned immediately and execution
// continues in a background goroutine; otherwise the call blocks.
func (c *Computer) Execute(ctx context.Context, userID, query string, opts ExecuteOptions) (*ComputerTask, error) {
	if opts.ResumeFromID != "" {
		return c.resumeFromCheckpoint(ctx, opts.ResumeFromID, opts)
	}

	// Duration mode controls max runtime, checkpoint cadence and iteration cap.
	durationMode := opts.DurationMode
	if durationMode == "" {
		durationMode = DurationMedium
	}

	maxDuration, _, _, maxIter := GetDurationConfig(durationMode)

	task := &ComputerTask{
		ID:            uuid.New().String(),
		UserID:        userID,
		Query:         query,
		Status:        StatusPending,
		Memory:        make(map[string]interface{}),
		CreatedAt:     time.Now(),
		UpdatedAt:     time.Now(),
		DurationMode:  durationMode,
		MaxDuration:   maxDuration,
		MaxIterations: maxIter,
		Priority:      opts.Priority,
	}

	if opts.Priority == "" {
		task.Priority = PriorityNormal
	}

	if opts.ResourceLimits != nil {
		task.ResourceLimits = opts.ResourceLimits
	}

	if opts.Schedule != nil {
		task.Schedule = opts.Schedule
		task.Status = StatusScheduled
	}

	// NOTE(review): opts.Context replaces the fresh Memory map wholesale, and
	// a StatusScheduled task still falls through to immediate execution below
	// — presumably the scheduler also re-runs it; confirm intended.
	if opts.Context != nil {
		task.Memory = opts.Context
	}

	estimatedEnd := time.Now().Add(maxDuration)
	task.EstimatedEnd = &estimatedEnd

	if err := c.taskRepo.Create(ctx, task); err != nil {
		return nil, fmt.Errorf("failed to create task: %w", err)
	}

	c.mu.Lock()
	c.tasks[task.ID] = task
	c.mu.Unlock()

	c.emitEvent(TaskEvent{
		Type:      EventTaskCreated,
		TaskID:    task.ID,
		Status:    task.Status,
		Message:   fmt.Sprintf("Task created (mode: %s, max duration: %v)", durationMode, maxDuration),
		Timestamp: time.Now(),
		Data: map[string]interface{}{
			"durationMode":  durationMode,
			"maxDuration":   maxDuration.String(),
			"maxIterations": maxIter,
		},
	})

	if opts.Async {
		// Detach from the request context so the task outlives the caller.
		go c.executeTaskWithCheckpoints(context.Background(), task, opts)
		return task, nil
	}

	return c.executeTaskWithCheckpoints(ctx, task, opts)
}
|
||||
|
||||
// resumeFromCheckpoint reloads a task by ID and restarts execution from its
// stored checkpoint. Fails when the task has no checkpoint.
func (c *Computer) resumeFromCheckpoint(ctx context.Context, checkpointID string, opts ExecuteOptions) (*ComputerTask, error) {
	// NOTE(review): the "checkpointID" is treated as a task ID here — the
	// checkpoint used is whatever task.Checkpoint holds; confirm naming.
	task, err := c.taskRepo.GetByID(ctx, checkpointID)
	if err != nil {
		return nil, fmt.Errorf("task not found: %w", err)
	}

	if task.Checkpoint == nil {
		return nil, errors.New("no checkpoint found for this task")
	}

	task.Status = StatusExecuting
	now := time.Now()
	task.ResumedAt = &now
	task.UpdatedAt = now

	c.emitEvent(TaskEvent{
		Type:      EventResumed,
		TaskID:    task.ID,
		Status:    task.Status,
		Message:   fmt.Sprintf("Resumed from checkpoint (wave: %d, subtask: %d)", task.Checkpoint.WaveIndex, task.Checkpoint.SubTaskIndex),
		Progress:  task.Checkpoint.Progress,
		Timestamp: time.Now(),
	})

	c.mu.Lock()
	c.tasks[task.ID] = task
	c.mu.Unlock()

	if opts.Async {
		go c.executeTaskWithCheckpoints(context.Background(), task, opts)
		return task, nil
	}

	return c.executeTaskWithCheckpoints(ctx, task, opts)
}
|
||||
|
||||
// executeTask is a thin compatibility wrapper; all execution goes through the
// checkpoint-aware path.
func (c *Computer) executeTask(ctx context.Context, task *ComputerTask, opts ExecuteOptions) (*ComputerTask, error) {
	return c.executeTaskWithCheckpoints(ctx, task, opts)
}
|
||||
|
||||
// executeTaskWithCheckpoints is the main execution loop. It plans the task if
// needed, then runs subtasks in dependency waves, enforcing a duration-mode
// timeout and a cost budget, emitting heartbeats, and saving checkpoints on
// timeout, error, budget exhaustion and on a periodic ticker.
//
// NOTE(review): the heartbeat goroutine reads/writes task fields (HeartbeatAt,
// Progress, TotalCost) without holding c.mu while the main loop mutates them —
// this looks like a data race under -race; confirm and guard if so.
func (c *Computer) executeTaskWithCheckpoints(ctx context.Context, task *ComputerTask, opts ExecuteOptions) (*ComputerTask, error) {
	maxDuration, checkpointFreq, heartbeatFreq, _ := GetDurationConfig(task.DurationMode)

	// An explicit per-call timeout (seconds) overrides the mode's limit.
	if opts.Timeout > 0 {
		maxDuration = time.Duration(opts.Timeout) * time.Second
	}

	ctx, cancel := context.WithTimeout(ctx, maxDuration)
	defer cancel()

	// Budget precedence: task resource limits > per-call MaxCost > global default.
	budget := c.cfg.TotalBudget
	if opts.MaxCost > 0 {
		budget = opts.MaxCost
	}
	if task.ResourceLimits != nil && task.ResourceLimits.MaxTotalCost > 0 {
		budget = task.ResourceLimits.MaxTotalCost
	}

	// When resuming, skip completed waves and restore checkpointed memory.
	startWave := 0
	if task.Checkpoint != nil {
		startWave = task.Checkpoint.WaveIndex
		for k, v := range task.Checkpoint.Memory {
			task.Memory[k] = v
		}
	}

	// Plan once; resumed tasks reuse their stored plan.
	if task.Plan == nil {
		task.Status = StatusPlanning
		task.UpdatedAt = time.Now()
		c.updateTask(ctx, task)

		c.emitEvent(TaskEvent{
			Type:      EventTaskStarted,
			TaskID:    task.ID,
			Status:    StatusPlanning,
			Message:   "Planning task execution",
			Timestamp: time.Now(),
		})

		// Merge long-term user memory with task-local memory; task-local
		// entries win on key collision.
		userMemory, _ := c.memory.GetUserContext(ctx, task.UserID)
		memoryContext := make(map[string]interface{})
		for k, v := range userMemory {
			memoryContext[k] = v
		}
		for k, v := range task.Memory {
			memoryContext[k] = v
		}

		plan, err := c.planner.Plan(ctx, task.Query, memoryContext)
		if err != nil {
			task.Status = StatusFailed
			task.Error = fmt.Sprintf("Planning failed: %v", err)
			task.UpdatedAt = time.Now()
			c.updateTask(ctx, task)
			c.emitEvent(TaskEvent{
				Type:      EventTaskFailed,
				TaskID:    task.ID,
				Status:    StatusFailed,
				Message:   task.Error,
				Timestamp: time.Now(),
			})
			return task, err
		}

		task.Plan = plan
		task.SubTasks = plan.SubTasks
	}

	task.Status = StatusLongRunning
	task.UpdatedAt = time.Now()
	c.updateTask(ctx, task)

	c.emitEvent(TaskEvent{
		Type:     EventTaskProgress,
		TaskID:   task.ID,
		Status:   StatusLongRunning,
		Progress: 10,
		Message:  fmt.Sprintf("Executing %d subtasks (long-running mode)", len(task.Plan.SubTasks)),
		Data: map[string]interface{}{
			"plan":           task.Plan,
			"durationMode":   task.DurationMode,
			"checkpointFreq": checkpointFreq.String(),
		},
		Timestamp: time.Now(),
	})

	heartbeatTicker := time.NewTicker(heartbeatFreq)
	defer heartbeatTicker.Stop()

	checkpointTicker := time.NewTicker(checkpointFreq)
	defer checkpointTicker.Stop()

	// Heartbeat publisher; exits when ctx is cancelled (incl. normal return
	// via the deferred cancel above).
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-heartbeatTicker.C:
				now := time.Now()
				task.HeartbeatAt = &now
				c.emitEvent(TaskEvent{
					Type:     EventHeartbeat,
					TaskID:   task.ID,
					Progress: task.Progress,
					Message:  fmt.Sprintf("Heartbeat: %d%% complete, cost: $%.4f", task.Progress, task.TotalCost),
					Data: map[string]interface{}{
						"runtime": time.Since(task.CreatedAt).String(),
						"cost":    task.TotalCost,
					},
					Timestamp: now,
				})
			}
		}
	}()

	// Execute dependency waves in order; each wave's subtasks run in parallel.
	totalSubTasks := len(task.Plan.ExecutionOrder)
	for waveIdx := startWave; waveIdx < totalSubTasks; waveIdx++ {
		// Periodic checkpoints are only taken at wave boundaries (the select
		// is non-blocking); a long wave can exceed checkpointFreq.
		select {
		case <-ctx.Done():
			c.saveCheckpoint(task, waveIdx, 0, "context_timeout")
			return task, ctx.Err()
		case <-checkpointTicker.C:
			c.saveCheckpoint(task, waveIdx, 0, "periodic")
		default:
		}

		// Budget guard: pause (not fail) so the task can be resumed later.
		if budget > 0 && task.TotalCost >= budget {
			c.saveCheckpoint(task, waveIdx, 0, "budget_exceeded")
			task.Status = StatusPaused
			task.Message = fmt.Sprintf("Paused: budget exceeded ($%.2f / $%.2f)", task.TotalCost, budget)
			c.updateTask(ctx, task)
			return task, nil
		}

		// Materialize this wave's subtasks from their IDs.
		wave := task.Plan.ExecutionOrder[waveIdx]
		waveTasks := make([]SubTask, 0)
		for _, subTaskID := range wave {
			for i := range task.SubTasks {
				if task.SubTasks[i].ID == subTaskID {
					waveTasks = append(waveTasks, task.SubTasks[i])
					break
				}
			}
		}

		results, err := c.executor.ExecuteGroup(ctx, waveTasks, budget-task.TotalCost)
		if err != nil {
			c.saveCheckpoint(task, waveIdx, 0, "execution_error")
			task.Status = StatusFailed
			task.Error = fmt.Sprintf("Execution failed at wave %d: %v", waveIdx, err)
			task.UpdatedAt = time.Now()
			c.updateTask(ctx, task)
			return task, err
		}

		// Fold results back into the subtask records and collect artifacts.
		for _, result := range results {
			for i := range task.SubTasks {
				if task.SubTasks[i].ID == result.SubTaskID {
					task.SubTasks[i].Output = result.Output
					task.SubTasks[i].Cost = result.Cost
					task.SubTasks[i].Status = StatusCompleted
					now := time.Now()
					task.SubTasks[i].CompletedAt = &now
					if result.Error != nil {
						task.SubTasks[i].Status = StatusFailed
						task.SubTasks[i].Error = result.Error.Error()
					}
					break
				}
			}

			task.TotalCost += result.Cost
			task.TotalRuntime = time.Since(task.CreatedAt)

			for _, artifact := range result.Artifacts {
				task.Artifacts = append(task.Artifacts, artifact)
				c.emitEvent(TaskEvent{
					Type:      EventArtifact,
					TaskID:    task.ID,
					SubTaskID: result.SubTaskID,
					Data: map[string]interface{}{
						"artifact": artifact,
					},
					Timestamp: time.Now(),
				})
			}
		}

		// Progress maps wave completion linearly onto the 10–90% range.
		progress := 10 + int(float64(waveIdx+1)/float64(totalSubTasks)*80)
		task.Progress = progress
		task.Iterations = waveIdx + 1
		task.UpdatedAt = time.Now()
		c.updateTask(ctx, task)

		c.emitEvent(TaskEvent{
			Type:     EventIteration,
			TaskID:   task.ID,
			Progress: progress,
			Message:  fmt.Sprintf("Completed wave %d/%d (runtime: %v)", waveIdx+1, totalSubTasks, time.Since(task.CreatedAt).Round(time.Second)),
			Data: map[string]interface{}{
				"wave":      waveIdx + 1,
				"total":     totalSubTasks,
				"cost":      task.TotalCost,
				"runtime":   time.Since(task.CreatedAt).String(),
				"artifacts": len(task.Artifacts),
			},
			Timestamp: time.Now(),
		})
	}

	task.Status = StatusCompleted
	task.Progress = 100
	now := time.Now()
	task.CompletedAt = &now
	task.UpdatedAt = now
	task.TotalRuntime = time.Since(task.CreatedAt)
	c.updateTask(ctx, task)

	c.emitEvent(TaskEvent{
		Type:     EventTaskCompleted,
		TaskID:   task.ID,
		Status:   StatusCompleted,
		Progress: 100,
		Message:  fmt.Sprintf("Task completed (runtime: %v, cost: $%.4f)", task.TotalRuntime.Round(time.Second), task.TotalCost),
		Data: map[string]interface{}{
			"artifacts":    task.Artifacts,
			"totalCost":    task.TotalCost,
			"totalRuntime": task.TotalRuntime.String(),
			"iterations":   task.Iterations,
		},
		Timestamp: time.Now(),
	})

	// Persist subtask outputs into long-term memory.
	c.storeTaskResults(ctx, task)

	return task, nil
}
|
||||
|
||||
// saveCheckpoint snapshots the task's position (wave/subtask), memory, cost
// and artifacts, persists it via the repo (best effort, 10s cap), and emits a
// checkpoint event. reason records why the checkpoint was taken.
func (c *Computer) saveCheckpoint(task *ComputerTask, waveIdx, subTaskIdx int, reason string) {
	checkpoint := Checkpoint{
		ID:           uuid.New().String(),
		TaskID:       task.ID,
		WaveIndex:    waveIdx,
		SubTaskIndex: subTaskIdx,
		State:        make(map[string]interface{}),
		Progress:     task.Progress,
		// NOTE(review): Memory is stored by reference, not copied — later task
		// mutations will show up inside this checkpoint; confirm acceptable.
		Memory:       task.Memory,
		CreatedAt:    time.Now(),
		RuntimeSoFar: time.Since(task.CreatedAt),
		CostSoFar:    task.TotalCost,
		Reason:       reason,
	}

	for _, artifact := range task.Artifacts {
		checkpoint.Artifacts = append(checkpoint.Artifacts, artifact.ID)
	}

	task.Checkpoint = &checkpoint
	task.Checkpoints = append(task.Checkpoints, checkpoint)
	task.UpdatedAt = time.Now()

	// Best-effort persistence; a failed update is not fatal to the run.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	c.taskRepo.Update(ctx, task)

	c.emitEvent(TaskEvent{
		Type:     EventCheckpointSaved,
		TaskID:   task.ID,
		Progress: task.Progress,
		Message:  fmt.Sprintf("Checkpoint saved: %s (wave %d)", reason, waveIdx),
		Data: map[string]interface{}{
			"checkpointId": checkpoint.ID,
			"waveIndex":    waveIdx,
			"subTaskIndex": subTaskIdx,
			"reason":       reason,
			"runtime":      checkpoint.RuntimeSoFar.String(),
			"cost":         checkpoint.CostSoFar,
		},
		Timestamp: time.Now(),
	})
}
|
||||
|
||||
// Pause checkpoints a running task and marks it StatusPaused. Tasks not in the
// in-memory cache are loaded from the repository. Only StatusExecuting and
// StatusLongRunning tasks can be paused.
func (c *Computer) Pause(ctx context.Context, taskID string) error {
	c.mu.Lock()
	task, ok := c.tasks[taskID]
	if !ok {
		// Drop the lock around the repo call, then re-acquire.
		// NOTE(review): the loaded task is never stored back into c.tasks,
		// and the map is not re-checked after re-locking — confirm intended.
		c.mu.Unlock()
		var err error
		task, err = c.taskRepo.GetByID(ctx, taskID)
		if err != nil {
			return err
		}
		c.mu.Lock()
	}

	if task.Status != StatusExecuting && task.Status != StatusLongRunning {
		c.mu.Unlock()
		return errors.New("task is not running")
	}

	now := time.Now()
	task.Status = StatusPaused
	task.PausedAt = &now
	task.UpdatedAt = now
	c.mu.Unlock()

	// Checkpoint at the last completed wave so Resume can pick up from there.
	c.saveCheckpoint(task, task.Iterations, 0, "user_paused")

	c.emitEvent(TaskEvent{
		Type:      EventPaused,
		TaskID:    taskID,
		Status:    StatusPaused,
		Progress:  task.Progress,
		Message:   "Task paused by user",
		Timestamp: now,
	})

	return c.taskRepo.Update(ctx, task)
}
|
||||
|
||||
func (c *Computer) Resume(ctx context.Context, taskID string, userInput string) error {
|
||||
c.mu.RLock()
|
||||
task, ok := c.tasks[taskID]
|
||||
c.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
var err error
|
||||
task, err = c.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("task not found: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if task.Status != StatusWaiting {
|
||||
return errors.New("task is not waiting for user input")
|
||||
}
|
||||
|
||||
task.Memory["user_input"] = userInput
|
||||
task.Status = StatusExecuting
|
||||
task.UpdatedAt = time.Now()
|
||||
|
||||
go c.executeTask(context.Background(), task, ExecuteOptions{Async: true})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Computer) Cancel(ctx context.Context, taskID string) error {
|
||||
c.mu.Lock()
|
||||
task, ok := c.tasks[taskID]
|
||||
if ok {
|
||||
task.Status = StatusCancelled
|
||||
task.UpdatedAt = time.Now()
|
||||
}
|
||||
c.mu.Unlock()
|
||||
|
||||
if !ok {
|
||||
task, err := c.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("task not found: %w", err)
|
||||
}
|
||||
task.Status = StatusCancelled
|
||||
task.UpdatedAt = time.Now()
|
||||
return c.taskRepo.Update(ctx, task)
|
||||
}
|
||||
|
||||
c.emitEvent(TaskEvent{
|
||||
Type: EventTaskFailed,
|
||||
TaskID: taskID,
|
||||
Status: StatusCancelled,
|
||||
Message: "Task cancelled by user",
|
||||
Timestamp: time.Now(),
|
||||
})
|
||||
|
||||
return c.taskRepo.Update(ctx, task)
|
||||
}
|
||||
|
||||
func (c *Computer) GetStatus(ctx context.Context, taskID string) (*ComputerTask, error) {
|
||||
c.mu.RLock()
|
||||
task, ok := c.tasks[taskID]
|
||||
c.mu.RUnlock()
|
||||
|
||||
if ok {
|
||||
return task, nil
|
||||
}
|
||||
|
||||
return c.taskRepo.GetByID(ctx, taskID)
|
||||
}
|
||||
|
||||
// GetUserTasks lists a user's tasks with limit/offset pagination.
func (c *Computer) GetUserTasks(ctx context.Context, userID string, limit, offset int) ([]ComputerTask, error) {
	return c.taskRepo.GetByUserID(ctx, userID, limit, offset)
}
|
||||
|
||||
// Stream subscribes to the task's event feed. The caller should Unsubscribe
// from the event bus when done to release the channel.
func (c *Computer) Stream(ctx context.Context, taskID string) (<-chan TaskEvent, error) {
	return c.eventBus.Subscribe(taskID), nil
}
|
||||
|
||||
// updateTask refreshes the in-memory cache and best-effort persists the task.
func (c *Computer) updateTask(ctx context.Context, task *ComputerTask) {
	c.mu.Lock()
	c.tasks[task.ID] = task
	c.mu.Unlock()

	// Persistence failures are deliberately ignored; the in-memory copy is
	// authoritative while the task runs.
	_ = c.taskRepo.Update(ctx, task)
}
|
||||
|
||||
// emitEvent publishes the event to subscribers of its task.
func (c *Computer) emitEvent(event TaskEvent) {
	c.eventBus.Publish(event.TaskID, event)
}
|
||||
|
||||
func (c *Computer) storeTaskResults(ctx context.Context, task *ComputerTask) {
|
||||
for _, st := range task.SubTasks {
|
||||
if st.Output != nil {
|
||||
outputJSON, _ := json.Marshal(st.Output)
|
||||
entry := &MemoryEntry{
|
||||
ID: uuid.New().String(),
|
||||
UserID: task.UserID,
|
||||
TaskID: task.ID,
|
||||
Key: fmt.Sprintf("subtask_%s_result", st.ID),
|
||||
Value: string(outputJSON),
|
||||
Type: MemoryTypeResult,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
_ = c.memory.Store(ctx, task.UserID, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartScheduler starts the task scheduler if scheduling is enabled.
func (c *Computer) StartScheduler(ctx context.Context) {
	if c.scheduler != nil {
		c.scheduler.Start(ctx)
	}
}
|
||||
|
||||
// StopScheduler stops the task scheduler if scheduling is enabled.
func (c *Computer) StopScheduler() {
	if c.scheduler != nil {
		c.scheduler.Stop()
	}
}
|
||||
|
||||
// EventBus fans task events out to per-task subscriber channels.
type EventBus struct {
	subscribers map[string][]chan TaskEvent // taskID -> subscriber channels
	mu          sync.RWMutex                // guards subscribers
}
|
||||
|
||||
// NewEventBus returns an empty, ready-to-use event bus.
func NewEventBus() *EventBus {
	return &EventBus{
		subscribers: make(map[string][]chan TaskEvent),
	}
}
|
||||
|
||||
func (eb *EventBus) Subscribe(taskID string) <-chan TaskEvent {
|
||||
eb.mu.Lock()
|
||||
defer eb.mu.Unlock()
|
||||
|
||||
ch := make(chan TaskEvent, 100)
|
||||
eb.subscribers[taskID] = append(eb.subscribers[taskID], ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (eb *EventBus) Unsubscribe(taskID string, ch <-chan TaskEvent) {
|
||||
eb.mu.Lock()
|
||||
defer eb.mu.Unlock()
|
||||
|
||||
subs := eb.subscribers[taskID]
|
||||
for i, sub := range subs {
|
||||
if sub == ch {
|
||||
eb.subscribers[taskID] = append(subs[:i], subs[i+1:]...)
|
||||
close(sub)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (eb *EventBus) Publish(taskID string, event TaskEvent) {
|
||||
eb.mu.RLock()
|
||||
subs := eb.subscribers[taskID]
|
||||
eb.mu.RUnlock()
|
||||
|
||||
for _, ch := range subs {
|
||||
select {
|
||||
case ch <- event:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
104
backend/internal/computer/connectors/connector.go
Normal file
104
backend/internal/computer/connectors/connector.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package connectors
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Connector is an external integration (email, calendar, ...) exposing a set
// of named actions that can be validated and executed.
type Connector interface {
	ID() string
	Name() string
	Description() string
	// Execute runs the named action with the given parameters.
	Execute(ctx context.Context, action string, params map[string]interface{}) (interface{}, error)
	// GetActions describes the actions this connector supports.
	GetActions() []Action
	// Validate checks params before execution.
	Validate(params map[string]interface{}) error
}
|
||||
|
||||
// Action describes one connector action and its JSON-schema-style parameters.
type Action struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Schema      map[string]interface{} `json:"schema"`   // parameter schema
	Required    []string               `json:"required"` // required parameter names
}
|
||||
|
||||
// ConnectorHub is a concurrency-safe registry of connectors keyed by ID.
type ConnectorHub struct {
	connectors map[string]Connector
	mu         sync.RWMutex // guards connectors
}
|
||||
|
||||
// NewConnectorHub returns an empty hub.
func NewConnectorHub() *ConnectorHub {
	return &ConnectorHub{
		connectors: make(map[string]Connector),
	}
}
|
||||
|
||||
func (h *ConnectorHub) Register(connector Connector) {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
h.connectors[connector.ID()] = connector
|
||||
}
|
||||
|
||||
func (h *ConnectorHub) Unregister(id string) {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
delete(h.connectors, id)
|
||||
}
|
||||
|
||||
func (h *ConnectorHub) Get(id string) (Connector, error) {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
|
||||
connector, ok := h.connectors[id]
|
||||
if !ok {
|
||||
return nil, errors.New("connector not found: " + id)
|
||||
}
|
||||
return connector, nil
|
||||
}
|
||||
|
||||
func (h *ConnectorHub) List() []Connector {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
|
||||
result := make([]Connector, 0, len(h.connectors))
|
||||
for _, c := range h.connectors {
|
||||
result = append(result, c)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (h *ConnectorHub) Execute(ctx context.Context, connectorID, action string, params map[string]interface{}) (interface{}, error) {
|
||||
connector, err := h.Get(connectorID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := connector.Validate(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return connector.Execute(ctx, action, params)
|
||||
}
|
||||
|
||||
// ConnectorInfo is a serializable description of a connector and its actions.
type ConnectorInfo struct {
	ID          string   `json:"id"`
	Name        string   `json:"name"`
	Description string   `json:"description"`
	Actions     []Action `json:"actions"`
}
|
||||
|
||||
func (h *ConnectorHub) GetInfo() []ConnectorInfo {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
|
||||
result := make([]ConnectorInfo, 0, len(h.connectors))
|
||||
for _, c := range h.connectors {
|
||||
result = append(result, ConnectorInfo{
|
||||
ID: c.ID(),
|
||||
Name: c.Name(),
|
||||
Description: c.Description(),
|
||||
Actions: c.GetActions(),
|
||||
})
|
||||
}
|
||||
return result
|
||||
}
|
||||
215
backend/internal/computer/connectors/email.go
Normal file
215
backend/internal/computer/connectors/email.go
Normal file
@@ -0,0 +1,215 @@
|
||||
package connectors
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/smtp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// EmailConfig configures the SMTP transport used by EmailConnector.
type EmailConfig struct {
	SMTPHost    string
	SMTPPort    int
	Username    string // empty disables authentication (together with Password)
	Password    string
	FromAddress string // envelope sender and From header address
	FromName    string // optional display name for the From header
	UseTLS      bool   // connect with implicit TLS instead of plain SMTP
	AllowHTML   bool   // permit text/html bodies
}
|
||||
|
||||
// EmailConnector sends email over SMTP as a Connector implementation.
type EmailConnector struct {
	cfg EmailConfig
}
|
||||
|
||||
// NewEmailConnector returns a connector using the given SMTP configuration.
func NewEmailConnector(cfg EmailConfig) *EmailConnector {
	return &EmailConnector{cfg: cfg}
}
|
||||
|
||||
// ID returns the connector's registry key.
func (e *EmailConnector) ID() string {
	return "email"
}
|
||||
|
||||
// Name returns the human-readable connector name.
func (e *EmailConnector) Name() string {
	return "Email"
}
|
||||
|
||||
// Description returns a one-line summary of the connector.
func (e *EmailConnector) Description() string {
	return "Send emails via SMTP"
}
|
||||
|
||||
// GetActions describes the single "send" action and its parameter schema.
func (e *EmailConnector) GetActions() []Action {
	return []Action{
		{
			Name:        "send",
			Description: "Send an email",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"to":      map[string]interface{}{"type": "string", "description": "Recipient email address"},
					"subject": map[string]interface{}{"type": "string", "description": "Email subject"},
					"body":    map[string]interface{}{"type": "string", "description": "Email body"},
					"html":    map[string]interface{}{"type": "boolean", "description": "Whether body is HTML"},
					"cc":      map[string]interface{}{"type": "string", "description": "CC recipients (comma-separated)"},
					"bcc":     map[string]interface{}{"type": "string", "description": "BCC recipients (comma-separated)"},
				},
			},
			Required: []string{"to", "subject", "body"},
		},
	}
}
|
||||
|
||||
func (e *EmailConnector) Validate(params map[string]interface{}) error {
|
||||
if _, ok := params["to"]; !ok {
|
||||
return errors.New("'to' is required")
|
||||
}
|
||||
if _, ok := params["subject"]; !ok {
|
||||
return errors.New("'subject' is required")
|
||||
}
|
||||
if _, ok := params["body"]; !ok {
|
||||
return errors.New("'body' is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EmailConnector) Execute(ctx context.Context, action string, params map[string]interface{}) (interface{}, error) {
|
||||
switch action {
|
||||
case "send":
|
||||
return e.send(ctx, params)
|
||||
default:
|
||||
return nil, errors.New("unknown action: " + action)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EmailConnector) send(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
to := params["to"].(string)
|
||||
subject := params["subject"].(string)
|
||||
body := params["body"].(string)
|
||||
|
||||
isHTML := false
|
||||
if html, ok := params["html"].(bool); ok {
|
||||
isHTML = html && e.cfg.AllowHTML
|
||||
}
|
||||
|
||||
var cc, bcc []string
|
||||
if ccStr, ok := params["cc"].(string); ok && ccStr != "" {
|
||||
cc = strings.Split(ccStr, ",")
|
||||
for i := range cc {
|
||||
cc[i] = strings.TrimSpace(cc[i])
|
||||
}
|
||||
}
|
||||
if bccStr, ok := params["bcc"].(string); ok && bccStr != "" {
|
||||
bcc = strings.Split(bccStr, ",")
|
||||
for i := range bcc {
|
||||
bcc[i] = strings.TrimSpace(bcc[i])
|
||||
}
|
||||
}
|
||||
|
||||
from := e.cfg.FromAddress
|
||||
if e.cfg.FromName != "" {
|
||||
from = fmt.Sprintf("%s <%s>", e.cfg.FromName, e.cfg.FromAddress)
|
||||
}
|
||||
|
||||
var msg strings.Builder
|
||||
msg.WriteString(fmt.Sprintf("From: %s\r\n", from))
|
||||
msg.WriteString(fmt.Sprintf("To: %s\r\n", to))
|
||||
if len(cc) > 0 {
|
||||
msg.WriteString(fmt.Sprintf("Cc: %s\r\n", strings.Join(cc, ", ")))
|
||||
}
|
||||
msg.WriteString(fmt.Sprintf("Subject: %s\r\n", subject))
|
||||
msg.WriteString("MIME-Version: 1.0\r\n")
|
||||
|
||||
if isHTML {
|
||||
msg.WriteString("Content-Type: text/html; charset=\"UTF-8\"\r\n")
|
||||
} else {
|
||||
msg.WriteString("Content-Type: text/plain; charset=\"UTF-8\"\r\n")
|
||||
}
|
||||
|
||||
msg.WriteString("\r\n")
|
||||
msg.WriteString(body)
|
||||
|
||||
recipients := []string{to}
|
||||
recipients = append(recipients, cc...)
|
||||
recipients = append(recipients, bcc...)
|
||||
|
||||
addr := fmt.Sprintf("%s:%d", e.cfg.SMTPHost, e.cfg.SMTPPort)
|
||||
|
||||
var auth smtp.Auth
|
||||
if e.cfg.Username != "" && e.cfg.Password != "" {
|
||||
auth = smtp.PlainAuth("", e.cfg.Username, e.cfg.Password, e.cfg.SMTPHost)
|
||||
}
|
||||
|
||||
var err error
|
||||
if e.cfg.UseTLS {
|
||||
err = e.sendWithTLS(addr, auth, e.cfg.FromAddress, recipients, []byte(msg.String()))
|
||||
} else {
|
||||
err = smtp.SendMail(addr, auth, e.cfg.FromAddress, recipients, []byte(msg.String()))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return map[string]interface{}{
|
||||
"success": false,
|
||||
"error": err.Error(),
|
||||
}, err
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"to": to,
|
||||
"subject": subject,
|
||||
"recipients": len(recipients),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// sendWithTLS delivers a message over an implicit-TLS SMTP connection
// (TLS from the first byte, e.g. port 465), as opposed to STARTTLS.
// It mirrors the smtp.SendMail flow by hand: dial, optional AUTH,
// MAIL FROM, one RCPT TO per recipient, DATA, then QUIT.
func (e *EmailConnector) sendWithTLS(addr string, auth smtp.Auth, from string, to []string, msg []byte) error {
	// Verify the server certificate against the configured SMTP host.
	tlsConfig := &tls.Config{
		ServerName: e.cfg.SMTPHost,
	}

	conn, err := tls.Dial("tcp", addr, tlsConfig)
	if err != nil {
		return err
	}
	defer conn.Close()

	client, err := smtp.NewClient(conn, e.cfg.SMTPHost)
	if err != nil {
		return err
	}
	// Guards the early-return error paths; after a successful Quit below
	// this (and conn.Close) is effectively a no-op on a closed connection.
	defer client.Close()

	if auth != nil {
		if err := client.Auth(auth); err != nil {
			return err
		}
	}

	if err := client.Mail(from); err != nil {
		return err
	}

	// Every recipient (To, Cc and Bcc alike) must be announced individually.
	for _, recipient := range to {
		if err := client.Rcpt(recipient); err != nil {
			return err
		}
	}

	w, err := client.Data()
	if err != nil {
		return err
	}

	_, err = w.Write(msg)
	if err != nil {
		return err
	}

	// Closing the data writer finalizes the DATA command; its error
	// carries the server's acceptance/rejection of the message.
	err = w.Close()
	if err != nil {
		return err
	}

	return client.Quit()
}
|
||||
432
backend/internal/computer/connectors/storage.go
Normal file
432
backend/internal/computer/connectors/storage.go
Normal file
@@ -0,0 +1,432 @@
|
||||
package connectors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
// StorageConfig holds the connection settings for an S3-compatible
// object store (MinIO, AWS S3, etc.).
type StorageConfig struct {
	Endpoint        string // host[:port] of the S3 endpoint
	AccessKeyID     string
	SecretAccessKey string
	BucketName      string // bucket all operations are scoped to
	UseSSL          bool   // connect with TLS
	Region          string
	PublicURL       string // optional base URL used to build public object links
}

// StorageConnector exposes object-store operations (upload, download,
// delete, list, presigned URLs) as connector actions backed by minio-go.
type StorageConnector struct {
	cfg    StorageConfig
	client *minio.Client
}
|
||||
|
||||
func NewStorageConnector(cfg StorageConfig) (*StorageConnector, error) {
|
||||
client, err := minio.New(cfg.Endpoint, &minio.Options{
|
||||
Creds: credentials.NewStaticV4(cfg.AccessKeyID, cfg.SecretAccessKey, ""),
|
||||
Secure: cfg.UseSSL,
|
||||
Region: cfg.Region,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create storage client: %w", err)
|
||||
}
|
||||
|
||||
return &StorageConnector{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ID returns the stable connector identifier used for routing.
func (s *StorageConnector) ID() string {
	return "storage"
}

// Name returns the human-readable connector name.
func (s *StorageConnector) Name() string {
	return "Storage"
}

// Description returns a short summary of what this connector does.
func (s *StorageConnector) Description() string {
	return "Store and retrieve files from S3-compatible storage"
}
|
||||
|
||||
// GetActions describes the five supported storage actions together with
// JSON-Schema-style parameter definitions for each.
func (s *StorageConnector) GetActions() []Action {
	return []Action{
		{
			Name:        "upload",
			Description: "Upload a file",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"path":         map[string]interface{}{"type": "string", "description": "Storage path/key"},
					"content":      map[string]interface{}{"type": "string", "description": "File content (base64 or text)"},
					"content_type": map[string]interface{}{"type": "string", "description": "MIME type"},
					"public":       map[string]interface{}{"type": "boolean", "description": "Make file publicly accessible"},
				},
			},
			Required: []string{"path", "content"},
		},
		{
			Name:        "download",
			Description: "Download a file",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"path": map[string]interface{}{"type": "string", "description": "Storage path/key"},
				},
			},
			Required: []string{"path"},
		},
		{
			Name:        "delete",
			Description: "Delete a file",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"path": map[string]interface{}{"type": "string", "description": "Storage path/key"},
				},
			},
			Required: []string{"path"},
		},
		{
			// "list" is the only action with no required parameters.
			Name:        "list",
			Description: "List files in a directory",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"prefix": map[string]interface{}{"type": "string", "description": "Path prefix"},
					"limit":  map[string]interface{}{"type": "integer", "description": "Max results"},
				},
			},
		},
		{
			Name:        "get_url",
			Description: "Get a presigned URL for a file",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"path":    map[string]interface{}{"type": "string", "description": "Storage path/key"},
					"expires": map[string]interface{}{"type": "integer", "description": "URL expiry in seconds"},
				},
			},
			Required: []string{"path"},
		},
	}
}
|
||||
|
||||
// Validate is intentionally a no-op: required parameters differ per
// action (e.g. "list" needs no path while the others do), so each action
// handler checks its own inputs instead.
func (s *StorageConnector) Validate(params map[string]interface{}) error {
	return nil
}
|
||||
|
||||
func (s *StorageConnector) Execute(ctx context.Context, action string, params map[string]interface{}) (interface{}, error) {
|
||||
switch action {
|
||||
case "upload":
|
||||
return s.upload(ctx, params)
|
||||
case "download":
|
||||
return s.download(ctx, params)
|
||||
case "delete":
|
||||
return s.deleteFile(ctx, params)
|
||||
case "list":
|
||||
return s.list(ctx, params)
|
||||
case "get_url":
|
||||
return s.getURL(ctx, params)
|
||||
default:
|
||||
return nil, errors.New("unknown action: " + action)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageConnector) upload(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
path := params["path"].(string)
|
||||
content := params["content"].(string)
|
||||
|
||||
contentType := "application/octet-stream"
|
||||
if ct, ok := params["content_type"].(string); ok {
|
||||
contentType = ct
|
||||
}
|
||||
|
||||
if contentType == "" {
|
||||
contentType = s.detectContentType(path)
|
||||
}
|
||||
|
||||
reader := bytes.NewReader([]byte(content))
|
||||
size := int64(len(content))
|
||||
|
||||
info, err := s.client.PutObject(ctx, s.cfg.BucketName, path, reader, size, minio.PutObjectOptions{
|
||||
ContentType: contentType,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("upload failed: %w", err)
|
||||
}
|
||||
|
||||
url := ""
|
||||
if s.cfg.PublicURL != "" {
|
||||
url = fmt.Sprintf("%s/%s/%s", strings.TrimSuffix(s.cfg.PublicURL, "/"), s.cfg.BucketName, path)
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"path": path,
|
||||
"size": info.Size,
|
||||
"etag": info.ETag,
|
||||
"url": url,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *StorageConnector) UploadBytes(ctx context.Context, path string, content []byte, contentType string) (string, error) {
|
||||
if contentType == "" {
|
||||
contentType = s.detectContentType(path)
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(content)
|
||||
size := int64(len(content))
|
||||
|
||||
_, err := s.client.PutObject(ctx, s.cfg.BucketName, path, reader, size, minio.PutObjectOptions{
|
||||
ContentType: contentType,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if s.cfg.PublicURL != "" {
|
||||
return fmt.Sprintf("%s/%s/%s", strings.TrimSuffix(s.cfg.PublicURL, "/"), s.cfg.BucketName, path), nil
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (s *StorageConnector) download(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
path := params["path"].(string)
|
||||
|
||||
obj, err := s.client.GetObject(ctx, s.cfg.BucketName, path, minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("download failed: %w", err)
|
||||
}
|
||||
defer obj.Close()
|
||||
|
||||
content, err := io.ReadAll(obj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read failed: %w", err)
|
||||
}
|
||||
|
||||
stat, _ := obj.Stat()
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"path": path,
|
||||
"content": string(content),
|
||||
"size": len(content),
|
||||
"content_type": stat.ContentType,
|
||||
"modified": stat.LastModified,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DownloadBytes fetches the object at path and returns its raw bytes.
// Programmatic counterpart to the "download" action, without the string
// conversion or metadata map.
func (s *StorageConnector) DownloadBytes(ctx context.Context, path string) ([]byte, error) {
	obj, err := s.client.GetObject(ctx, s.cfg.BucketName, path, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	defer obj.Close()

	return io.ReadAll(obj)
}
|
||||
|
||||
func (s *StorageConnector) deleteFile(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
path := params["path"].(string)
|
||||
|
||||
err := s.client.RemoveObject(ctx, s.cfg.BucketName, path, minio.RemoveObjectOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("delete failed: %w", err)
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"path": path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *StorageConnector) list(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
prefix := ""
|
||||
if p, ok := params["prefix"].(string); ok {
|
||||
prefix = p
|
||||
}
|
||||
|
||||
limit := 100
|
||||
if l, ok := params["limit"].(float64); ok {
|
||||
limit = int(l)
|
||||
}
|
||||
|
||||
objects := s.client.ListObjects(ctx, s.cfg.BucketName, minio.ListObjectsOptions{
|
||||
Prefix: prefix,
|
||||
Recursive: true,
|
||||
})
|
||||
|
||||
var files []map[string]interface{}
|
||||
count := 0
|
||||
|
||||
for obj := range objects {
|
||||
if obj.Err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
files = append(files, map[string]interface{}{
|
||||
"path": obj.Key,
|
||||
"size": obj.Size,
|
||||
"modified": obj.LastModified,
|
||||
"etag": obj.ETag,
|
||||
})
|
||||
|
||||
count++
|
||||
if count >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"files": files,
|
||||
"count": len(files),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *StorageConnector) getURL(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
path := params["path"].(string)
|
||||
|
||||
expires := 3600
|
||||
if e, ok := params["expires"].(float64); ok {
|
||||
expires = int(e)
|
||||
}
|
||||
|
||||
url, err := s.client.PresignedGetObject(ctx, s.cfg.BucketName, path, time.Duration(expires)*time.Second, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate URL: %w", err)
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"url": url.String(),
|
||||
"expires": expires,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *StorageConnector) GetPublicURL(path string) string {
|
||||
if s.cfg.PublicURL != "" {
|
||||
return fmt.Sprintf("%s/%s/%s", strings.TrimSuffix(s.cfg.PublicURL, "/"), s.cfg.BucketName, path)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *StorageConnector) detectContentType(path string) string {
|
||||
ext := strings.ToLower(filepath.Ext(path))
|
||||
|
||||
contentTypes := map[string]string{
|
||||
".html": "text/html",
|
||||
".css": "text/css",
|
||||
".js": "application/javascript",
|
||||
".json": "application/json",
|
||||
".xml": "application/xml",
|
||||
".pdf": "application/pdf",
|
||||
".zip": "application/zip",
|
||||
".png": "image/png",
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".gif": "image/gif",
|
||||
".svg": "image/svg+xml",
|
||||
".mp4": "video/mp4",
|
||||
".mp3": "audio/mpeg",
|
||||
".txt": "text/plain",
|
||||
".md": "text/markdown",
|
||||
".csv": "text/csv",
|
||||
".py": "text/x-python",
|
||||
".go": "text/x-go",
|
||||
".rs": "text/x-rust",
|
||||
}
|
||||
|
||||
if ct, ok := contentTypes[ext]; ok {
|
||||
return ct
|
||||
}
|
||||
|
||||
return "application/octet-stream"
|
||||
}
|
||||
|
||||
func (s *StorageConnector) EnsureBucket(ctx context.Context) error {
|
||||
exists, err := s.client.BucketExists(ctx, s.cfg.BucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return s.client.MakeBucket(ctx, s.cfg.BucketName, minio.MakeBucketOptions{
|
||||
Region: s.cfg.Region,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewLocalStorageConnector returns a connector that stores files under
// basePath on the local filesystem.
func NewLocalStorageConnector(basePath string) *LocalStorageConnector {
	return &LocalStorageConnector{basePath: basePath}
}

// LocalStorageConnector is a filesystem-backed alternative to the
// S3-based StorageConnector.
type LocalStorageConnector struct {
	basePath string // root directory all object paths are joined onto
}
|
||||
|
||||
// ID returns the stable connector identifier used for routing.
func (l *LocalStorageConnector) ID() string {
	return "local_storage"
}

// Name returns the human-readable connector name.
func (l *LocalStorageConnector) Name() string {
	return "Local Storage"
}

// Description returns a short summary of what this connector does.
func (l *LocalStorageConnector) Description() string {
	return "Store files on local filesystem"
}
|
||||
|
||||
// GetActions lists the advertised actions. Unlike the S3 connector these
// carry no parameter schemas — only name and description. Note "list" is
// advertised here even though Execute does not implement it.
func (l *LocalStorageConnector) GetActions() []Action {
	return []Action{
		{Name: "upload", Description: "Upload a file"},
		{Name: "download", Description: "Download a file"},
		{Name: "delete", Description: "Delete a file"},
		{Name: "list", Description: "List files"},
	}
}
|
||||
|
||||
// Validate accepts any parameters; Execute asserts the ones each action
// actually needs.
func (l *LocalStorageConnector) Validate(params map[string]interface{}) error {
	return nil
}
|
||||
|
||||
func (l *LocalStorageConnector) Execute(ctx context.Context, action string, params map[string]interface{}) (interface{}, error) {
|
||||
switch action {
|
||||
case "upload":
|
||||
path := params["path"].(string)
|
||||
content := params["content"].(string)
|
||||
fullPath := filepath.Join(l.basePath, path)
|
||||
os.MkdirAll(filepath.Dir(fullPath), 0755)
|
||||
err := os.WriteFile(fullPath, []byte(content), 0644)
|
||||
return map[string]interface{}{"success": err == nil, "path": path}, err
|
||||
|
||||
case "download":
|
||||
path := params["path"].(string)
|
||||
fullPath := filepath.Join(l.basePath, path)
|
||||
content, err := os.ReadFile(fullPath)
|
||||
return map[string]interface{}{"success": err == nil, "content": string(content)}, err
|
||||
|
||||
case "delete":
|
||||
path := params["path"].(string)
|
||||
fullPath := filepath.Join(l.basePath, path)
|
||||
err := os.Remove(fullPath)
|
||||
return map[string]interface{}{"success": err == nil}, err
|
||||
|
||||
default:
|
||||
return nil, errors.New("unknown action")
|
||||
}
|
||||
}
|
||||
263
backend/internal/computer/connectors/telegram.go
Normal file
263
backend/internal/computer/connectors/telegram.go
Normal file
@@ -0,0 +1,263 @@
|
||||
package connectors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TelegramConfig configures access to the Telegram Bot API.
type TelegramConfig struct {
	BotToken string        // bot token; embedded in every request URL
	Timeout  time.Duration // HTTP timeout; a zero value defaults to 30s in NewTelegramConnector
}

// TelegramConnector sends messages, documents and photos through the
// Telegram Bot API over HTTPS.
type TelegramConnector struct {
	cfg    TelegramConfig
	client *http.Client
}
|
||||
|
||||
func NewTelegramConnector(cfg TelegramConfig) *TelegramConnector {
|
||||
timeout := cfg.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = 30 * time.Second
|
||||
}
|
||||
|
||||
return &TelegramConnector{
|
||||
cfg: cfg,
|
||||
client: &http.Client{
|
||||
Timeout: timeout,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ID returns the stable connector identifier used for routing.
func (t *TelegramConnector) ID() string {
	return "telegram"
}

// Name returns the human-readable connector name.
func (t *TelegramConnector) Name() string {
	return "Telegram"
}

// Description returns a short summary of what this connector does.
func (t *TelegramConnector) Description() string {
	return "Send messages via Telegram Bot API"
}
|
||||
|
||||
// GetActions describes the three supported Telegram actions together with
// JSON-Schema-style parameter definitions for each.
func (t *TelegramConnector) GetActions() []Action {
	return []Action{
		{
			Name:        "send_message",
			Description: "Send a text message",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"chat_id":    map[string]interface{}{"type": "string", "description": "Chat ID or @username"},
					"text":       map[string]interface{}{"type": "string", "description": "Message text"},
					"parse_mode": map[string]interface{}{"type": "string", "enum": []string{"HTML", "Markdown", "MarkdownV2"}},
				},
			},
			Required: []string{"chat_id", "text"},
		},
		{
			Name:        "send_document",
			Description: "Send a document/file",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"chat_id":  map[string]interface{}{"type": "string", "description": "Chat ID"},
					"document": map[string]interface{}{"type": "string", "description": "File path or URL"},
					"caption":  map[string]interface{}{"type": "string", "description": "Document caption"},
				},
			},
			Required: []string{"chat_id", "document"},
		},
		{
			Name:        "send_photo",
			Description: "Send a photo",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"chat_id": map[string]interface{}{"type": "string", "description": "Chat ID"},
					"photo":   map[string]interface{}{"type": "string", "description": "Photo URL or file_id"},
					"caption": map[string]interface{}{"type": "string", "description": "Photo caption"},
				},
			},
			Required: []string{"chat_id", "photo"},
		},
	}
}
|
||||
|
||||
func (t *TelegramConnector) Validate(params map[string]interface{}) error {
|
||||
if _, ok := params["chat_id"]; !ok {
|
||||
return errors.New("'chat_id' is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TelegramConnector) Execute(ctx context.Context, action string, params map[string]interface{}) (interface{}, error) {
|
||||
switch action {
|
||||
case "send_message":
|
||||
return t.sendMessage(ctx, params)
|
||||
case "send_document":
|
||||
return t.sendDocument(ctx, params)
|
||||
case "send_photo":
|
||||
return t.sendPhoto(ctx, params)
|
||||
default:
|
||||
return nil, errors.New("unknown action: " + action)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TelegramConnector) sendMessage(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
chatID := params["chat_id"].(string)
|
||||
text := params["text"].(string)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"chat_id": chatID,
|
||||
"text": text,
|
||||
}
|
||||
|
||||
if parseMode, ok := params["parse_mode"].(string); ok {
|
||||
payload["parse_mode"] = parseMode
|
||||
}
|
||||
|
||||
return t.apiCall(ctx, "sendMessage", payload)
|
||||
}
|
||||
|
||||
func (t *TelegramConnector) sendDocument(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
chatID := params["chat_id"].(string)
|
||||
document := params["document"].(string)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"chat_id": chatID,
|
||||
"document": document,
|
||||
}
|
||||
|
||||
if caption, ok := params["caption"].(string); ok {
|
||||
payload["caption"] = caption
|
||||
}
|
||||
|
||||
return t.apiCall(ctx, "sendDocument", payload)
|
||||
}
|
||||
|
||||
func (t *TelegramConnector) sendPhoto(ctx context.Context, params map[string]interface{}) (interface{}, error) {
|
||||
chatID := params["chat_id"].(string)
|
||||
photo := params["photo"].(string)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"chat_id": chatID,
|
||||
"photo": photo,
|
||||
}
|
||||
|
||||
if caption, ok := params["caption"].(string); ok {
|
||||
payload["caption"] = caption
|
||||
}
|
||||
|
||||
return t.apiCall(ctx, "sendPhoto", payload)
|
||||
}
|
||||
|
||||
// apiCall POSTs a JSON payload to the given Bot API method and decodes
// the response.
//
// Telegram wraps every response in {"ok": bool, ...}; when ok is false
// the decoded map is returned together with an error carrying the API's
// "description" field. The HTTP status code is not inspected separately —
// the "ok" flag is treated as the source of truth.
func (t *TelegramConnector) apiCall(ctx context.Context, method string, payload map[string]interface{}) (interface{}, error) {
	// The bot token is part of the URL per the Bot API convention; avoid
	// logging this URL.
	url := fmt.Sprintf("https://api.telegram.org/bot%s/%s", t.cfg.BotToken, method)

	body, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := t.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var result map[string]interface{}
	if err := json.Unmarshal(respBody, &result); err != nil {
		return nil, err
	}

	// Surface API-level failures; the raw result map is still returned so
	// callers can inspect the full error payload.
	if ok, exists := result["ok"].(bool); exists && !ok {
		desc := "unknown error"
		if d, exists := result["description"].(string); exists {
			desc = d
		}
		return result, errors.New("Telegram API error: " + desc)
	}

	return result, nil
}
|
||||
|
||||
func (t *TelegramConnector) SendFileFromBytes(ctx context.Context, chatID string, filename string, content []byte, caption string) (interface{}, error) {
|
||||
url := fmt.Sprintf("https://api.telegram.org/bot%s/sendDocument", t.cfg.BotToken)
|
||||
|
||||
var b bytes.Buffer
|
||||
w := multipart.NewWriter(&b)
|
||||
|
||||
w.WriteField("chat_id", chatID)
|
||||
|
||||
if caption != "" {
|
||||
w.WriteField("caption", caption)
|
||||
}
|
||||
|
||||
fw, err := w.CreateFormFile("document", filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fw.Write(content)
|
||||
|
||||
w.Close()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, &b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", w.FormDataContentType())
|
||||
|
||||
resp, err := t.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := json.Unmarshal(respBody, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (t *TelegramConnector) GetChatID(chatIDOrUsername interface{}) string {
|
||||
switch v := chatIDOrUsername.(type) {
|
||||
case string:
|
||||
return v
|
||||
case int:
|
||||
return strconv.Itoa(v)
|
||||
case int64:
|
||||
return strconv.FormatInt(v, 10)
|
||||
case float64:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
default:
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
}
|
||||
275
backend/internal/computer/connectors/webhook.go
Normal file
275
backend/internal/computer/connectors/webhook.go
Normal file
@@ -0,0 +1,275 @@
|
||||
package connectors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// WebhookConfig tunes outbound webhook delivery. Zero values are
// replaced with defaults in NewWebhookConnector.
type WebhookConfig struct {
	Timeout       time.Duration // per-request HTTP timeout (default 30s)
	MaxRetries    int           // retry attempts after the first try (default 3)
	RetryDelay    time.Duration // base delay, multiplied by the attempt number (default 1s)
	DefaultSecret string        // HMAC key used when a request supplies none
}

// WebhookConnector sends signed HTTP requests to external services with
// a simple retry policy.
type WebhookConnector struct {
	cfg    WebhookConfig
	client *http.Client
}
|
||||
|
||||
func NewWebhookConnector(cfg WebhookConfig) *WebhookConnector {
|
||||
timeout := cfg.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = 30 * time.Second
|
||||
}
|
||||
if cfg.MaxRetries == 0 {
|
||||
cfg.MaxRetries = 3
|
||||
}
|
||||
if cfg.RetryDelay == 0 {
|
||||
cfg.RetryDelay = time.Second
|
||||
}
|
||||
|
||||
return &WebhookConnector{
|
||||
cfg: cfg,
|
||||
client: &http.Client{
|
||||
Timeout: timeout,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ID returns the stable connector identifier used for routing.
func (w *WebhookConnector) ID() string {
	return "webhook"
}

// Name returns the human-readable connector name.
func (w *WebhookConnector) Name() string {
	return "Webhook"
}

// Description returns a short summary of what this connector does.
func (w *WebhookConnector) Description() string {
	return "Send HTTP webhooks to external services"
}
|
||||
|
||||
// GetActions advertises the post/get/put actions with their parameter
// schemas. Note Execute additionally accepts "delete" and "patch" even
// though they are not listed here.
func (w *WebhookConnector) GetActions() []Action {
	return []Action{
		{
			Name:        "post",
			Description: "Send POST request",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"url":     map[string]interface{}{"type": "string", "description": "Webhook URL"},
					"body":    map[string]interface{}{"type": "object", "description": "Request body (JSON)"},
					"headers": map[string]interface{}{"type": "object", "description": "Custom headers"},
					"secret":  map[string]interface{}{"type": "string", "description": "HMAC secret for signing"},
				},
			},
			Required: []string{"url"},
		},
		{
			Name:        "get",
			Description: "Send GET request",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"url":     map[string]interface{}{"type": "string", "description": "Request URL"},
					"params":  map[string]interface{}{"type": "object", "description": "Query parameters"},
					"headers": map[string]interface{}{"type": "object", "description": "Custom headers"},
				},
			},
			Required: []string{"url"},
		},
		{
			Name:        "put",
			Description: "Send PUT request",
			Schema: map[string]interface{}{
				"type": "object",
				"properties": map[string]interface{}{
					"url":     map[string]interface{}{"type": "string", "description": "Request URL"},
					"body":    map[string]interface{}{"type": "object", "description": "Request body (JSON)"},
					"headers": map[string]interface{}{"type": "object", "description": "Custom headers"},
				},
			},
			Required: []string{"url"},
		},
	}
}
|
||||
|
||||
func (w *WebhookConnector) Validate(params map[string]interface{}) error {
|
||||
urlStr, ok := params["url"].(string)
|
||||
if !ok {
|
||||
return errors.New("'url' is required")
|
||||
}
|
||||
|
||||
parsed, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid URL: %w", err)
|
||||
}
|
||||
|
||||
if parsed.Scheme != "http" && parsed.Scheme != "https" {
|
||||
return errors.New("URL must use http or https scheme")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *WebhookConnector) Execute(ctx context.Context, action string, params map[string]interface{}) (interface{}, error) {
|
||||
switch action {
|
||||
case "post":
|
||||
return w.doRequest(ctx, "POST", params)
|
||||
case "get":
|
||||
return w.doRequest(ctx, "GET", params)
|
||||
case "put":
|
||||
return w.doRequest(ctx, "PUT", params)
|
||||
case "delete":
|
||||
return w.doRequest(ctx, "DELETE", params)
|
||||
case "patch":
|
||||
return w.doRequest(ctx, "PATCH", params)
|
||||
default:
|
||||
return nil, errors.New("unknown action: " + action)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WebhookConnector) doRequest(ctx context.Context, method string, params map[string]interface{}) (interface{}, error) {
|
||||
urlStr := params["url"].(string)
|
||||
|
||||
if method == "GET" {
|
||||
if queryParams, ok := params["params"].(map[string]interface{}); ok {
|
||||
parsedURL, _ := url.Parse(urlStr)
|
||||
q := parsedURL.Query()
|
||||
for k, v := range queryParams {
|
||||
q.Set(k, fmt.Sprintf("%v", v))
|
||||
}
|
||||
parsedURL.RawQuery = q.Encode()
|
||||
urlStr = parsedURL.String()
|
||||
}
|
||||
}
|
||||
|
||||
var bodyReader io.Reader
|
||||
var bodyBytes []byte
|
||||
|
||||
if body, ok := params["body"]; ok && method != "GET" {
|
||||
var err error
|
||||
bodyBytes, err = json.Marshal(body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal body: %w", err)
|
||||
}
|
||||
bodyReader = bytes.NewReader(bodyBytes)
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
for attempt := 0; attempt <= w.cfg.MaxRetries; attempt++ {
|
||||
if attempt > 0 {
|
||||
time.Sleep(w.cfg.RetryDelay * time.Duration(attempt))
|
||||
if bodyBytes != nil {
|
||||
bodyReader = bytes.NewReader(bodyBytes)
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, method, urlStr, bodyReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", "GooSeek-Computer/1.0")
|
||||
|
||||
if headers, ok := params["headers"].(map[string]interface{}); ok {
|
||||
for k, v := range headers {
|
||||
req.Header.Set(k, fmt.Sprintf("%v", v))
|
||||
}
|
||||
}
|
||||
|
||||
if bodyBytes != nil {
|
||||
secret := w.cfg.DefaultSecret
|
||||
if s, ok := params["secret"].(string); ok {
|
||||
secret = s
|
||||
}
|
||||
if secret != "" {
|
||||
signature := w.signPayload(bodyBytes, secret)
|
||||
req.Header.Set("X-Signature-256", "sha256="+signature)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := w.client.Do(req)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
result := map[string]interface{}{
|
||||
"status_code": resp.StatusCode,
|
||||
"headers": w.headersToMap(resp.Header),
|
||||
}
|
||||
|
||||
var jsonBody interface{}
|
||||
if err := json.Unmarshal(respBody, &jsonBody); err == nil {
|
||||
result["body"] = jsonBody
|
||||
} else {
|
||||
result["body"] = string(respBody)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
result["success"] = true
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 500 {
|
||||
lastErr = fmt.Errorf("server error: %d", resp.StatusCode)
|
||||
continue
|
||||
}
|
||||
|
||||
result["success"] = false
|
||||
return result, nil
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": false,
|
||||
"error": lastErr.Error(),
|
||||
}, lastErr
|
||||
}
|
||||
|
||||
func (w *WebhookConnector) signPayload(payload []byte, secret string) string {
|
||||
mac := hmac.New(sha256.New, []byte(secret))
|
||||
mac.Write(payload)
|
||||
return hex.EncodeToString(mac.Sum(nil))
|
||||
}
|
||||
|
||||
func (w *WebhookConnector) headersToMap(headers http.Header) map[string]string {
|
||||
result := make(map[string]string)
|
||||
for k, v := range headers {
|
||||
result[k] = strings.Join(v, ", ")
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (w *WebhookConnector) PostJSON(ctx context.Context, webhookURL string, data interface{}) (interface{}, error) {
|
||||
return w.Execute(ctx, "post", map[string]interface{}{
|
||||
"url": webhookURL,
|
||||
"body": data,
|
||||
})
|
||||
}
|
||||
|
||||
func (w *WebhookConnector) GetJSON(ctx context.Context, webhookURL string, params map[string]interface{}) (interface{}, error) {
|
||||
return w.Execute(ctx, "get", map[string]interface{}{
|
||||
"url": webhookURL,
|
||||
"params": params,
|
||||
})
|
||||
}
|
||||
574
backend/internal/computer/executor.go
Normal file
574
backend/internal/computer/executor.go
Normal file
@@ -0,0 +1,574 @@
|
||||
package computer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gooseek/backend/internal/llm"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Executor runs planned SubTasks against LLM clients chosen by the router,
// optionally using a sandbox for real code execution, with bounded parallelism.
type Executor struct {
	router     *Router         // selects an LLM client/model per task and budget
	sandbox    *SandboxManager // optional; enables executeDeploy's real execution path
	maxWorkers int             // upper bound on concurrently running subtasks
}
|
||||
|
||||
func NewExecutor(router *Router, maxWorkers int) *Executor {
|
||||
if maxWorkers <= 0 {
|
||||
maxWorkers = 5
|
||||
}
|
||||
return &Executor{
|
||||
router: router,
|
||||
maxWorkers: maxWorkers,
|
||||
}
|
||||
}
|
||||
|
||||
// SetSandbox injects the sandbox used by deploy-type tasks; nil disables
// sandboxed execution (deploy tasks then degrade to a plain LLM completion).
func (e *Executor) SetSandbox(sandbox *SandboxManager) {
	e.sandbox = sandbox
}
|
||||
|
||||
func (e *Executor) ExecuteGroup(ctx context.Context, tasks []SubTask, budget float64) ([]ExecutionResult, error) {
|
||||
results := make([]ExecutionResult, len(tasks))
|
||||
var mu sync.Mutex
|
||||
|
||||
perTaskBudget := budget / float64(len(tasks))
|
||||
|
||||
g, gctx := errgroup.WithContext(ctx)
|
||||
g.SetLimit(e.maxWorkers)
|
||||
|
||||
for i, task := range tasks {
|
||||
i, task := i, task
|
||||
g.Go(func() error {
|
||||
result, err := e.ExecuteTask(gctx, &task, perTaskBudget)
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
results[i] = ExecutionResult{
|
||||
TaskID: task.ID,
|
||||
SubTaskID: task.ID,
|
||||
Error: err,
|
||||
}
|
||||
} else {
|
||||
results[i] = *result
|
||||
}
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
return results, err
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// ExecuteTask routes a single subtask to the best-fitting model within budget,
// dispatches to the type-specific handler, and stamps timing/cost metadata on
// the result. Unknown task types fall through to the generic handler.
func (e *Executor) ExecuteTask(ctx context.Context, task *SubTask, budget float64) (*ExecutionResult, error) {
	startTime := time.Now()

	client, spec, err := e.router.Route(task, budget)
	if err != nil {
		return nil, fmt.Errorf("routing failed: %w", err)
	}

	// Record which model was chosen and when work began (mutates the task).
	task.ModelID = spec.ID
	now := time.Now()
	task.StartedAt = &now

	var result *ExecutionResult

	switch task.Type {
	case TaskResearch:
		result, err = e.executeResearch(ctx, client, task)
	case TaskCode:
		result, err = e.executeCode(ctx, client, task)
	case TaskAnalysis:
		result, err = e.executeAnalysis(ctx, client, task)
	case TaskDesign:
		result, err = e.executeDesign(ctx, client, task)
	case TaskDeploy:
		result, err = e.executeDeploy(ctx, client, task)
	case TaskReport:
		result, err = e.executeReport(ctx, client, task)
	case TaskCommunicate:
		result, err = e.executeCommunicate(ctx, client, task)
	case TaskTransform:
		result, err = e.executeTransform(ctx, client, task)
	case TaskValidate:
		result, err = e.executeValidate(ctx, client, task)
	default:
		result, err = e.executeGeneric(ctx, client, task)
	}

	if err != nil {
		return nil, err
	}

	result.Duration = time.Since(startTime)
	// NOTE(review): cost assumes a flat 1000 input / 500 output tokens per
	// task rather than actual usage — confirm whether the client reports real
	// token counts that could be used here instead.
	result.Cost = e.router.EstimateCost(task, 1000, 500)

	return result, nil
}
|
||||
|
||||
// executeResearch asks the routed model to perform a research task and parses
// its JSON reply (findings/sources/summary) into the result output.
func (e *Executor) executeResearch(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	prompt := fmt.Sprintf(`You are a research assistant. Complete this research task:

Task: %s

Additional context: %v

Provide a comprehensive research result with:
1. Key findings
2. Sources/references
3. Summary

Respond in JSON:
{
  "findings": ["finding 1", "finding 2"],
  "sources": ["source 1", "source 2"],
  "summary": "...",
  "data": {}
}`, task.Description, task.Input)

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		return nil, err
	}

	// Falls back to {"raw": response} when the model strays from JSON.
	output := parseJSONOutput(response)

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// executeCode prompts the model for production-ready code, parses the JSON
// reply, and materializes the returned source as a code artifact.
func (e *Executor) executeCode(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputContext := ""
	if task.Input != nil {
		inputJSON, _ := json.Marshal(task.Input) // best effort; empty context if marshal fails
		inputContext = fmt.Sprintf("\n\nContext from previous tasks:\n%s", string(inputJSON))
	}

	prompt := fmt.Sprintf(`You are an expert programmer. Complete this coding task:

Task: %s%s

Requirements:
1. Write clean, production-ready code
2. Include error handling
3. Add necessary imports
4. Follow best practices

Respond in JSON:
{
  "language": "python",
  "code": "...",
  "filename": "main.py",
  "dependencies": ["package1", "package2"],
  "explanation": "..."
}`, task.Description, inputContext)

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 8192},
	})
	if err != nil {
		return nil, err
	}

	output := parseJSONOutput(response)

	// Only produce an artifact when the model actually returned code.
	var artifacts []Artifact
	if code, ok := output["code"].(string); ok {
		filename := "main.py" // default when the model omits "filename"
		if fn, ok := output["filename"].(string); ok {
			filename = fn
		}
		artifacts = append(artifacts, Artifact{
			ID:        uuid.New().String(),
			TaskID:    task.ID,
			Type:      ArtifactTypeCode,
			Name:      filename,
			Content:   []byte(code),
			Size:      int64(len(code)),
			CreatedAt: time.Now(),
		})
	}

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
		Artifacts: artifacts,
	}, nil
}
|
||||
|
||||
// executeAnalysis asks the model to analyze the task's input data and parses
// the structured insights/patterns/recommendations reply.
func (e *Executor) executeAnalysis(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`You are a data analyst. Analyze this data/information:

Task: %s

Input data:
%s

Provide:
1. Key insights
2. Patterns observed
3. Recommendations
4. Visualizations needed (describe)

Respond in JSON:
{
  "insights": ["insight 1", "insight 2"],
  "patterns": ["pattern 1"],
  "recommendations": ["rec 1"],
  "visualizations": ["chart type 1"],
  "summary": "..."
}`, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		return nil, err
	}

	output := parseJSONOutput(response)

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// executeDesign asks the model for an architecture/design proposal and parses
// the structured reply (components, data flow, technologies, plan, diagram).
func (e *Executor) executeDesign(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`You are a software architect. Design a solution:

Task: %s

Context:
%s

Provide:
1. Architecture overview
2. Components and their responsibilities
3. Data flow
4. Technology recommendations
5. Implementation plan

Respond in JSON:
{
  "architecture": "...",
  "components": [{"name": "...", "responsibility": "..."}],
  "dataFlow": "...",
  "technologies": ["tech1", "tech2"],
  "implementationSteps": ["step1", "step2"],
  "diagram": "mermaid diagram code"
}`, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		return nil, err
	}

	output := parseJSONOutput(response)

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// executeDeploy runs task-provided code in an isolated sandbox, collecting
// stdout/stderr/exit code and any produced files as artifacts. It degrades to
// a plain LLM completion (executeGeneric) when no sandbox is configured or the
// task carries no code.
func (e *Executor) executeDeploy(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	if e.sandbox == nil {
		return e.executeGeneric(ctx, client, task)
	}

	var code string
	if task.Input != nil {
		if c, ok := task.Input["code"].(string); ok {
			code = c
		}
	}

	if code == "" {
		return e.executeGeneric(ctx, client, task)
	}

	sandbox, err := e.sandbox.Create(ctx, task.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to create sandbox: %w", err)
	}
	// Ensure the sandbox is torn down even when execution fails.
	defer e.sandbox.Destroy(ctx, sandbox)

	// NOTE(review): the language is hard-coded to "python" even if the task's
	// input declares another language — confirm this is intended.
	result, err := e.sandbox.Execute(ctx, sandbox, code, "python")
	if err != nil {
		return nil, fmt.Errorf("sandbox execution failed: %w", err)
	}

	output := map[string]interface{}{
		"stdout":   result.Stdout,
		"stderr":   result.Stderr,
		"exitCode": result.ExitCode,
		"duration": result.Duration.String(),
	}

	// Every file left behind in the sandbox becomes a downloadable artifact.
	var artifacts []Artifact
	for name, content := range result.Files {
		artifacts = append(artifacts, Artifact{
			ID:        uuid.New().String(),
			TaskID:    task.ID,
			Type:      ArtifactTypeFile,
			Name:      name,
			Content:   content,
			Size:      int64(len(content)),
			CreatedAt: time.Now(),
		})
	}

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
		Artifacts: artifacts,
	}, nil
}
|
||||
|
||||
// executeReport asks the model for a markdown report, then exposes the raw
// text both in the output map and as a report.md artifact.
func (e *Executor) executeReport(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`You are a report writer. Generate a comprehensive report:

Task: %s

Data/Context:
%s

Create a well-structured report with:
1. Executive Summary
2. Key Findings
3. Detailed Analysis
4. Conclusions
5. Recommendations

Use markdown formatting.`, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 8192},
	})
	if err != nil {
		return nil, err
	}

	// The reply is used verbatim as markdown; no JSON extraction here.
	output := map[string]interface{}{
		"report":    response,
		"format":    "markdown",
		"wordCount": len(strings.Fields(response)),
	}

	artifacts := []Artifact{
		{
			ID:        uuid.New().String(),
			TaskID:    task.ID,
			Type:      ArtifactTypeReport,
			Name:      "report.md",
			Content:   []byte(response),
			MimeType:  "text/markdown",
			Size:      int64(len(response)),
			CreatedAt: time.Now(),
		},
	}

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
		Artifacts: artifacts,
	}, nil
}
|
||||
|
||||
// executeCommunicate drafts a message/notification for the task. The message
// is only prepared (output["status"] = "prepared"), never actually sent here.
func (e *Executor) executeCommunicate(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`Generate a message/notification:

Task: %s

Context:
%s

Create an appropriate message. Respond in JSON:
{
  "subject": "...",
  "body": "...",
  "format": "text|html",
  "priority": "low|normal|high"
}`, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 2048},
	})
	if err != nil {
		return nil, err
	}

	// parseJSONOutput always returns a non-nil map, so this write is safe.
	output := parseJSONOutput(response)
	output["status"] = "prepared"

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// executeTransform asks the model to convert the task's input data into the
// requested form and parses the structured reply.
func (e *Executor) executeTransform(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`Transform data as requested:

Task: %s

Input data:
%s

Perform the transformation and return the result in JSON:
{
  "transformed": ...,
  "format": "...",
  "changes": ["change 1", "change 2"]
}`, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		return nil, err
	}

	output := parseJSONOutput(response)

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// executeValidate asks the model to check the task's input for correctness,
// completeness, consistency, and quality, returning a structured verdict.
func (e *Executor) executeValidate(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`Validate the following:

Task: %s

Data to validate:
%s

Check for:
1. Correctness
2. Completeness
3. Consistency
4. Quality

Respond in JSON:
{
  "valid": true|false,
  "score": 0-100,
  "issues": ["issue 1", "issue 2"],
  "suggestions": ["suggestion 1"],
  "summary": "..."
}`, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 2048},
	})
	if err != nil {
		return nil, err
	}

	output := parseJSONOutput(response)

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// executeGeneric is the catch-all handler for unknown task types (and the
// fallback for deploy tasks without a sandbox): a single free-form completion.
func (e *Executor) executeGeneric(ctx context.Context, client llm.Client, task *SubTask) (*ExecutionResult, error) {
	inputJSON, _ := json.Marshal(task.Input) // best effort; nil input serializes as "null"

	prompt := fmt.Sprintf(`Complete this task:

Task type: %s
Description: %s

Context:
%s

Provide a comprehensive result in JSON format.`, task.Type, task.Description, string(inputJSON))

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: []llm.Message{{Role: llm.RoleUser, Content: prompt}},
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		return nil, err
	}

	output := parseJSONOutput(response)
	// Defensive fallback; parseJSONOutput appears to always return a non-empty
	// map (it wraps failures under "raw"), so this branch is belt-and-braces.
	if len(output) == 0 {
		output = map[string]interface{}{
			"result": response,
		}
	}

	return &ExecutionResult{
		TaskID:    task.ID,
		SubTaskID: task.ID,
		Output:    output,
	}, nil
}
|
||||
|
||||
// parseJSONOutput extracts the first-"{"-to-last-"}" span of an LLM response
// and decodes it as a JSON object. When no decodable object is present, the
// whole response is returned under the "raw" key so callers always get a map.
func parseJSONOutput(response string) map[string]interface{} {
	fallback := map[string]interface{}{"raw": response}

	start := strings.Index(response, "{")
	end := strings.LastIndex(response, "}")
	if start < 0 || end <= start {
		return fallback
	}

	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(response[start:end+1]), &parsed); err != nil {
		return fallback
	}
	return parsed
}
|
||||
377
backend/internal/computer/memory.go
Normal file
377
backend/internal/computer/memory.go
Normal file
@@ -0,0 +1,377 @@
|
||||
package computer
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
)
|
||||
|
||||
// MemoryStore layers a bounded per-user in-process cache over an optional
// persistent MemoryRepository; all cache access is guarded by mu.
type MemoryStore struct {
	repo  MemoryRepository         // optional durable backend; nil means cache-only
	cache map[string][]MemoryEntry // per-user entries, newest appended last
	mu    sync.RWMutex             // guards cache
}
|
||||
|
||||
func NewMemoryStore(repo MemoryRepository) *MemoryStore {
|
||||
return &MemoryStore{
|
||||
repo: repo,
|
||||
cache: make(map[string][]MemoryEntry),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MemoryStore) Store(ctx context.Context, userID string, entry *MemoryEntry) error {
|
||||
if entry.ID == "" {
|
||||
entry.ID = uuid.New().String()
|
||||
}
|
||||
entry.UserID = userID
|
||||
if entry.CreatedAt.IsZero() {
|
||||
entry.CreatedAt = time.Now()
|
||||
}
|
||||
|
||||
if m.repo != nil {
|
||||
if err := m.repo.Store(ctx, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.cache[userID] = append(m.cache[userID], *entry)
|
||||
if len(m.cache[userID]) > 1000 {
|
||||
m.cache[userID] = m.cache[userID][len(m.cache[userID])-500:]
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) StoreResult(ctx context.Context, userID, taskID, key string, value interface{}) error {
|
||||
valueJSON, _ := json.Marshal(value)
|
||||
|
||||
entry := &MemoryEntry{
|
||||
UserID: userID,
|
||||
TaskID: taskID,
|
||||
Key: key,
|
||||
Value: string(valueJSON),
|
||||
Type: MemoryTypeResult,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
return m.Store(ctx, userID, entry)
|
||||
}
|
||||
|
||||
func (m *MemoryStore) StoreFact(ctx context.Context, userID, key string, value interface{}, tags []string) error {
|
||||
entry := &MemoryEntry{
|
||||
UserID: userID,
|
||||
Key: key,
|
||||
Value: value,
|
||||
Type: MemoryTypeFact,
|
||||
Tags: tags,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
return m.Store(ctx, userID, entry)
|
||||
}
|
||||
|
||||
func (m *MemoryStore) StorePreference(ctx context.Context, userID, key string, value interface{}) error {
|
||||
entry := &MemoryEntry{
|
||||
UserID: userID,
|
||||
Key: key,
|
||||
Value: value,
|
||||
Type: MemoryTypePreference,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
return m.Store(ctx, userID, entry)
|
||||
}
|
||||
|
||||
func (m *MemoryStore) StoreContext(ctx context.Context, userID, taskID, key string, value interface{}, ttl time.Duration) error {
|
||||
expiresAt := time.Now().Add(ttl)
|
||||
|
||||
entry := &MemoryEntry{
|
||||
UserID: userID,
|
||||
TaskID: taskID,
|
||||
Key: key,
|
||||
Value: value,
|
||||
Type: MemoryTypeContext,
|
||||
CreatedAt: time.Now(),
|
||||
ExpiresAt: &expiresAt,
|
||||
}
|
||||
|
||||
return m.Store(ctx, userID, entry)
|
||||
}
|
||||
|
||||
func (m *MemoryStore) Recall(ctx context.Context, userID string, query string, limit int) ([]MemoryEntry, error) {
|
||||
if m.repo != nil {
|
||||
entries, err := m.repo.Search(ctx, userID, query, limit)
|
||||
if err == nil && len(entries) > 0 {
|
||||
return entries, nil
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.RLock()
|
||||
cached := m.cache[userID]
|
||||
m.mu.RUnlock()
|
||||
|
||||
if len(cached) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
queryLower := strings.ToLower(query)
|
||||
queryTerms := strings.Fields(queryLower)
|
||||
|
||||
type scored struct {
|
||||
entry MemoryEntry
|
||||
score int
|
||||
}
|
||||
|
||||
var results []scored
|
||||
now := time.Now()
|
||||
|
||||
for _, entry := range cached {
|
||||
if entry.ExpiresAt != nil && entry.ExpiresAt.Before(now) {
|
||||
continue
|
||||
}
|
||||
|
||||
score := 0
|
||||
|
||||
keyLower := strings.ToLower(entry.Key)
|
||||
for _, term := range queryTerms {
|
||||
if strings.Contains(keyLower, term) {
|
||||
score += 3
|
||||
}
|
||||
}
|
||||
|
||||
if valueStr, ok := entry.Value.(string); ok {
|
||||
valueLower := strings.ToLower(valueStr)
|
||||
for _, term := range queryTerms {
|
||||
if strings.Contains(valueLower, term) {
|
||||
score += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, tag := range entry.Tags {
|
||||
tagLower := strings.ToLower(tag)
|
||||
for _, term := range queryTerms {
|
||||
if strings.Contains(tagLower, term) {
|
||||
score += 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if score > 0 {
|
||||
results = append(results, scored{entry: entry, score: score})
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(results)-1; i++ {
|
||||
for j := i + 1; j < len(results); j++ {
|
||||
if results[j].score > results[i].score {
|
||||
results[i], results[j] = results[j], results[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(results) > limit {
|
||||
results = results[:limit]
|
||||
}
|
||||
|
||||
entries := make([]MemoryEntry, len(results))
|
||||
for i, r := range results {
|
||||
entries[i] = r.entry
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) GetByUser(ctx context.Context, userID string, limit int) ([]MemoryEntry, error) {
|
||||
if m.repo != nil {
|
||||
return m.repo.GetByUser(ctx, userID, limit)
|
||||
}
|
||||
|
||||
m.mu.RLock()
|
||||
cached := m.cache[userID]
|
||||
m.mu.RUnlock()
|
||||
|
||||
if len(cached) > limit {
|
||||
return cached[len(cached)-limit:], nil
|
||||
}
|
||||
|
||||
return cached, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) GetByTask(ctx context.Context, taskID string) ([]MemoryEntry, error) {
|
||||
if m.repo != nil {
|
||||
return m.repo.GetByTask(ctx, taskID)
|
||||
}
|
||||
|
||||
var result []MemoryEntry
|
||||
|
||||
m.mu.RLock()
|
||||
for _, entries := range m.cache {
|
||||
for _, e := range entries {
|
||||
if e.TaskID == taskID {
|
||||
result = append(result, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) GetTaskContext(ctx context.Context, taskID string) (map[string]interface{}, error) {
|
||||
entries, err := m.GetByTask(ctx, taskID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
context := make(map[string]interface{})
|
||||
for _, e := range entries {
|
||||
context[e.Key] = e.Value
|
||||
}
|
||||
|
||||
return context, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) GetUserContext(ctx context.Context, userID string) (map[string]interface{}, error) {
|
||||
entries, err := m.GetByUser(ctx, userID, 100)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
context := make(map[string]interface{})
|
||||
for _, e := range entries {
|
||||
if e.Type == MemoryTypePreference || e.Type == MemoryTypeFact {
|
||||
context[e.Key] = e.Value
|
||||
}
|
||||
}
|
||||
|
||||
return context, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) GetPreferences(ctx context.Context, userID string) (map[string]interface{}, error) {
|
||||
entries, err := m.GetByUser(ctx, userID, 100)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
prefs := make(map[string]interface{})
|
||||
for _, e := range entries {
|
||||
if e.Type == MemoryTypePreference {
|
||||
prefs[e.Key] = e.Value
|
||||
}
|
||||
}
|
||||
|
||||
return prefs, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) GetFacts(ctx context.Context, userID string) ([]MemoryEntry, error) {
|
||||
entries, err := m.GetByUser(ctx, userID, 100)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var facts []MemoryEntry
|
||||
for _, e := range entries {
|
||||
if e.Type == MemoryTypeFact {
|
||||
facts = append(facts, e)
|
||||
}
|
||||
}
|
||||
|
||||
return facts, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) Delete(ctx context.Context, id string) error {
|
||||
if m.repo != nil {
|
||||
return m.repo.Delete(ctx, id)
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
for userID, entries := range m.cache {
|
||||
for i, e := range entries {
|
||||
if e.ID == id {
|
||||
m.cache[userID] = append(entries[:i], entries[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clear drops all cached memory for userID.
//
// NOTE(review): only the in-process cache is cleared; entries in the
// persistent repository (if any) are untouched — no repo-side clear is called
// here. Confirm whether durable entries should also be removed.
func (m *MemoryStore) Clear(ctx context.Context, userID string) error {
	m.mu.Lock()
	delete(m.cache, userID)
	m.mu.Unlock()

	return nil
}
|
||||
|
||||
func (m *MemoryStore) ClearTask(ctx context.Context, taskID string) error {
|
||||
m.mu.Lock()
|
||||
for userID, entries := range m.cache {
|
||||
var filtered []MemoryEntry
|
||||
for _, e := range entries {
|
||||
if e.TaskID != taskID {
|
||||
filtered = append(filtered, e)
|
||||
}
|
||||
}
|
||||
m.cache[userID] = filtered
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) Cleanup(ctx context.Context) error {
|
||||
now := time.Now()
|
||||
|
||||
m.mu.Lock()
|
||||
for userID, entries := range m.cache {
|
||||
var valid []MemoryEntry
|
||||
for _, e := range entries {
|
||||
if e.ExpiresAt == nil || e.ExpiresAt.After(now) {
|
||||
valid = append(valid, e)
|
||||
}
|
||||
}
|
||||
m.cache[userID] = valid
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MemoryStore) Stats(userID string) map[string]int {
|
||||
m.mu.RLock()
|
||||
entries := m.cache[userID]
|
||||
m.mu.RUnlock()
|
||||
|
||||
stats := map[string]int{
|
||||
"total": len(entries),
|
||||
"facts": 0,
|
||||
"preferences": 0,
|
||||
"context": 0,
|
||||
"results": 0,
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
switch e.Type {
|
||||
case MemoryTypeFact:
|
||||
stats["facts"]++
|
||||
case MemoryTypePreference:
|
||||
stats["preferences"]++
|
||||
case MemoryTypeContext:
|
||||
stats["context"]++
|
||||
case MemoryTypeResult:
|
||||
stats["results"]++
|
||||
}
|
||||
}
|
||||
|
||||
return stats
|
||||
}
|
||||
371
backend/internal/computer/planner.go
Normal file
371
backend/internal/computer/planner.go
Normal file
@@ -0,0 +1,371 @@
|
||||
package computer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/gooseek/backend/internal/llm"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Planner turns a free-form user query into a structured TaskPlan by asking a
// reasoning-capable model selected from the registry.
type Planner struct {
	registry *llm.ModelRegistry // source of candidate planning models
}
|
||||
|
||||
func NewPlanner(registry *llm.ModelRegistry) *Planner {
|
||||
return &Planner{
|
||||
registry: registry,
|
||||
}
|
||||
}
|
||||
|
||||
// Plan decomposes query into a TaskPlan of 3-10 subtasks using the best
// reasoning (or, failing that, coding) model in the registry. Any model or
// parse failure degrades to a minimal single-subtask default plan rather than
// returning an error, so planning itself never fails the request.
func (p *Planner) Plan(ctx context.Context, query string, memory map[string]interface{}) (*TaskPlan, error) {
	client, _, err := p.registry.GetBest(llm.CapReasoning)
	if err != nil {
		// Fall back to a coding-capable model before giving up entirely.
		client, _, err = p.registry.GetBest(llm.CapCoding)
		if err != nil {
			return nil, fmt.Errorf("no suitable model for planning: %w", err)
		}
	}

	memoryContext := ""
	if len(memory) > 0 {
		memoryJSON, _ := json.Marshal(memory) // best effort; empty context on failure
		memoryContext = fmt.Sprintf("\n\nUser context and memory:\n%s", string(memoryJSON))
	}

	prompt := fmt.Sprintf(`You are a task planning AI. Analyze this query and create an execution plan.

Query: %s%s

Break this into subtasks. Each subtask should be:
1. Atomic - one clear action
2. Independent where possible (for parallel execution)
3. Have clear dependencies when needed

Available task types:
- research: Search web, gather information
- code: Write/generate code
- analysis: Analyze data, extract insights
- design: Design architecture, create plans
- deploy: Deploy applications, run code
- monitor: Set up monitoring, tracking
- report: Generate reports, summaries
- communicate: Send emails, messages
- transform: Convert data formats
- validate: Check, verify results

For each subtask specify:
- type: one of the task types above
- description: what to do
- dependencies: list of subtask IDs this depends on (empty if none)
- capabilities: required AI capabilities (reasoning, coding, search, creative, fast, long_context, vision, math)

Respond in JSON format:
{
  "summary": "Brief summary of the plan",
  "subtasks": [
    {
      "id": "1",
      "type": "research",
      "description": "Search for...",
      "dependencies": [],
      "capabilities": ["search"]
    },
    {
      "id": "2",
      "type": "code",
      "description": "Write code to...",
      "dependencies": ["1"],
      "capabilities": ["coding"]
    }
  ],
  "estimatedCost": 0.05,
  "estimatedTimeSeconds": 120
}

Create 3-10 subtasks. Be specific and actionable.`, query, memoryContext)

	messages := []llm.Message{
		{Role: llm.RoleUser, Content: prompt},
	}

	response, err := client.GenerateText(ctx, llm.StreamRequest{
		Messages: messages,
		Options:  llm.StreamOptions{MaxTokens: 4096},
	})
	if err != nil {
		// Degrade gracefully: generation failure yields the default plan.
		return p.createDefaultPlan(query), nil
	}

	plan, err := p.parsePlanResponse(response)
	if err != nil {
		// Unparseable reply also degrades to the default plan.
		return p.createDefaultPlan(query), nil
	}

	plan.Query = query
	plan.ExecutionOrder = p.calculateExecutionOrder(plan.SubTasks)

	return plan, nil
}
|
||||
|
||||
func (p *Planner) parsePlanResponse(response string) (*TaskPlan, error) {
|
||||
jsonRegex := regexp.MustCompile(`\{[\s\S]*\}`)
|
||||
jsonMatch := jsonRegex.FindString(response)
|
||||
if jsonMatch == "" {
|
||||
return nil, fmt.Errorf("no JSON found in response")
|
||||
}
|
||||
|
||||
var rawPlan struct {
|
||||
Summary string `json:"summary"`
|
||||
EstimatedCost float64 `json:"estimatedCost"`
|
||||
EstimatedTimeSeconds int `json:"estimatedTimeSeconds"`
|
||||
SubTasks []struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Description string `json:"description"`
|
||||
Dependencies []string `json:"dependencies"`
|
||||
Capabilities []string `json:"capabilities"`
|
||||
} `json:"subtasks"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(jsonMatch), &rawPlan); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse plan JSON: %w", err)
|
||||
}
|
||||
|
||||
plan := &TaskPlan{
|
||||
Summary: rawPlan.Summary,
|
||||
EstimatedCost: rawPlan.EstimatedCost,
|
||||
EstimatedTime: rawPlan.EstimatedTimeSeconds,
|
||||
SubTasks: make([]SubTask, len(rawPlan.SubTasks)),
|
||||
}
|
||||
|
||||
for i, st := range rawPlan.SubTasks {
|
||||
caps := make([]llm.ModelCapability, len(st.Capabilities))
|
||||
for j, c := range st.Capabilities {
|
||||
caps[j] = llm.ModelCapability(c)
|
||||
}
|
||||
|
||||
plan.SubTasks[i] = SubTask{
|
||||
ID: st.ID,
|
||||
Type: TaskType(st.Type),
|
||||
Description: st.Description,
|
||||
Dependencies: st.Dependencies,
|
||||
RequiredCaps: caps,
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
}
|
||||
}
|
||||
|
||||
return plan, nil
|
||||
}
|
||||
|
||||
func (p *Planner) calculateExecutionOrder(subTasks []SubTask) [][]string {
|
||||
taskMap := make(map[string]*SubTask)
|
||||
for i := range subTasks {
|
||||
taskMap[subTasks[i].ID] = &subTasks[i]
|
||||
}
|
||||
|
||||
inDegree := make(map[string]int)
|
||||
for _, st := range subTasks {
|
||||
if _, ok := inDegree[st.ID]; !ok {
|
||||
inDegree[st.ID] = 0
|
||||
}
|
||||
for _, dep := range st.Dependencies {
|
||||
inDegree[st.ID]++
|
||||
if _, ok := inDegree[dep]; !ok {
|
||||
inDegree[dep] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var order [][]string
|
||||
completed := make(map[string]bool)
|
||||
|
||||
for len(completed) < len(subTasks) {
|
||||
var wave []string
|
||||
|
||||
for _, st := range subTasks {
|
||||
if completed[st.ID] {
|
||||
continue
|
||||
}
|
||||
|
||||
canExecute := true
|
||||
for _, dep := range st.Dependencies {
|
||||
if !completed[dep] {
|
||||
canExecute = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if canExecute {
|
||||
wave = append(wave, st.ID)
|
||||
}
|
||||
}
|
||||
|
||||
if len(wave) == 0 {
|
||||
for _, st := range subTasks {
|
||||
if !completed[st.ID] {
|
||||
wave = append(wave, st.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range wave {
|
||||
completed[id] = true
|
||||
}
|
||||
|
||||
order = append(order, wave)
|
||||
}
|
||||
|
||||
return order
|
||||
}
|
||||
|
||||
func (p *Planner) createDefaultPlan(query string) *TaskPlan {
|
||||
queryLower := strings.ToLower(query)
|
||||
|
||||
subTasks := []SubTask{
|
||||
{
|
||||
ID: uuid.New().String(),
|
||||
Type: TaskResearch,
|
||||
Description: "Research and gather information about: " + query,
|
||||
Dependencies: []string{},
|
||||
RequiredCaps: []llm.ModelCapability{llm.CapSearch},
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
},
|
||||
}
|
||||
|
||||
if strings.Contains(queryLower, "код") || strings.Contains(queryLower, "code") ||
|
||||
strings.Contains(queryLower, "приложение") || strings.Contains(queryLower, "app") ||
|
||||
strings.Contains(queryLower, "скрипт") || strings.Contains(queryLower, "script") {
|
||||
subTasks = append(subTasks, SubTask{
|
||||
ID: uuid.New().String(),
|
||||
Type: TaskDesign,
|
||||
Description: "Design architecture and structure",
|
||||
Dependencies: []string{subTasks[0].ID},
|
||||
RequiredCaps: []llm.ModelCapability{llm.CapReasoning},
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
})
|
||||
subTasks = append(subTasks, SubTask{
|
||||
ID: uuid.New().String(),
|
||||
Type: TaskCode,
|
||||
Description: "Generate code implementation",
|
||||
Dependencies: []string{subTasks[1].ID},
|
||||
RequiredCaps: []llm.ModelCapability{llm.CapCoding},
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
})
|
||||
}
|
||||
|
||||
if strings.Contains(queryLower, "отчёт") || strings.Contains(queryLower, "report") ||
|
||||
strings.Contains(queryLower, "анализ") || strings.Contains(queryLower, "analysis") {
|
||||
subTasks = append(subTasks, SubTask{
|
||||
ID: uuid.New().String(),
|
||||
Type: TaskAnalysis,
|
||||
Description: "Analyze gathered information",
|
||||
Dependencies: []string{subTasks[0].ID},
|
||||
RequiredCaps: []llm.ModelCapability{llm.CapReasoning},
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
})
|
||||
subTasks = append(subTasks, SubTask{
|
||||
ID: uuid.New().String(),
|
||||
Type: TaskReport,
|
||||
Description: "Generate comprehensive report",
|
||||
Dependencies: []string{subTasks[len(subTasks)-1].ID},
|
||||
RequiredCaps: []llm.ModelCapability{llm.CapCreative},
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
})
|
||||
}
|
||||
|
||||
if strings.Contains(queryLower, "email") || strings.Contains(queryLower, "письмо") ||
|
||||
strings.Contains(queryLower, "telegram") || strings.Contains(queryLower, "отправ") {
|
||||
subTasks = append(subTasks, SubTask{
|
||||
ID: uuid.New().String(),
|
||||
Type: TaskCommunicate,
|
||||
Description: "Send notification/message",
|
||||
Dependencies: []string{subTasks[len(subTasks)-1].ID},
|
||||
RequiredCaps: []llm.ModelCapability{llm.CapFast},
|
||||
Status: StatusPending,
|
||||
MaxRetries: 3,
|
||||
})
|
||||
}
|
||||
|
||||
plan := &TaskPlan{
|
||||
Query: query,
|
||||
Summary: "Auto-generated plan for: " + query,
|
||||
SubTasks: subTasks,
|
||||
EstimatedCost: float64(len(subTasks)) * 0.01,
|
||||
EstimatedTime: len(subTasks) * 30,
|
||||
}
|
||||
|
||||
plan.ExecutionOrder = p.calculateExecutionOrder(subTasks)
|
||||
|
||||
return plan
|
||||
}
|
||||
|
||||
func (p *Planner) Replan(ctx context.Context, plan *TaskPlan, newContext string) (*TaskPlan, error) {
|
||||
completedTasks := make([]SubTask, 0)
|
||||
pendingTasks := make([]SubTask, 0)
|
||||
|
||||
for _, st := range plan.SubTasks {
|
||||
if st.Status == StatusCompleted {
|
||||
completedTasks = append(completedTasks, st)
|
||||
} else if st.Status == StatusPending || st.Status == StatusFailed {
|
||||
pendingTasks = append(pendingTasks, st)
|
||||
}
|
||||
}
|
||||
|
||||
completedJSON, _ := json.Marshal(completedTasks)
|
||||
pendingJSON, _ := json.Marshal(pendingTasks)
|
||||
|
||||
client, _, err := p.registry.GetBest(llm.CapReasoning)
|
||||
if err != nil {
|
||||
return plan, nil
|
||||
}
|
||||
|
||||
prompt := fmt.Sprintf(`You need to replan a task based on new context.
|
||||
|
||||
Original query: %s
|
||||
|
||||
Completed subtasks:
|
||||
%s
|
||||
|
||||
Pending subtasks:
|
||||
%s
|
||||
|
||||
New context/feedback:
|
||||
%s
|
||||
|
||||
Adjust the plan. Keep completed tasks, modify or remove pending tasks as needed.
|
||||
Add new subtasks if the new context requires it.
|
||||
|
||||
Respond in the same JSON format as before.`, plan.Query, string(completedJSON), string(pendingJSON), newContext)
|
||||
|
||||
messages := []llm.Message{
|
||||
{Role: llm.RoleUser, Content: prompt},
|
||||
}
|
||||
|
||||
response, err := client.GenerateText(ctx, llm.StreamRequest{
|
||||
Messages: messages,
|
||||
Options: llm.StreamOptions{MaxTokens: 4096},
|
||||
})
|
||||
if err != nil {
|
||||
return plan, nil
|
||||
}
|
||||
|
||||
newPlan, err := p.parsePlanResponse(response)
|
||||
if err != nil {
|
||||
return plan, nil
|
||||
}
|
||||
|
||||
newPlan.Query = plan.Query
|
||||
newPlan.ExecutionOrder = p.calculateExecutionOrder(newPlan.SubTasks)
|
||||
|
||||
return newPlan, nil
|
||||
}
|
||||
244
backend/internal/computer/router.go
Normal file
244
backend/internal/computer/router.go
Normal file
@@ -0,0 +1,244 @@
|
||||
package computer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
|
||||
"github.com/gooseek/backend/internal/llm"
|
||||
)
|
||||
|
||||
// RoutingRule describes how subtasks of one type are matched to models:
// first by preferred capabilities, then by an ordered list of fallback
// model IDs.
type RoutingRule struct {
	TaskType  TaskType
	Preferred []llm.ModelCapability // capabilities tried first, in order
	Fallback  []string              // model IDs tried when no capability match fits the budget
	MaxCost   float64               // advisory cost ceiling; not enforced by Route (which uses its budget parameter)
	MaxLatency int                  // NOTE(review): not read by Route/RouteMultiple in this file — confirm intended use
}
|
||||
|
||||
// Router maps subtasks to LLM clients using per-task-type routing rules
// backed by the model registry.
type Router struct {
	registry *llm.ModelRegistry
	rules    map[TaskType]RoutingRule // NOTE(review): written by SetRule with no lock while Route reads — confirm rules are only configured before concurrent use
}
|
||||
|
||||
func NewRouter(registry *llm.ModelRegistry) *Router {
|
||||
r := &Router{
|
||||
registry: registry,
|
||||
rules: make(map[TaskType]RoutingRule),
|
||||
}
|
||||
|
||||
r.rules[TaskResearch] = RoutingRule{
|
||||
TaskType: TaskResearch,
|
||||
Preferred: []llm.ModelCapability{llm.CapSearch, llm.CapLongContext},
|
||||
Fallback: []string{"gemini-1.5-pro", "gpt-4o"},
|
||||
MaxCost: 0.1,
|
||||
}
|
||||
|
||||
r.rules[TaskCode] = RoutingRule{
|
||||
TaskType: TaskCode,
|
||||
Preferred: []llm.ModelCapability{llm.CapCoding},
|
||||
Fallback: []string{"claude-3-sonnet", "claude-3-opus", "gpt-4o"},
|
||||
MaxCost: 0.2,
|
||||
}
|
||||
|
||||
r.rules[TaskAnalysis] = RoutingRule{
|
||||
TaskType: TaskAnalysis,
|
||||
Preferred: []llm.ModelCapability{llm.CapReasoning, llm.CapMath},
|
||||
Fallback: []string{"claude-3-opus", "gpt-4o"},
|
||||
MaxCost: 0.15,
|
||||
}
|
||||
|
||||
r.rules[TaskDesign] = RoutingRule{
|
||||
TaskType: TaskDesign,
|
||||
Preferred: []llm.ModelCapability{llm.CapReasoning, llm.CapCreative},
|
||||
Fallback: []string{"claude-3-opus", "gpt-4o"},
|
||||
MaxCost: 0.15,
|
||||
}
|
||||
|
||||
r.rules[TaskDeploy] = RoutingRule{
|
||||
TaskType: TaskDeploy,
|
||||
Preferred: []llm.ModelCapability{llm.CapCoding, llm.CapFast},
|
||||
Fallback: []string{"claude-3-sonnet", "gpt-4o-mini"},
|
||||
MaxCost: 0.05,
|
||||
}
|
||||
|
||||
r.rules[TaskMonitor] = RoutingRule{
|
||||
TaskType: TaskMonitor,
|
||||
Preferred: []llm.ModelCapability{llm.CapFast},
|
||||
Fallback: []string{"gpt-4o-mini", "gemini-1.5-flash"},
|
||||
MaxCost: 0.02,
|
||||
}
|
||||
|
||||
r.rules[TaskReport] = RoutingRule{
|
||||
TaskType: TaskReport,
|
||||
Preferred: []llm.ModelCapability{llm.CapCreative, llm.CapLongContext},
|
||||
Fallback: []string{"claude-3-opus", "gpt-4o"},
|
||||
MaxCost: 0.1,
|
||||
}
|
||||
|
||||
r.rules[TaskCommunicate] = RoutingRule{
|
||||
TaskType: TaskCommunicate,
|
||||
Preferred: []llm.ModelCapability{llm.CapFast, llm.CapCreative},
|
||||
Fallback: []string{"gpt-4o-mini", "gemini-1.5-flash"},
|
||||
MaxCost: 0.02,
|
||||
}
|
||||
|
||||
r.rules[TaskTransform] = RoutingRule{
|
||||
TaskType: TaskTransform,
|
||||
Preferred: []llm.ModelCapability{llm.CapFast, llm.CapCoding},
|
||||
Fallback: []string{"gpt-4o-mini", "claude-3-sonnet"},
|
||||
MaxCost: 0.03,
|
||||
}
|
||||
|
||||
r.rules[TaskValidate] = RoutingRule{
|
||||
TaskType: TaskValidate,
|
||||
Preferred: []llm.ModelCapability{llm.CapReasoning},
|
||||
Fallback: []string{"gpt-4o", "claude-3-sonnet"},
|
||||
MaxCost: 0.05,
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Router) Route(task *SubTask, budget float64) (llm.Client, llm.ModelSpec, error) {
|
||||
if task.ModelID != "" {
|
||||
client, spec, err := r.registry.GetByID(task.ModelID)
|
||||
if err == nil && spec.CostPer1K <= budget {
|
||||
return client, spec, nil
|
||||
}
|
||||
}
|
||||
|
||||
if len(task.RequiredCaps) > 0 {
|
||||
for _, cap := range task.RequiredCaps {
|
||||
client, spec, err := r.registry.GetBest(cap)
|
||||
if err == nil && spec.CostPer1K <= budget {
|
||||
return client, spec, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule, ok := r.rules[task.Type]
|
||||
if ok {
|
||||
for _, cap := range rule.Preferred {
|
||||
client, spec, err := r.registry.GetBest(cap)
|
||||
if err == nil && spec.CostPer1K <= budget {
|
||||
return client, spec, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, modelID := range rule.Fallback {
|
||||
client, spec, err := r.registry.GetByID(modelID)
|
||||
if err == nil && spec.CostPer1K <= budget {
|
||||
return client, spec, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
models := r.registry.GetAll()
|
||||
if len(models) == 0 {
|
||||
return nil, llm.ModelSpec{}, errors.New("no models available")
|
||||
}
|
||||
|
||||
sort.Slice(models, func(i, j int) bool {
|
||||
return models[i].CostPer1K < models[j].CostPer1K
|
||||
})
|
||||
|
||||
for _, spec := range models {
|
||||
if spec.CostPer1K <= budget {
|
||||
client, err := r.registry.GetClient(spec.ID)
|
||||
if err == nil {
|
||||
return client, spec, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
client, err := r.registry.GetClient(models[0].ID)
|
||||
if err != nil {
|
||||
return nil, llm.ModelSpec{}, err
|
||||
}
|
||||
return client, models[0], nil
|
||||
}
|
||||
|
||||
func (r *Router) RouteMultiple(task *SubTask, count int, budget float64) ([]llm.Client, []llm.ModelSpec, error) {
|
||||
var clients []llm.Client
|
||||
var specs []llm.ModelSpec
|
||||
|
||||
usedModels := make(map[string]bool)
|
||||
perModelBudget := budget / float64(count)
|
||||
|
||||
rule, ok := r.rules[task.Type]
|
||||
if !ok {
|
||||
rule = RoutingRule{
|
||||
Preferred: []llm.ModelCapability{llm.CapReasoning, llm.CapCoding, llm.CapFast},
|
||||
}
|
||||
}
|
||||
|
||||
for _, cap := range rule.Preferred {
|
||||
if len(clients) >= count {
|
||||
break
|
||||
}
|
||||
|
||||
models := r.registry.GetAllWithCapability(cap)
|
||||
for _, spec := range models {
|
||||
if len(clients) >= count {
|
||||
break
|
||||
}
|
||||
if usedModels[spec.ID] {
|
||||
continue
|
||||
}
|
||||
if spec.CostPer1K > perModelBudget {
|
||||
continue
|
||||
}
|
||||
|
||||
client, err := r.registry.GetClient(spec.ID)
|
||||
if err == nil {
|
||||
clients = append(clients, client)
|
||||
specs = append(specs, spec)
|
||||
usedModels[spec.ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(clients) < count {
|
||||
models := r.registry.GetAll()
|
||||
for _, spec := range models {
|
||||
if len(clients) >= count {
|
||||
break
|
||||
}
|
||||
if usedModels[spec.ID] {
|
||||
continue
|
||||
}
|
||||
|
||||
client, err := r.registry.GetClient(spec.ID)
|
||||
if err == nil {
|
||||
clients = append(clients, client)
|
||||
specs = append(specs, spec)
|
||||
usedModels[spec.ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(clients) == 0 {
|
||||
return nil, nil, errors.New("no models available for consensus")
|
||||
}
|
||||
|
||||
return clients, specs, nil
|
||||
}
|
||||
|
||||
// SetRule installs or replaces the routing rule for a task type.
// NOTE(review): rules is a plain map with no lock; calling SetRule
// concurrently with Route is a data race — confirm configuration only
// happens at startup.
func (r *Router) SetRule(taskType TaskType, rule RoutingRule) {
	r.rules[taskType] = rule
}
|
||||
|
||||
// GetRule returns the routing rule for a task type and whether one exists.
func (r *Router) GetRule(taskType TaskType) (RoutingRule, bool) {
	rule, ok := r.rules[taskType]
	return rule, ok
}
|
||||
|
||||
func (r *Router) EstimateCost(task *SubTask, inputTokens, outputTokens int) float64 {
|
||||
_, spec, err := r.Route(task, 1.0)
|
||||
if err != nil {
|
||||
return 0.01
|
||||
}
|
||||
|
||||
totalTokens := inputTokens + outputTokens
|
||||
return spec.CostPer1K * float64(totalTokens) / 1000.0
|
||||
}
|
||||
431
backend/internal/computer/sandbox.go
Normal file
431
backend/internal/computer/sandbox.go
Normal file
@@ -0,0 +1,431 @@
|
||||
package computer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// SandboxConfig controls how sandboxes are created and what resource limits
// they run under. Zero-valued fields are partially defaulted by
// NewSandboxManager; DefaultSandboxConfig returns a fully-populated instance.
type SandboxConfig struct {
	Image        string        // Docker image used for sandbox containers
	Timeout      time.Duration // wall-clock limit for one execution
	MemoryLimit  string        // Docker --memory value, e.g. "512m"
	CPULimit     string        // Docker --cpus value, e.g. "1.0"
	NetworkMode  string        // NOTE(review): not read in this file — Create keys off AllowNetwork instead; confirm intended use
	WorkDir      string        // workspace mount point inside the container
	MaxFileSize  int64         // per-file byte cap for writes and collected outputs
	AllowNetwork bool          // when false, containers are created with --network none
}
|
||||
|
||||
func DefaultSandboxConfig() SandboxConfig {
|
||||
return SandboxConfig{
|
||||
Image: "gooseek/sandbox:latest",
|
||||
Timeout: 5 * time.Minute,
|
||||
MemoryLimit: "512m",
|
||||
CPULimit: "1.0",
|
||||
NetworkMode: "none",
|
||||
WorkDir: "/workspace",
|
||||
MaxFileSize: 10 * 1024 * 1024,
|
||||
AllowNetwork: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Sandbox is one isolated execution environment: a host temp directory that
// is optionally bind-mounted into a Docker container.
type Sandbox struct {
	ID          string    // short (8-char) unique identifier
	ContainerID string    // Docker container ID; empty in local (non-Docker) mode
	WorkDir     string    // host path of the workspace directory
	Status      string    // lifecycle state, e.g. "creating", "running"
	TaskID      string    // task this sandbox was created for
	CreatedAt   time.Time // creation timestamp
}
|
||||
|
||||
// SandboxManager creates, tracks and destroys sandboxes, transparently
// falling back to direct host execution when Docker is unavailable.
type SandboxManager struct {
	cfg       SandboxConfig       // defaults filled in by NewSandboxManager
	sandboxes map[string]*Sandbox // live sandboxes keyed by ID; guarded by mu
	mu        sync.RWMutex
	useDocker bool // detected once at construction time
}
|
||||
|
||||
func NewSandboxManager(cfg SandboxConfig) *SandboxManager {
|
||||
if cfg.Timeout == 0 {
|
||||
cfg.Timeout = 5 * time.Minute
|
||||
}
|
||||
if cfg.MemoryLimit == "" {
|
||||
cfg.MemoryLimit = "512m"
|
||||
}
|
||||
if cfg.WorkDir == "" {
|
||||
cfg.WorkDir = "/workspace"
|
||||
}
|
||||
|
||||
useDocker := isDockerAvailable()
|
||||
|
||||
return &SandboxManager{
|
||||
cfg: cfg,
|
||||
sandboxes: make(map[string]*Sandbox),
|
||||
useDocker: useDocker,
|
||||
}
|
||||
}
|
||||
|
||||
// isDockerAvailable reports whether the docker CLI responds to
// "docker version", i.e. a usable Docker daemon is reachable.
func isDockerAvailable() bool {
	err := exec.Command("docker", "version").Run()
	return err == nil
}
|
||||
|
||||
func (sm *SandboxManager) Create(ctx context.Context, taskID string) (*Sandbox, error) {
|
||||
sandboxID := uuid.New().String()[:8]
|
||||
|
||||
sandbox := &Sandbox{
|
||||
ID: sandboxID,
|
||||
TaskID: taskID,
|
||||
Status: "creating",
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
if sm.useDocker {
|
||||
workDir, err := os.MkdirTemp("", fmt.Sprintf("sandbox-%s-", sandboxID))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
sandbox.WorkDir = workDir
|
||||
|
||||
args := []string{
|
||||
"create",
|
||||
"--name", fmt.Sprintf("gooseek-sandbox-%s", sandboxID),
|
||||
"-v", fmt.Sprintf("%s:%s", workDir, sm.cfg.WorkDir),
|
||||
"-w", sm.cfg.WorkDir,
|
||||
"--memory", sm.cfg.MemoryLimit,
|
||||
"--cpus", sm.cfg.CPULimit,
|
||||
}
|
||||
|
||||
if !sm.cfg.AllowNetwork {
|
||||
args = append(args, "--network", "none")
|
||||
}
|
||||
|
||||
args = append(args, sm.cfg.Image, "tail", "-f", "/dev/null")
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", args...)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
os.RemoveAll(workDir)
|
||||
return nil, fmt.Errorf("failed to create container: %w - %s", err, string(output))
|
||||
}
|
||||
|
||||
sandbox.ContainerID = strings.TrimSpace(string(output))
|
||||
|
||||
startCmd := exec.CommandContext(ctx, "docker", "start", sandbox.ContainerID)
|
||||
if err := startCmd.Run(); err != nil {
|
||||
sm.cleanupContainer(sandbox)
|
||||
return nil, fmt.Errorf("failed to start container: %w", err)
|
||||
}
|
||||
} else {
|
||||
workDir, err := os.MkdirTemp("", fmt.Sprintf("sandbox-%s-", sandboxID))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
sandbox.WorkDir = workDir
|
||||
}
|
||||
|
||||
sandbox.Status = "running"
|
||||
|
||||
sm.mu.Lock()
|
||||
sm.sandboxes[sandboxID] = sandbox
|
||||
sm.mu.Unlock()
|
||||
|
||||
return sandbox, nil
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) Execute(ctx context.Context, sandbox *Sandbox, code string, lang string) (*SandboxResult, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, sm.cfg.Timeout)
|
||||
defer cancel()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
filename, err := sm.writeCodeFile(sandbox, code, lang)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cmd *exec.Cmd
|
||||
var stdout, stderr bytes.Buffer
|
||||
|
||||
if sm.useDocker {
|
||||
runCmd := sm.getRunCommand(lang, filename)
|
||||
cmd = exec.CommandContext(ctx, "docker", "exec", sandbox.ContainerID, "sh", "-c", runCmd)
|
||||
} else {
|
||||
cmd = sm.getLocalCommand(ctx, lang, filepath.Join(sandbox.WorkDir, filename))
|
||||
}
|
||||
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err = cmd.Run()
|
||||
exitCode := 0
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
exitCode = exitErr.ExitCode()
|
||||
} else if ctx.Err() == context.DeadlineExceeded {
|
||||
return &SandboxResult{
|
||||
Stderr: "Execution timeout exceeded",
|
||||
ExitCode: -1,
|
||||
Duration: time.Since(startTime),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
files, _ := sm.collectOutputFiles(sandbox)
|
||||
|
||||
return &SandboxResult{
|
||||
Stdout: stdout.String(),
|
||||
Stderr: stderr.String(),
|
||||
ExitCode: exitCode,
|
||||
Files: files,
|
||||
Duration: time.Since(startTime),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) RunCommand(ctx context.Context, sandbox *Sandbox, command string) (*SandboxResult, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, sm.cfg.Timeout)
|
||||
defer cancel()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
var cmd *exec.Cmd
|
||||
var stdout, stderr bytes.Buffer
|
||||
|
||||
if sm.useDocker {
|
||||
cmd = exec.CommandContext(ctx, "docker", "exec", sandbox.ContainerID, "sh", "-c", command)
|
||||
} else {
|
||||
cmd = exec.CommandContext(ctx, "sh", "-c", command)
|
||||
cmd.Dir = sandbox.WorkDir
|
||||
}
|
||||
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
exitCode := 0
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
exitCode = exitErr.ExitCode()
|
||||
}
|
||||
}
|
||||
|
||||
return &SandboxResult{
|
||||
Stdout: stdout.String(),
|
||||
Stderr: stderr.String(),
|
||||
ExitCode: exitCode,
|
||||
Duration: time.Since(startTime),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) WriteFile(ctx context.Context, sandbox *Sandbox, path string, content []byte) error {
|
||||
if int64(len(content)) > sm.cfg.MaxFileSize {
|
||||
return fmt.Errorf("file size exceeds limit: %d > %d", len(content), sm.cfg.MaxFileSize)
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(sandbox.WorkDir, path)
|
||||
dir := filepath.Dir(fullPath)
|
||||
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
|
||||
return os.WriteFile(fullPath, content, 0644)
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) ReadFile(ctx context.Context, sandbox *Sandbox, path string) ([]byte, error) {
|
||||
fullPath := filepath.Join(sandbox.WorkDir, path)
|
||||
return os.ReadFile(fullPath)
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) Destroy(ctx context.Context, sandbox *Sandbox) error {
|
||||
sm.mu.Lock()
|
||||
delete(sm.sandboxes, sandbox.ID)
|
||||
sm.mu.Unlock()
|
||||
|
||||
if sm.useDocker && sandbox.ContainerID != "" {
|
||||
sm.cleanupContainer(sandbox)
|
||||
}
|
||||
|
||||
if sandbox.WorkDir != "" {
|
||||
os.RemoveAll(sandbox.WorkDir)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) cleanupContainer(sandbox *Sandbox) {
|
||||
exec.Command("docker", "stop", sandbox.ContainerID).Run()
|
||||
exec.Command("docker", "rm", "-f", sandbox.ContainerID).Run()
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) writeCodeFile(sandbox *Sandbox, code string, lang string) (string, error) {
|
||||
var filename string
|
||||
switch lang {
|
||||
case "python", "py":
|
||||
filename = "main.py"
|
||||
case "javascript", "js", "node":
|
||||
filename = "main.js"
|
||||
case "typescript", "ts":
|
||||
filename = "main.ts"
|
||||
case "go", "golang":
|
||||
filename = "main.go"
|
||||
case "bash", "sh", "shell":
|
||||
filename = "script.sh"
|
||||
case "ruby", "rb":
|
||||
filename = "main.rb"
|
||||
default:
|
||||
filename = "main.txt"
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(sandbox.WorkDir, filename)
|
||||
if err := os.WriteFile(fullPath, []byte(code), 0755); err != nil {
|
||||
return "", fmt.Errorf("failed to write code file: %w", err)
|
||||
}
|
||||
|
||||
return filename, nil
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) getRunCommand(lang, filename string) string {
|
||||
switch lang {
|
||||
case "python", "py":
|
||||
return fmt.Sprintf("python3 %s/%s", sm.cfg.WorkDir, filename)
|
||||
case "javascript", "js", "node":
|
||||
return fmt.Sprintf("node %s/%s", sm.cfg.WorkDir, filename)
|
||||
case "typescript", "ts":
|
||||
return fmt.Sprintf("npx ts-node %s/%s", sm.cfg.WorkDir, filename)
|
||||
case "go", "golang":
|
||||
return fmt.Sprintf("go run %s/%s", sm.cfg.WorkDir, filename)
|
||||
case "bash", "sh", "shell":
|
||||
return fmt.Sprintf("bash %s/%s", sm.cfg.WorkDir, filename)
|
||||
case "ruby", "rb":
|
||||
return fmt.Sprintf("ruby %s/%s", sm.cfg.WorkDir, filename)
|
||||
default:
|
||||
return fmt.Sprintf("cat %s/%s", sm.cfg.WorkDir, filename)
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) getLocalCommand(ctx context.Context, lang, filepath string) *exec.Cmd {
|
||||
switch lang {
|
||||
case "python", "py":
|
||||
return exec.CommandContext(ctx, "python3", filepath)
|
||||
case "javascript", "js", "node":
|
||||
return exec.CommandContext(ctx, "node", filepath)
|
||||
case "go", "golang":
|
||||
return exec.CommandContext(ctx, "go", "run", filepath)
|
||||
case "bash", "sh", "shell":
|
||||
return exec.CommandContext(ctx, "bash", filepath)
|
||||
case "ruby", "rb":
|
||||
return exec.CommandContext(ctx, "ruby", filepath)
|
||||
default:
|
||||
return exec.CommandContext(ctx, "cat", filepath)
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) collectOutputFiles(sandbox *Sandbox) (map[string][]byte, error) {
|
||||
files := make(map[string][]byte)
|
||||
|
||||
err := filepath.Walk(sandbox.WorkDir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(sandbox.WorkDir, path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(relPath, "main.") || strings.HasPrefix(relPath, "script.") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.Size() > sm.cfg.MaxFileSize {
|
||||
return nil
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
files[relPath] = content
|
||||
return nil
|
||||
})
|
||||
|
||||
return files, err
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) ListSandboxes() []*Sandbox {
|
||||
sm.mu.RLock()
|
||||
defer sm.mu.RUnlock()
|
||||
|
||||
result := make([]*Sandbox, 0, len(sm.sandboxes))
|
||||
for _, s := range sm.sandboxes {
|
||||
result = append(result, s)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) GetSandbox(id string) (*Sandbox, bool) {
|
||||
sm.mu.RLock()
|
||||
defer sm.mu.RUnlock()
|
||||
s, ok := sm.sandboxes[id]
|
||||
return s, ok
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) CopyToContainer(ctx context.Context, sandbox *Sandbox, src string, dst string) error {
|
||||
if !sm.useDocker {
|
||||
srcData, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sm.WriteFile(ctx, sandbox, dst, srcData)
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", "cp", src, fmt.Sprintf("%s:%s", sandbox.ContainerID, dst))
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (sm *SandboxManager) CopyFromContainer(ctx context.Context, sandbox *Sandbox, src string, dst string) error {
|
||||
if !sm.useDocker {
|
||||
srcPath := filepath.Join(sandbox.WorkDir, src)
|
||||
srcData, err := os.ReadFile(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(dst, srcData, 0644)
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", "cp", fmt.Sprintf("%s:%s", sandbox.ContainerID, src), dst)
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// StreamLogs returns a reader following the sandbox container's log output
// ("docker logs -f"). It is only supported in Docker mode.
//
// NOTE(review): the started command is never Wait()ed, so the docker process
// is left as a zombie once the stream ends — confirm whether callers rely on
// ctx cancellation to reap it.
func (sm *SandboxManager) StreamLogs(ctx context.Context, sandbox *Sandbox) (io.ReadCloser, error) {
	if !sm.useDocker {
		return nil, fmt.Errorf("streaming not supported without Docker")
	}

	cmd := exec.CommandContext(ctx, "docker", "logs", "-f", sandbox.ContainerID)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}

	if err := cmd.Start(); err != nil {
		return nil, err
	}

	return stdout, nil
}
|
||||
386
backend/internal/computer/scheduler.go
Normal file
386
backend/internal/computer/scheduler.go
Normal file
@@ -0,0 +1,386 @@
|
||||
package computer
|
||||
|
||||
import (
	"context"
	"log"
	"strconv"
	"sync"
	"time"

	"github.com/robfig/cron/v3"
)
|
||||
|
||||
// Scheduler executes ComputerTasks on their configured schedules using a
// seconds-precision cron engine plus a periodic repository poller.
type Scheduler struct {
	taskRepo TaskRepository
	computer *Computer
	cron     *cron.Cron
	jobs     map[string]cron.EntryID // one cron entry per task ID; guarded by mu
	running  map[string]bool         // tasks currently executing (overlap guard); guarded by mu
	mu       sync.RWMutex
	stopCh   chan struct{} // closed by Stop to end the poller goroutine
}
|
||||
|
||||
func NewScheduler(taskRepo TaskRepository, computer *Computer) *Scheduler {
|
||||
return &Scheduler{
|
||||
taskRepo: taskRepo,
|
||||
computer: computer,
|
||||
cron: cron.New(cron.WithSeconds()),
|
||||
jobs: make(map[string]cron.EntryID),
|
||||
running: make(map[string]bool),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) Start(ctx context.Context) {
|
||||
s.cron.Start()
|
||||
|
||||
go s.pollScheduledTasks(ctx)
|
||||
|
||||
log.Println("[Scheduler] Started")
|
||||
}
|
||||
|
||||
// Stop shuts the scheduler down: it signals the poller goroutine via stopCh
// and halts the cron engine.
// NOTE(review): calling Stop more than once panics on the double close of
// stopCh — confirm callers invoke it exactly once (or guard with sync.Once).
func (s *Scheduler) Stop() {
	close(s.stopCh)
	s.cron.Stop()
	log.Println("[Scheduler] Stopped")
}
|
||||
|
||||
func (s *Scheduler) pollScheduledTasks(ctx context.Context) {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
s.loadScheduledTasks(ctx)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-s.stopCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.checkAndExecute(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) loadScheduledTasks(ctx context.Context) {
|
||||
tasks, err := s.taskRepo.GetScheduled(ctx)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] Failed to load scheduled tasks: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, task := range tasks {
|
||||
if task.Schedule != nil && task.Schedule.Enabled {
|
||||
s.scheduleTask(&task)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[Scheduler] Loaded %d scheduled tasks", len(tasks))
|
||||
}
|
||||
|
||||
func (s *Scheduler) scheduleTask(task *ComputerTask) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if oldID, exists := s.jobs[task.ID]; exists {
|
||||
s.cron.Remove(oldID)
|
||||
}
|
||||
|
||||
if task.Schedule == nil || !task.Schedule.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
var entryID cron.EntryID
|
||||
var err error
|
||||
|
||||
switch task.Schedule.Type {
|
||||
case "cron":
|
||||
if task.Schedule.CronExpr == "" {
|
||||
return nil
|
||||
}
|
||||
entryID, err = s.cron.AddFunc(task.Schedule.CronExpr, func() {
|
||||
s.executeScheduledTask(task.ID)
|
||||
})
|
||||
|
||||
case "interval":
|
||||
if task.Schedule.Interval <= 0 {
|
||||
return nil
|
||||
}
|
||||
cronExpr := s.intervalToCron(task.Schedule.Interval)
|
||||
entryID, err = s.cron.AddFunc(cronExpr, func() {
|
||||
s.executeScheduledTask(task.ID)
|
||||
})
|
||||
|
||||
case "once":
|
||||
go func() {
|
||||
if task.Schedule.NextRun.After(time.Now()) {
|
||||
time.Sleep(time.Until(task.Schedule.NextRun))
|
||||
}
|
||||
s.executeScheduledTask(task.ID)
|
||||
}()
|
||||
return nil
|
||||
|
||||
case "daily":
|
||||
entryID, err = s.cron.AddFunc("0 0 9 * * *", func() {
|
||||
s.executeScheduledTask(task.ID)
|
||||
})
|
||||
|
||||
case "hourly":
|
||||
entryID, err = s.cron.AddFunc("0 0 * * * *", func() {
|
||||
s.executeScheduledTask(task.ID)
|
||||
})
|
||||
|
||||
case "weekly":
|
||||
entryID, err = s.cron.AddFunc("0 0 9 * * 1", func() {
|
||||
s.executeScheduledTask(task.ID)
|
||||
})
|
||||
|
||||
case "monthly":
|
||||
entryID, err = s.cron.AddFunc("0 0 9 1 * *", func() {
|
||||
s.executeScheduledTask(task.ID)
|
||||
})
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] Failed to schedule task %s: %v", task.ID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
s.jobs[task.ID] = entryID
|
||||
log.Printf("[Scheduler] Scheduled task %s with type %s", task.ID, task.Schedule.Type)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) intervalToCron(seconds int) string {
|
||||
if seconds < 60 {
|
||||
return "*/30 * * * * *"
|
||||
}
|
||||
if seconds < 3600 {
|
||||
minutes := seconds / 60
|
||||
return "0 */" + itoa(minutes) + " * * * *"
|
||||
}
|
||||
if seconds < 86400 {
|
||||
hours := seconds / 3600
|
||||
return "0 0 */" + itoa(hours) + " * * *"
|
||||
}
|
||||
return "0 0 0 * * *"
|
||||
}
|
||||
|
||||
// itoa converts an integer to its decimal string form.
//
// The previous implementation only handled single digits 0-9 and returned an
// empty string for anything larger, which made intervalToCron emit invalid
// expressions such as "0 */ * * * *" for intervals of ten minutes or more.
func itoa(i int) string {
	return strconv.Itoa(i)
}
|
||||
|
||||
func (s *Scheduler) executeScheduledTask(taskID string) {
|
||||
s.mu.Lock()
|
||||
if s.running[taskID] {
|
||||
s.mu.Unlock()
|
||||
log.Printf("[Scheduler] Task %s is already running, skipping", taskID)
|
||||
return
|
||||
}
|
||||
s.running[taskID] = true
|
||||
s.mu.Unlock()
|
||||
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
delete(s.running, taskID)
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
task, err := s.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] Failed to get task %s: %v", taskID, err)
|
||||
return
|
||||
}
|
||||
|
||||
if task.Schedule != nil {
|
||||
if task.Schedule.ExpiresAt != nil && time.Now().After(*task.Schedule.ExpiresAt) {
|
||||
log.Printf("[Scheduler] Task %s has expired, removing", taskID)
|
||||
s.Cancel(taskID)
|
||||
return
|
||||
}
|
||||
|
||||
if task.Schedule.MaxRuns > 0 && task.Schedule.RunCount >= task.Schedule.MaxRuns {
|
||||
log.Printf("[Scheduler] Task %s reached max runs (%d), removing", taskID, task.Schedule.MaxRuns)
|
||||
s.Cancel(taskID)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[Scheduler] Executing scheduled task %s (run #%d)", taskID, task.RunCount+1)
|
||||
|
||||
_, err = s.computer.Execute(ctx, task.UserID, task.Query, ExecuteOptions{
|
||||
Async: false,
|
||||
Context: task.Memory,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] Task %s execution failed: %v", taskID, err)
|
||||
} else {
|
||||
log.Printf("[Scheduler] Task %s completed successfully", taskID)
|
||||
}
|
||||
|
||||
task.RunCount++
|
||||
if task.Schedule != nil {
|
||||
task.Schedule.RunCount = task.RunCount
|
||||
task.Schedule.NextRun = s.calculateNextRun(task.Schedule)
|
||||
task.NextRunAt = &task.Schedule.NextRun
|
||||
}
|
||||
task.UpdatedAt = time.Now()
|
||||
|
||||
if err := s.taskRepo.Update(ctx, task); err != nil {
|
||||
log.Printf("[Scheduler] Failed to update task %s: %v", taskID, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) calculateNextRun(schedule *Schedule) time.Time {
|
||||
switch schedule.Type {
|
||||
case "interval":
|
||||
return time.Now().Add(time.Duration(schedule.Interval) * time.Second)
|
||||
case "hourly":
|
||||
return time.Now().Add(time.Hour).Truncate(time.Hour)
|
||||
case "daily":
|
||||
next := time.Now().Add(24 * time.Hour)
|
||||
return time.Date(next.Year(), next.Month(), next.Day(), 9, 0, 0, 0, next.Location())
|
||||
case "weekly":
|
||||
next := time.Now().Add(7 * 24 * time.Hour)
|
||||
return time.Date(next.Year(), next.Month(), next.Day(), 9, 0, 0, 0, next.Location())
|
||||
case "monthly":
|
||||
next := time.Now().AddDate(0, 1, 0)
|
||||
return time.Date(next.Year(), next.Month(), 1, 9, 0, 0, 0, next.Location())
|
||||
default:
|
||||
return time.Now().Add(time.Hour)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) checkAndExecute(ctx context.Context) {
|
||||
tasks, err := s.taskRepo.GetScheduled(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
for _, task := range tasks {
|
||||
if task.NextRunAt != nil && task.NextRunAt.Before(now) {
|
||||
if task.Schedule != nil && task.Schedule.Enabled {
|
||||
go s.executeScheduledTask(task.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) Schedule(taskID string, schedule Schedule) error {
|
||||
ctx := context.Background()
|
||||
task, err := s.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
task.Schedule = &schedule
|
||||
task.Schedule.Enabled = true
|
||||
task.Schedule.NextRun = s.calculateNextRun(&schedule)
|
||||
task.NextRunAt = &task.Schedule.NextRun
|
||||
task.Status = StatusScheduled
|
||||
task.UpdatedAt = time.Now()
|
||||
|
||||
if err := s.taskRepo.Update(ctx, task); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.scheduleTask(task)
|
||||
}
|
||||
|
||||
func (s *Scheduler) Cancel(taskID string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if entryID, exists := s.jobs[taskID]; exists {
|
||||
s.cron.Remove(entryID)
|
||||
delete(s.jobs, taskID)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
task, err := s.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if task.Schedule != nil {
|
||||
task.Schedule.Enabled = false
|
||||
}
|
||||
task.Status = StatusCancelled
|
||||
task.UpdatedAt = time.Now()
|
||||
|
||||
return s.taskRepo.Update(ctx, task)
|
||||
}
|
||||
|
||||
func (s *Scheduler) Pause(taskID string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if entryID, exists := s.jobs[taskID]; exists {
|
||||
s.cron.Remove(entryID)
|
||||
delete(s.jobs, taskID)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
task, err := s.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if task.Schedule != nil {
|
||||
task.Schedule.Enabled = false
|
||||
}
|
||||
task.UpdatedAt = time.Now()
|
||||
|
||||
return s.taskRepo.Update(ctx, task)
|
||||
}
|
||||
|
||||
func (s *Scheduler) Resume(taskID string) error {
|
||||
ctx := context.Background()
|
||||
task, err := s.taskRepo.GetByID(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if task.Schedule != nil {
|
||||
task.Schedule.Enabled = true
|
||||
task.Schedule.NextRun = s.calculateNextRun(task.Schedule)
|
||||
task.NextRunAt = &task.Schedule.NextRun
|
||||
}
|
||||
task.Status = StatusScheduled
|
||||
task.UpdatedAt = time.Now()
|
||||
|
||||
if err := s.taskRepo.Update(ctx, task); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.scheduleTask(task)
|
||||
}
|
||||
|
||||
func (s *Scheduler) GetScheduledTasks() []string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
result := make([]string, 0, len(s.jobs))
|
||||
for taskID := range s.jobs {
|
||||
result = append(result, taskID)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Scheduler) IsRunning(taskID string) bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.running[taskID]
|
||||
}
|
||||
376
backend/internal/computer/types.go
Normal file
376
backend/internal/computer/types.go
Normal file
@@ -0,0 +1,376 @@
|
||||
package computer
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gooseek/backend/internal/llm"
|
||||
)
|
||||
|
||||
// TaskStatus is the lifecycle state of a ComputerTask or SubTask.
type TaskStatus string

const (
	StatusPending     TaskStatus = "pending"
	StatusPlanning    TaskStatus = "planning"
	StatusExecuting   TaskStatus = "executing"
	StatusWaiting     TaskStatus = "waiting_user" // blocked on user input
	StatusCompleted   TaskStatus = "completed"
	StatusFailed      TaskStatus = "failed"
	StatusCancelled   TaskStatus = "cancelled"
	StatusScheduled   TaskStatus = "scheduled"
	StatusPaused      TaskStatus = "paused"
	StatusCheckpoint  TaskStatus = "checkpoint"
	StatusLongRunning TaskStatus = "long_running"
)

// TaskType categorizes the kind of work a task or sub-task performs.
type TaskType string

const (
	TaskResearch    TaskType = "research"
	TaskCode        TaskType = "code"
	TaskAnalysis    TaskType = "analysis"
	TaskDesign      TaskType = "design"
	TaskDeploy      TaskType = "deploy"
	TaskMonitor     TaskType = "monitor"
	TaskReport      TaskType = "report"
	TaskCommunicate TaskType = "communicate"
	TaskSchedule    TaskType = "schedule"
	TaskTransform   TaskType = "transform"
	TaskValidate    TaskType = "validate"
)
|
||||
|
||||
// ComputerTask is the top-level unit of work executed by the computer
// service. It carries the user's query, the generated plan and sub-tasks,
// produced artifacts, scheduling state, and long-running-execution
// bookkeeping (checkpoints, heartbeats, runtime accounting).
type ComputerTask struct {
	ID        string                 `json:"id"`
	UserID    string                 `json:"userId"`
	Query     string                 `json:"query"`
	Status    TaskStatus             `json:"status"`
	Plan      *TaskPlan              `json:"plan,omitempty"`
	SubTasks  []SubTask              `json:"subTasks,omitempty"`
	Artifacts []Artifact             `json:"artifacts,omitempty"`
	Memory    map[string]interface{} `json:"memory,omitempty"`
	// Progress is an integer percentage-style indicator — presumably 0-100;
	// confirm against the code that updates it.
	Progress    int        `json:"progress"`
	Message     string     `json:"message,omitempty"`
	Error       string     `json:"error,omitempty"`
	Schedule    *Schedule  `json:"schedule,omitempty"`
	NextRunAt   *time.Time `json:"nextRunAt,omitempty"`
	RunCount    int        `json:"runCount"`
	TotalCost   float64    `json:"totalCost"`
	CreatedAt   time.Time  `json:"createdAt"`
	UpdatedAt   time.Time  `json:"updatedAt"`
	CompletedAt *time.Time `json:"completedAt,omitempty"`

	// Long-running execution state.
	DurationMode   DurationMode    `json:"durationMode"`
	Checkpoint     *Checkpoint     `json:"checkpoint,omitempty"`  // latest checkpoint
	Checkpoints    []Checkpoint    `json:"checkpoints,omitempty"` // full checkpoint history
	MaxDuration    time.Duration   `json:"maxDuration"`
	EstimatedEnd   *time.Time      `json:"estimatedEnd,omitempty"`
	Iterations     int             `json:"iterations"`
	MaxIterations  int             `json:"maxIterations"`
	PausedAt       *time.Time      `json:"pausedAt,omitempty"`
	ResumedAt      *time.Time      `json:"resumedAt,omitempty"`
	TotalRuntime   time.Duration   `json:"totalRuntime"`
	HeartbeatAt    *time.Time      `json:"heartbeatAt,omitempty"`
	Priority       TaskPriority    `json:"priority"`
	ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"`
}
|
||||
|
||||
// DurationMode selects a runtime profile for a task; each mode maps to
// concrete limits in DurationModeConfigs.
type DurationMode string

const (
	DurationShort     DurationMode = "short"
	DurationMedium    DurationMode = "medium"
	DurationLong      DurationMode = "long"
	DurationExtended  DurationMode = "extended"
	DurationUnlimited DurationMode = "unlimited"
)

// TaskPriority ranks tasks for scheduling/resource decisions.
type TaskPriority string

const (
	PriorityLow      TaskPriority = "low"
	PriorityNormal   TaskPriority = "normal"
	PriorityHigh     TaskPriority = "high"
	PriorityCritical TaskPriority = "critical"
)
|
||||
|
||||
// Checkpoint captures a resumable snapshot of a long-running task: where
// execution stopped (sub-task/wave indices), accumulated state and memory,
// and runtime/cost counters at the moment the snapshot was taken.
type Checkpoint struct {
	ID           string                 `json:"id"`
	TaskID       string                 `json:"taskId"`
	SubTaskIndex int                    `json:"subTaskIndex"`
	WaveIndex    int                    `json:"waveIndex"`
	State        map[string]interface{} `json:"state"`
	Progress     int                    `json:"progress"`
	Artifacts    []string               `json:"artifacts"` // presumably artifact IDs — confirm at the writer
	Memory       map[string]interface{} `json:"memory"`
	CreatedAt    time.Time              `json:"createdAt"`
	RuntimeSoFar time.Duration          `json:"runtimeSoFar"`
	CostSoFar    float64                `json:"costSoFar"`
	Reason       string                 `json:"reason"` // why the checkpoint was taken
}

// ResourceLimits bounds what a task may consume while executing.
type ResourceLimits struct {
	MaxCPU          float64 `json:"maxCpu"`
	MaxMemoryMB     int     `json:"maxMemoryMb"`
	MaxDiskMB       int     `json:"maxDiskMb"`
	MaxNetworkMbps  int     `json:"maxNetworkMbps"`
	MaxCostPerHour  float64 `json:"maxCostPerHour"`
	MaxTotalCost    float64 `json:"maxTotalCost"`
	MaxConcurrent   int     `json:"maxConcurrent"`
	IdleTimeoutMins int     `json:"idleTimeoutMins"`
}
|
||||
|
||||
// DurationModeConfigs maps each DurationMode to its runtime limits: maximum
// total duration, checkpoint frequency, heartbeat frequency, and maximum
// iterations. For DurationUnlimited, MaxIterations is 0 — presumably meaning
// "no iteration cap"; confirm at the enforcement site.
var DurationModeConfigs = map[DurationMode]struct {
	MaxDuration    time.Duration
	CheckpointFreq time.Duration
	HeartbeatFreq  time.Duration
	MaxIterations  int
}{
	DurationShort:     {30 * time.Minute, 5 * time.Minute, 30 * time.Second, 10},
	DurationMedium:    {4 * time.Hour, 15 * time.Minute, time.Minute, 50},
	DurationLong:      {24 * time.Hour, 30 * time.Minute, 2 * time.Minute, 200},
	DurationExtended:  {7 * 24 * time.Hour, time.Hour, 5 * time.Minute, 1000},
	DurationUnlimited: {365 * 24 * time.Hour, 4 * time.Hour, 10 * time.Minute, 0},
}
|
||||
|
||||
// SubTask is a single step within a task plan: what to do, which model
// (and capabilities) to use, its dependencies on sibling sub-tasks, and its
// execution outcome.
type SubTask struct {
	ID           string                 `json:"id"`
	Type         TaskType               `json:"type"`
	Description  string                 `json:"description"`
	Dependencies []string               `json:"dependencies,omitempty"` // IDs of sub-tasks that must finish first
	ModelID      string                 `json:"modelId,omitempty"`
	RequiredCaps []llm.ModelCapability  `json:"requiredCaps,omitempty"`
	Input        map[string]interface{} `json:"input,omitempty"`
	Output       map[string]interface{} `json:"output,omitempty"`
	Status       TaskStatus             `json:"status"`
	Progress     int                    `json:"progress"`
	Error        string                 `json:"error,omitempty"`
	Cost         float64                `json:"cost"`
	StartedAt    *time.Time             `json:"startedAt,omitempty"`
	CompletedAt  *time.Time             `json:"completedAt,omitempty"`
	Retries      int                    `json:"retries"`
	MaxRetries   int                    `json:"maxRetries"`
}

// TaskPlan is the decomposition of a query into sub-tasks, plus an execution
// order expressed as waves of sub-task IDs that can run together.
type TaskPlan struct {
	Query          string     `json:"query"`
	Summary        string     `json:"summary"`
	SubTasks       []SubTask  `json:"subTasks"`
	ExecutionOrder [][]string `json:"executionOrder"` // each inner slice is one wave of sub-task IDs
	EstimatedCost  float64    `json:"estimatedCost"`
	EstimatedTime  int        `json:"estimatedTimeSeconds"`
}
|
||||
|
||||
// Artifact is an output produced by a task (file, code, report, ...). The
// raw bytes are kept out of JSON payloads; consumers use URL/Size/MimeType.
type Artifact struct {
	ID       string `json:"id"`
	TaskID   string `json:"taskId"`
	Type     string `json:"type"` // one of the ArtifactType* constants
	Name     string `json:"name"`
	Content  []byte `json:"-"` // raw bytes, never serialized to JSON
	URL      string `json:"url,omitempty"`
	Size     int64  `json:"size"`
	MimeType string `json:"mimeType,omitempty"`
	Metadata map[string]interface{} `json:"metadata,omitempty"`
	CreatedAt time.Time             `json:"createdAt"`
}

// Schedule describes when and how often a task runs, plus retry behavior and
// optional execution-window/condition constraints.
type Schedule struct {
	Type     string `json:"type"` // one of the Schedule* constants
	CronExpr string `json:"cronExpr,omitempty"`
	Interval int    `json:"intervalSeconds,omitempty"` // seconds, used when Type == "interval"
	NextRun  time.Time `json:"nextRun"`
	MaxRuns  int       `json:"maxRuns"` // 0 presumably means unlimited — confirm at enforcement
	RunCount int       `json:"runCount"`
	ExpiresAt *time.Time `json:"expiresAt,omitempty"`
	Enabled   bool       `json:"enabled"`

	DurationMode DurationMode  `json:"durationMode,omitempty"`
	RetryOnFail  bool          `json:"retryOnFail"`
	MaxRetries   int           `json:"maxRetries"`
	RetryDelay   time.Duration `json:"retryDelay"`
	Timezone     string        `json:"timezone,omitempty"`
	WindowStart  string        `json:"windowStart,omitempty"` // execution-window bound; format not visible here — confirm
	WindowEnd    string        `json:"windowEnd,omitempty"`
	Conditions   []Condition   `json:"conditions,omitempty"`
}
|
||||
|
||||
// Condition is a generic predicate attached to a schedule
// (field/operator/value), evaluated elsewhere — semantics depend on the
// evaluator, which is not visible in this file.
type Condition struct {
	Type     string                 `json:"type"`
	Field    string                 `json:"field"`
	Operator string                 `json:"operator"`
	Value    interface{}            `json:"value"`
	Params   map[string]interface{} `json:"params,omitempty"`
}

// Recognized values for Schedule.Type.
const (
	ScheduleOnce        = "once"
	ScheduleInterval    = "interval"
	ScheduleCron        = "cron"
	ScheduleHourly      = "hourly"
	ScheduleDaily       = "daily"
	ScheduleWeekly      = "weekly"
	ScheduleMonthly     = "monthly"
	ScheduleQuarterly   = "quarterly"
	ScheduleYearly      = "yearly"
	ScheduleContinuous  = "continuous"
	ScheduleOnCondition = "on_condition"
)
|
||||
|
||||
// TaskEvent is a progress/status notification emitted during task execution;
// Type is one of the Event* constants defined below.
type TaskEvent struct {
	Type      string                 `json:"type"`
	TaskID    string                 `json:"taskId"`
	SubTaskID string                 `json:"subTaskId,omitempty"`
	Status    TaskStatus             `json:"status,omitempty"`
	Progress  int                    `json:"progress,omitempty"`
	Message   string                 `json:"message,omitempty"`
	Data      map[string]interface{} `json:"data,omitempty"`
	Timestamp time.Time              `json:"timestamp"`
}

// ExecuteOptions configures a single task execution request: sync/async
// behavior, cost/time budgets, scheduling, duration profile, browser
// automation, and notification targets.
type ExecuteOptions struct {
	Async         bool                   `json:"async"`
	MaxCost       float64                `json:"maxCost"`
	Timeout       int                    `json:"timeoutSeconds"`
	EnableSandbox bool                   `json:"enableSandbox"`
	Schedule      *Schedule              `json:"schedule,omitempty"`
	Context       map[string]interface{} `json:"context,omitempty"`

	DurationMode   DurationMode    `json:"durationMode,omitempty"`
	Priority       TaskPriority    `json:"priority,omitempty"`
	ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"`
	ResumeFromID   string          `json:"resumeFromId,omitempty"` // presumably a checkpoint or task ID to resume from — confirm at caller
	EnableBrowser  bool            `json:"enableBrowser"`
	BrowserOptions *BrowserOptions `json:"browserOptions,omitempty"`
	NotifyOnEvents []string        `json:"notifyOnEvents,omitempty"` // event types to forward to WebhookURL
	WebhookURL     string          `json:"webhookUrl,omitempty"`
	Tags           []string        `json:"tags,omitempty"`
}
|
||||
|
||||
// BrowserOptions configures browser automation for a task execution.
type BrowserOptions struct {
	Headless      bool      `json:"headless"`
	UserAgent     string    `json:"userAgent,omitempty"`
	Viewport      *Viewport `json:"viewport,omitempty"`
	ProxyURL      string    `json:"proxyUrl,omitempty"`
	Timeout       int       `json:"timeout"` // units not visible here — confirm (ms vs s) at the consumer
	Screenshots   bool      `json:"screenshots"`
	RecordVideo   bool      `json:"recordVideo"`
	BlockAds      bool      `json:"blockAds"`
	AcceptCookies bool      `json:"acceptCookies"`
}

// Viewport is a browser viewport size in pixels.
type Viewport struct {
	Width  int `json:"width"`
	Height int `json:"height"`
}
|
||||
|
||||
// ExecutionResult is the outcome of running a task or sub-task (internal
// use; not JSON-tagged).
type ExecutionResult struct {
	TaskID    string
	SubTaskID string
	Output    map[string]interface{}
	Artifacts []Artifact
	Duration  time.Duration
	Cost      float64
	Error     error
}

// SandboxResult is the outcome of running code in a sandbox: captured
// streams, exit code, and any files the run produced.
type SandboxResult struct {
	Stdout   string
	Stderr   string
	ExitCode int
	Files    map[string][]byte // filename -> contents
	Duration time.Duration
}

// MemoryEntry is a persisted key/value memory item scoped to a user and
// optionally to a task; Type is one of the MemoryType* constants.
type MemoryEntry struct {
	ID        string      `json:"id"`
	UserID    string      `json:"userId"`
	TaskID    string      `json:"taskId,omitempty"`
	Key       string      `json:"key"`
	Value     interface{} `json:"value"`
	Type      string      `json:"type"`
	Tags      []string    `json:"tags,omitempty"`
	CreatedAt time.Time   `json:"createdAt"`
	ExpiresAt *time.Time  `json:"expiresAt,omitempty"`
}
|
||||
|
||||
// Event type values used in TaskEvent.Type.
const (
	EventTaskCreated     = "task_created"
	EventTaskStarted     = "task_started"
	EventTaskProgress    = "task_progress"
	EventTaskCompleted   = "task_completed"
	EventTaskFailed      = "task_failed"
	EventSubTaskStart    = "subtask_start"
	EventSubTaskDone     = "subtask_done"
	EventSubTaskFail     = "subtask_fail"
	EventArtifact        = "artifact"
	EventMessage         = "message"
	EventUserInput       = "user_input_required"
	EventCheckpoint      = "checkpoint"
	EventCheckpointSaved = "checkpoint_saved"
	EventResumed         = "resumed"
	EventPaused          = "paused"
	EventHeartbeat       = "heartbeat"
	EventIteration       = "iteration"
	EventBrowserAction   = "browser_action"
	EventScreenshot      = "screenshot"
	EventResourceAlert   = "resource_alert"
	EventScheduleUpdate  = "schedule_update"
)
|
||||
|
||||
// BrowserAction is a single browser automation step (navigate, click, type,
// ...) together with its targeting info, options, and eventual result.
type BrowserAction struct {
	ID         string                 `json:"id"`
	Type       BrowserActionType      `json:"type"`
	Selector   string                 `json:"selector,omitempty"` // CSS/DOM selector for element-targeting actions
	URL        string                 `json:"url,omitempty"`
	Value      string                 `json:"value,omitempty"`
	Options    map[string]interface{} `json:"options,omitempty"`
	Screenshot bool                   `json:"screenshot"` // capture a screenshot after the action
	WaitAfter  int                    `json:"waitAfterMs"`
	Timeout    int                    `json:"timeoutMs"`
	Result     *BrowserActionResult   `json:"result,omitempty"`
}

// BrowserActionType enumerates the supported browser automation actions.
type BrowserActionType string

const (
	BrowserNavigate     BrowserActionType = "navigate"
	BrowserClick        BrowserActionType = "click"
	BrowserType         BrowserActionType = "type"
	BrowserScroll       BrowserActionType = "scroll"
	BrowserScreenshot   BrowserActionType = "screenshot"
	BrowserWait         BrowserActionType = "wait"
	BrowserWaitSelector BrowserActionType = "wait_selector"
	BrowserExtract      BrowserActionType = "extract"
	BrowserEval         BrowserActionType = "eval"
	BrowserSelect       BrowserActionType = "select"
	BrowserUpload       BrowserActionType = "upload"
	BrowserDownload     BrowserActionType = "download"
	BrowserPDF          BrowserActionType = "pdf"
	BrowserClose        BrowserActionType = "close"
)
|
||||
|
||||
// BrowserActionResult is the outcome of one BrowserAction, including any
// captured page state.
type BrowserActionResult struct {
	Success      bool                `json:"success"`
	Data         interface{}         `json:"data,omitempty"`
	Screenshot   string              `json:"screenshot,omitempty"` // presumably base64 or a path — confirm at the producer
	Error        string              `json:"error,omitempty"`
	Duration     time.Duration       `json:"duration"`
	PageTitle    string              `json:"pageTitle,omitempty"`
	PageURL      string              `json:"pageUrl,omitempty"`
	Cookies      []map[string]string `json:"cookies,omitempty"`
	LocalStorage map[string]string   `json:"localStorage,omitempty"`
)

// Recognized values for Artifact.Type.
const (
	ArtifactTypeFile       = "file"
	ArtifactTypeCode       = "code"
	ArtifactTypeReport     = "report"
	ArtifactTypeDeployment = "deployment"
	ArtifactTypeImage      = "image"
	ArtifactTypeData       = "data"
)

// Recognized values for MemoryEntry.Type.
const (
	MemoryTypeFact       = "fact"
	MemoryTypePreference = "preference"
	MemoryTypeContext    = "context"
	MemoryTypeResult     = "result"
)
|
||||
Reference in New Issue
Block a user