Deploy: migrate k3s → Docker; search logic → master-agents-svc

- deploy/k3s удалён, deploy/docker добавлен (Caddyfile, docker-compose, searxng)
- chat-svc: agents/models/prompts удалены, использует llm-svc (LLMClient, EmbeddingClient)
- master-agents-svc: SearchOrchestrator, classifier, researcher, actions, widgets
- web-svc: ChatModelSelector, Optimization, Sources удалены; InputBarPlus; UnregisterSW
- geo-device-svc, localization-svc: Dockerfiles
- docs: 02-k3s-services-spec.md, RUNBOOK/TELEMETRY/WORKING удалены

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
home
2026-02-23 22:14:00 +03:00
parent cd6b7857ba
commit 328d968f3f
180 changed files with 3022 additions and 9798 deletions

View File

@@ -1,7 +1,9 @@
# syntax=docker/dockerfile:1
FROM node:22-alpine AS builder
WORKDIR /app
COPY package.json ./
RUN npm install
RUN --mount=type=cache,target=/root/.npm \
npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
@@ -9,7 +11,8 @@ RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY package.json ./
RUN npm install --omit=dev
RUN --mount=type=cache,target=/root/.npm \
npm install --omit=dev
COPY --from=builder /app/dist ./dist
EXPOSE 3015
ENV PORT=3015

View File

@@ -4,9 +4,9 @@ import { fileURLToPath } from 'node:url';
config({ path: path.resolve(fileURLToPath(import.meta.url), '../../../../.env') });
/**
* api-gateway — прокси к микросервисам
* api-gateway — прокси к сервисам (СОА)
* web-svc = только UI, вся логика и API здесь
* docs/architecture: 02-k3s-microservices-spec.md §5
* docs/architecture: 02-k3s-services-spec.md §5
*/
import Fastify, { FastifyRequest, FastifyReply } from 'fastify';
@@ -76,12 +76,17 @@ async function proxyRequest(req: FastifyRequest, reply: FastifyReply, stream = f
const fullUrl = `${target.base.replace(/\/$/, '')}${target.rewrite}${url.search}`;
const headers: Record<string, string> = {};
const pass = ['authorization', 'content-type', 'accept', 'x-forwarded-for', 'x-real-ip', 'user-agent', 'accept-language'];
const pass = ['authorization', 'accept', 'x-forwarded-for', 'x-real-ip', 'user-agent', 'accept-language'];
for (const h of pass) {
const v = req.headers[h];
if (v && typeof v === 'string') headers[h] = v;
}
if (!headers['Content-Type'] && req.method !== 'GET') headers['Content-Type'] = 'application/json';
const ct = req.headers['content-type'];
if (ct && typeof ct === 'string' && ct.toLowerCase().includes('application/json')) {
headers['Content-Type'] = 'application/json';
} else if (req.method !== 'GET') {
headers['Content-Type'] = 'application/json';
}
try {
const method = req.method;
@@ -129,11 +134,25 @@ async function proxyRequest(req: FastifyRequest, reply: FastifyReply, stream = f
return reply.send(data);
} catch (err: unknown) {
req.log.error(err);
// Заглушки для сервисов, не запущенных в Docker
if (path.startsWith('/api/v1/discover')) return reply.send({ items: [] });
if (path.startsWith('/api/geo-context')) return reply.send({ country: null, city: null });
if (path.startsWith('/api/translations')) return reply.send({});
if (path.startsWith('/api/v1/weather')) return reply.send({});
return reply.status(503).send({ error: 'Service unavailable' });
}
}
const app = Fastify({ logger: true });
// Парсер JSON — принимает application/json с charset и дублированием
app.addContentTypeParser(/application\/json/i, { parseAs: 'string' }, (_, body, done) => {
try {
const str = typeof body === 'string' ? body : (body ? String(body) : '');
done(null, str ? JSON.parse(str) : {});
} catch (e) {
done(e as Error, undefined);
}
});
const corsOrigin = process.env.ALLOWED_ORIGINS
? process.env.ALLOWED_ORIGINS.split(',')
.map((s) => s.trim())

View File

@@ -1,19 +1,24 @@
# syntax=docker/dockerfile:1
# Сборка из корня: docker build -t gooseek/auth-svc:latest -f services/auth-svc/Dockerfile .
FROM node:22-alpine AS builder
RUN apk add --no-cache python3 make g++
WORKDIR /app
COPY package.json package-lock.json ./
COPY services/auth-svc/package.json ./services/auth-svc/
RUN npm ci -w auth-svc
RUN --mount=type=cache,target=/root/.npm \
npm ci -w auth-svc
COPY services/auth-svc/tsconfig.json ./services/auth-svc/
COPY services/auth-svc/src ./services/auth-svc/src
WORKDIR /app/services/auth-svc
RUN npm run build
FROM node:22-alpine
RUN apk add --no-cache python3 make g++
WORKDIR /app
COPY package.json package-lock.json ./
COPY services/auth-svc/package.json ./services/auth-svc/
RUN npm ci -w auth-svc --omit=dev
RUN --mount=type=cache,target=/root/.npm \
npm ci -w auth-svc --omit=dev
COPY --from=builder /app/services/auth-svc/dist ./services/auth-svc/dist
WORKDIR /app/services/auth-svc
EXPOSE 3014

View File

@@ -1,6 +1,6 @@
/**
* billing-svc — тарифы, подписки, ЮKassa
* docs/architecture: 01-perplexity-analogue-design.md §2.2.J, 02-k3s-microservices-spec.md §3.10
* docs/architecture: 01-perplexity-analogue-design.md §2.2.J, 02-k3s-services-spec.md §3.10
* API: GET /api/v1/billing/plans, /subscription, /payments; POST /checkout
*/

View File

@@ -1,7 +1,7 @@
FROM node:22-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
RUN npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
@@ -9,7 +9,7 @@ RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev
RUN npm install --omit=dev
COPY --from=builder /app/dist ./dist
EXPOSE 3005
ENV DATA_DIR=/app/data

View File

@@ -10,7 +10,7 @@
},
"dependencies": {
"@fastify/cors": "^9.0.1",
"@fastify/multipart": "^9.4.0",
"@fastify/multipart": "^8.3.1",
"@google/genai": "^1.34.0",
"@toolsycc/json-repair": "^0.1.22",
"axios": "^1.8.3",

View File

@@ -7,18 +7,12 @@
import Fastify from 'fastify';
import multipart from '@fastify/multipart';
import { LibraryClient } from './lib/library-client.js';
import cors from '@fastify/cors';
import { z } from 'zod';
import ModelRegistry from './lib/models/registry.js';
import UploadManager from './lib/uploads/manager.js';
import configManager from './lib/config/index.js';
import { isEnvOnlyMode } from './lib/config/serverRegistry.js';
import { ConfigModelProvider } from './lib/config/types.js';
import SearchAgent from './lib/agents/search/index.js';
import SessionManager from './lib/session.js';
import { ChatTurnMessage } from './lib/types.js';
import { SearchSources } from './lib/agents/search/types.js';
import { createEmbeddingClient } from './lib/embedding-client.js';
import path from 'node:path';
import fs from 'node:fs';
@@ -45,6 +39,7 @@ if (!fs.existsSync(configPath)) {
const PORT = parseInt(process.env.PORT ?? '3005', 10);
const MEMORY_SVC_URL = process.env.MEMORY_SVC_URL ?? '';
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
const MASTER_AGENTS_SVC_URL = process.env.MASTER_AGENTS_SVC_URL?.trim() ?? '';
const messageSchema = z.object({
messageId: z.string().min(1),
@@ -52,8 +47,11 @@ const messageSchema = z.object({
content: z.string().min(1),
});
const answerModeEnum = z.enum(['standard', 'focus', 'academic', 'writing', 'travel', 'finance']);
const councilModelSchema = z.object({ providerId: z.string(), key: z.string() });
const answerModeEnum = z.enum([
'standard', 'focus', 'academic', 'writing', 'travel', 'finance',
'health', 'education', 'medicine', 'realEstate', 'psychology', 'sports',
'children', 'goods', 'shopping', 'games', 'taxes', 'legislation',
]);
const bodySchema = z.object({
message: messageSchema,
optimizationMode: z.enum(['speed', 'balanced', 'quality']),
@@ -73,9 +71,6 @@ const bodySchema = z.object({
})
.optional(),
learningMode: z.boolean().optional().default(false),
/** Model Council (Max): 3 models in parallel → synthesis */
modelCouncil: z.boolean().optional().default(false),
councilModels: z.array(councilModelSchema).length(3).optional(),
});
type Body = z.infer<typeof bodySchema>;
@@ -85,7 +80,54 @@ const corsOrigin = process.env.ALLOWED_ORIGINS
? process.env.ALLOWED_ORIGINS.split(',').map((s) => s.trim()).filter(Boolean)
: true;
await app.register(cors, { origin: corsOrigin });
await app.register(multipart, { limits: { fileSize: 50 * 1024 * 1024 } });
// multipart только для uploads — иначе application/json на /api/v1/chat даёт 415
await app.register(
async (scope) => {
await scope.register(multipart, { limits: { fileSize: 50 * 1024 * 1024 } });
scope.post('/api/v1/uploads', async function uploadsHandler(req, reply) {
try {
if (!LLM_SVC_URL) {
return reply.status(503).send({ message: 'LLM_SVC_URL not configured. llm-svc required for embeddings.' });
}
let providerId = '';
let modelKey = '';
const fileParts: { buffer: Buffer; filename: string; mimetype: string }[] = [];
const parts = req.parts();
for await (const part of parts) {
if (part.type === 'field') {
const val = (part as { value?: string }).value ?? '';
if (part.fieldname === 'embedding_model_provider_id') providerId = val;
else if (part.fieldname === 'embedding_model_key') modelKey = val;
} else if (part.type === 'file' && part.file) {
const buffer = await part.toBuffer();
fileParts.push({
buffer,
filename: part.filename ?? 'file',
mimetype: part.mimetype ?? 'application/octet-stream',
});
}
}
if (!providerId || !modelKey) {
return reply.status(400).send({ message: 'Missing embedding model or provider' });
}
if (fileParts.length === 0) {
return reply.status(400).send({ message: 'No files uploaded' });
}
const embedding = createEmbeddingClient({ providerId, key: modelKey });
const manager = new UploadManager({ embeddingModel: embedding });
const fileObjects = fileParts.map(
(p) => new File([new Uint8Array(p.buffer)], p.filename, { type: p.mimetype }),
);
const processed = await manager.processFiles(fileObjects);
return reply.send({ files: processed });
} catch (err) {
req.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
});
},
{ prefix: '' },
);
app.get('/health', async () => ({ status: 'ok' }));
app.get('/ready', async () => ({ status: 'ready' }));
@@ -103,33 +145,28 @@ app.get('/metrics', async (_req, reply) => {
app.get('/api/v1/config', async (_req, reply) => {
try {
const values = configManager.getCurrentConfig();
const fields = configManager.getUIConfigSections();
let modelProviders: ConfigModelProvider[];
let envOnlyMode: boolean;
if (LLM_SVC_URL) {
const base = LLM_SVC_URL.replace(/\/$/, '');
const res = await fetch(`${base}/api/v1/providers`, {
signal: AbortSignal.timeout(5000),
});
if (!res.ok) throw new Error(`llm-svc fetch failed: ${res.status}`);
const data = (await res.json()) as { providers: ConfigModelProvider[]; envOnlyMode?: boolean };
modelProviders = data.providers ?? [];
envOnlyMode = data.envOnlyMode ?? false;
values.modelProviders = modelProviders;
} else {
const registry = new ModelRegistry();
const providers = await registry.getActiveProviders();
modelProviders = values.modelProviders.map((mp: ConfigModelProvider) => {
const activeProvider = providers.find((p) => p.id === mp.id);
return {
...mp,
chatModels: activeProvider?.chatModels ?? mp.chatModels,
embeddingModels: activeProvider?.embeddingModels ?? mp.embeddingModels,
};
});
values.modelProviders = modelProviders;
envOnlyMode = isEnvOnlyMode();
if (!LLM_SVC_URL) {
return reply.status(503).send({ message: 'LLM_SVC_URL not configured. llm-svc required.' });
}
const base = LLM_SVC_URL.replace(/\/$/, '');
const [providersRes, uiConfigRes] = await Promise.all([
fetch(`${base}/api/v1/providers`, { signal: AbortSignal.timeout(5000) }),
fetch(`${base}/api/v1/providers/ui-config`, { signal: AbortSignal.timeout(5000) }),
]);
if (!providersRes.ok) throw new Error(`llm-svc providers failed: ${providersRes.status}`);
const providersData = (await providersRes.json()) as { providers: ConfigModelProvider[]; envOnlyMode?: boolean };
const modelProviders = providersData.providers ?? [];
const envOnlyMode = providersData.envOnlyMode ?? false;
values.modelProviders = modelProviders;
let modelProviderSections: { name: string; key: string; fields: unknown[] }[] = [];
if (uiConfigRes.ok) {
const uiData = (await uiConfigRes.json()) as { sections: { name: string; key: string; fields: unknown[] }[] };
modelProviderSections = uiData.sections ?? [];
}
const fields = {
...configManager.getUIConfigSections(),
modelProviders: modelProviderSections,
};
return reply.send({ values, fields, modelProviders, envOnlyMode });
} catch (err) {
app.log.error(err);
@@ -169,51 +206,6 @@ app.post<{ Body: unknown }>('/api/v1/config/setup-complete', async (req, reply)
/* --- Providers: routed to llm-svc via api-gateway --- */
/* --- Uploads --- */
app.post('/api/v1/uploads', async (req, reply) => {
try {
let providerId = '';
let modelKey = '';
const fileParts: { buffer: Buffer; filename: string; mimetype: string }[] = [];
const parts = req.parts();
for await (const part of parts) {
if (part.type === 'field') {
const val = (part as { value?: string }).value ?? '';
if (part.fieldname === 'embedding_model_provider_id') providerId = val;
else if (part.fieldname === 'embedding_model_key') modelKey = val;
} else if (part.type === 'file' && part.file) {
const buffer = await part.toBuffer();
fileParts.push({
buffer,
filename: part.filename ?? 'file',
mimetype: part.mimetype ?? 'application/octet-stream',
});
}
}
if (!providerId || !modelKey) {
return reply.status(400).send({ message: 'Missing embedding model or provider' });
}
if (fileParts.length === 0) {
return reply.status(400).send({ message: 'No files uploaded' });
}
const registry = new ModelRegistry();
const embedding = await registry.loadEmbeddingModel(providerId, modelKey);
const manager = new UploadManager({ embeddingModel: embedding });
const fileObjects = fileParts.map(
(p) => new File([new Blob([p.buffer], { type: p.mimetype })], p.filename, { type: p.mimetype }),
);
const processed = await manager.processFiles(fileObjects);
return reply.send({ files: processed });
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
});
app.post<{ Body: unknown }>('/api/v1/chat', async (req, reply) => {
const parseBody = bodySchema.safeParse(req.body);
if (!parseBody.success) {
@@ -246,71 +238,41 @@ app.post<{ Body: unknown }>('/api/v1/chat', async (req, reply) => {
}
try {
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(body.chatModel.providerId, body.chatModel.key);
let embedding = null;
if (body.embeddingModel?.providerId) {
embedding = await registry.loadEmbeddingModel(body.embeddingModel.providerId, body.embeddingModel.key);
if (!MASTER_AGENTS_SVC_URL) {
return reply.status(503).send({ message: 'MASTER_AGENTS_SVC_URL not configured. master-agents-svc required for chat.' });
}
let councilLlms: [Awaited<ReturnType<ModelRegistry['loadChatModel']>>, Awaited<ReturnType<ModelRegistry['loadChatModel']>>, Awaited<ReturnType<ModelRegistry['loadChatModel']>>] | undefined;
if (body.modelCouncil && body.councilModels && body.councilModels.length === 3) {
try {
councilLlms = [
await registry.loadChatModel(body.councilModels[0].providerId, body.councilModels[0].key),
await registry.loadChatModel(body.councilModels[1].providerId, body.councilModels[1].key),
await registry.loadChatModel(body.councilModels[2].providerId, body.councilModels[2].key),
];
} catch (councilErr) {
req.log.warn({ err: councilErr }, 'Model Council: failed to load 3 models, falling back to single');
}
}
const history: ChatTurnMessage[] = body.history.map((msg) =>
msg[0] === 'human' ? { role: 'user' as const, content: msg[1] } : { role: 'assistant' as const, content: msg[1] });
const libraryClient = LibraryClient.create(authHeader);
const config = {
llm,
embedding,
sources: body.sources as SearchSources[],
mode: body.optimizationMode,
fileIds: body.files,
systemInstructions: body.systemInstructions || 'None',
locale: body.locale ?? 'en',
memoryContext,
answerMode: body.answerMode,
responsePrefs: body.responsePrefs,
learningMode: body.learningMode,
libraryClient,
...(councilLlms && { councilLlms }),
};
const agent = new SearchAgent();
const session = SessionManager.createSession();
const encoder = new TextEncoder();
const stream = new ReadableStream({
start(controller) {
const disconnect = session.subscribe((event: string, data: unknown) => {
const d = data as { type?: string; block?: unknown; blockId?: string; patch?: unknown; data?: unknown };
if (event === 'data') {
if (d.type === 'block') controller.enqueue(encoder.encode(JSON.stringify({ type: 'block', block: d.block }) + '\n'));
else if (d.type === 'updateBlock') controller.enqueue(encoder.encode(JSON.stringify({ type: 'updateBlock', blockId: d.blockId, patch: d.patch }) + '\n'));
else if (d.type === 'researchComplete') controller.enqueue(encoder.encode(JSON.stringify({ type: 'researchComplete' }) + '\n'));
} else if (event === 'end') {
controller.enqueue(encoder.encode(JSON.stringify({ type: 'messageEnd' }) + '\n'));
controller.close();
session.removeAllListeners();
} else if (event === 'error') {
controller.enqueue(encoder.encode(JSON.stringify({ type: 'error', data: d.data }) + '\n'));
controller.close();
session.removeAllListeners();
}
});
agent.searchAsync(session, { chatHistory: history, followUp: body.message.content, chatId: body.message.chatId, messageId: body.message.messageId, config }).catch((err: Error) => {
req.log.error(err);
session.emit('error', { data: err?.message ?? 'Error during search.' });
});
req.raw.on?.('abort', () => { disconnect(); try { controller.close(); } catch {} });
const base = MASTER_AGENTS_SVC_URL.replace(/\/$/, '');
const proxyRes = await fetch(`${base}/api/v1/agents/search`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
...(authHeader && { Authorization: authHeader }),
},
});
return reply.header('Content-Type', 'application/x-ndjson').header('Cache-Control', 'no-cache').send(stream);
body: JSON.stringify({
message: body.message,
optimizationMode: body.optimizationMode,
sources: body.sources,
history: body.history,
files: body.files,
chatModel: body.chatModel,
systemInstructions: body.systemInstructions,
locale: body.locale,
answerMode: body.answerMode,
responsePrefs: body.responsePrefs,
learningMode: body.learningMode,
}),
signal: AbortSignal.timeout(300000),
duplex: 'half',
} as RequestInit);
if (!proxyRes.ok) {
const errText = await proxyRes.text();
return reply.status(proxyRes.status).send({ message: errText || 'master-agents-svc error' });
}
return reply
.header('Content-Type', 'application/x-ndjson')
.header('Cache-Control', 'no-cache')
.send(proxyRes.body);
} catch (err) {
req.log.error(err);
return reply.status(500).send({ message: 'An error occurred while processing chat request' });

View File

@@ -1,102 +0,0 @@
import { ResearcherOutput, SearchAgentInput } from './types.js';
import SessionManager from '../../session.js';
import { classify } from './classifier.js';
import Researcher from './researcher/index.js';
import { getWriterPrompt } from '../../prompts/search/writer.js';
import { WidgetExecutor } from './widgets/index.js';
/**
 * Non-streaming-UI ("API") variant of the search agent.
 *
 * Flow: classify the follow-up query, run widgets and (unless classification
 * says to skip) the researcher in parallel, then stream the writer LLM's
 * answer chunk-by-chunk as 'response' data events on the given session.
 *
 * NOTE(review): this is the full content of a file deleted in this commit;
 * comments document what it did.
 */
class APISearchAgent {
  /**
   * Runs one search turn, emitting results through `session`:
   * 'data' events of type 'searchResults', 'researchComplete' and 'response',
   * followed by a final 'end' event. Does not catch errors itself —
   * presumably the caller handles rejections. TODO confirm against callers.
   */
  async searchAsync(session: SessionManager, input: SearchAgentInput) {
    // Decide which search strategies / widgets apply to this query.
    const classification = await classify({
      chatHistory: input.chatHistory,
      enabledSources: input.config.sources,
      query: input.followUp,
      llm: input.config.llm,
      locale: input.config.locale,
    });
    // Widgets always run, concurrently with the (optional) search below.
    const widgetPromise = WidgetExecutor.executeAll({
      classification,
      chatHistory: input.chatHistory,
      followUp: input.followUp,
      llm: input.config.llm,
    });
    let searchPromise: Promise<ResearcherOutput> | null = null;
    if (!classification.classification.skipSearch) {
      const researcher = new Researcher();
      // Research runs against a fresh throwaway session: its intermediate
      // blocks are not forwarded here, only the final findings are used.
      searchPromise = researcher.research(SessionManager.createSession(), {
        chatHistory: input.chatHistory,
        followUp: input.followUp,
        classification: classification,
        config: input.config,
      });
    }
    const [widgetOutputs, searchResults] = await Promise.all([
      widgetPromise,
      searchPromise,
    ]);
    if (searchResults) {
      session.emit('data', {
        type: 'searchResults',
        data: searchResults.searchFindings,
      });
    }
    session.emit('data', {
      type: 'researchComplete',
    });
    // Build the XML-ish context the writer prompt cites from: search findings
    // (citable) and widget output (informational, not citable).
    const finalContext =
      searchResults?.searchFindings
        .map(
          (f, index) =>
            `<result index=${index + 1} title=${f.metadata.title}>${f.content}</result>`,
        )
        .join('\n') || '';
    const widgetContext = widgetOutputs
      .map((o) => {
        return `<result>${o.llmContext}</result>`;
      })
      .join('\n-------------\n');
    const finalContextWithWidgets = `<search_results note="These are the search results and assistant can cite these">\n${finalContext}\n</search_results>\n<widgets_result noteForAssistant="Its output is already showed to the user, assistant can use this information to answer the query but do not CITE this as a souce">\n${widgetContext}\n</widgets_result>`;
    const writerPrompt = getWriterPrompt(
      finalContextWithWidgets,
      input.config.systemInstructions,
      input.config.mode,
      input.config.locale,
      input.config.memoryContext,
    );
    // Stream the final answer; each chunk is forwarded as a 'response' event.
    const answerStream = input.config.llm.streamText({
      messages: [
        {
          role: 'system',
          content: writerPrompt,
        },
        ...input.chatHistory,
        {
          role: 'user',
          content: input.followUp,
        },
      ],
    });
    for await (const chunk of answerStream) {
      session.emit('data', {
        type: 'response',
        data: chunk.contentChunk,
      });
    }
    session.emit('end', {});
  }
}
export default APISearchAgent;

View File

@@ -1,53 +0,0 @@
import z from 'zod';
import { ClassifierInput } from './types.js';
import { getClassifierPrompt } from '../../prompts/search/classifier.js';
import formatChatHistoryAsString from '../../utils/formatHistory.js';
/**
 * Structured output schema for the classifier LLM call. The boolean flags
 * select which search strategies and UI widgets run for this turn; the
 * `.describe()` strings are sent to the model as field documentation.
 */
const schema = z.object({
  classification: z.object({
    skipSearch: z
      .boolean()
      .describe('Indicates whether to skip the search step.'),
    personalSearch: z
      .boolean()
      .describe('Indicates whether to perform a personal search.'),
    academicSearch: z
      .boolean()
      .describe('Indicates whether to perform an academic search.'),
    discussionSearch: z
      .boolean()
      .describe('Indicates whether to perform a discussion search.'),
    showWeatherWidget: z
      .boolean()
      .describe('Indicates whether to show the weather widget.'),
    showStockWidget: z
      .boolean()
      .describe('Indicates whether to show the stock widget.'),
    showCalculationWidget: z
      .boolean()
      .describe('Indicates whether to show the calculation widget.'),
  }),
  standaloneFollowUp: z
    .string()
    .describe(
      "A self-contained, context-independent reformulation of the user's question.",
    ),
});
/**
 * Classifies the user's query against the conversation history via a
 * structured LLM call. Returns an object matching `schema`, including a
 * standalone reformulation of the follow-up question. Propagates any error
 * from `generateObject` to the caller.
 */
export const classify = async (input: ClassifierInput) => {
  const output = await input.llm.generateObject<typeof schema>({
    messages: [
      {
        role: 'system',
        content: getClassifierPrompt(input.locale),
      },
      {
        // History and query are wrapped in tags so the model can tell them apart.
        role: 'user',
        content: `<conversation_history>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation_history>\n<user_query>\n${input.query}\n</user_query>`,
      },
    ],
    schema,
  });
  return output;
};

View File

@@ -1,309 +0,0 @@
import type BaseLLM from '../../models/base/llm.js';
import { ResearcherOutput, SearchAgentInput } from './types.js';
import SessionManager from '../../session.js';
import { classify } from './classifier.js';
import Researcher from './researcher/index.js';
import { getWriterPrompt, getSynthesisPrompt } from '../../prompts/search/writer.js';
import { WidgetExecutor } from './widgets/index.js';
import { TextBlock } from '../../types.js';
/**
 * Streaming search agent (block-based UI protocol).
 *
 * Flow: persist the user message (if a library client is enabled), classify,
 * run widgets + researcher in parallel, then stream the writer answer into a
 * text block — either from a single LLM or, in "Model Council" mode, by
 * synthesizing three parallel answers. Emits blocks via `session.emitBlock`
 * / `session.updateBlock` and finishes with an 'end' event.
 *
 * NOTE(review): this is the full content of a file deleted in this commit;
 * comments document what it did.
 */
class SearchAgent {
  /**
   * Entry point. Wraps `doSearch` so that any fatal error still produces a
   * user-visible text block (built from whatever sources were already found)
   * and a terminating 'end' event, instead of a silent hang.
   */
  async searchAsync(session: SessionManager, input: SearchAgentInput) {
    try {
      await this.doSearch(session, input);
    } catch (err) {
      console.error('[SearchAgent] Fatal:', err);
      // Best-effort recovery: salvage sources from an emitted 'source' block,
      // or failing that from the research block's 'search_results' sub-step.
      const blocks = session.getAllBlocks();
      const sourceBlock = blocks.find((b) => b.type === 'source');
      let sources: { metadata?: { title?: string } }[] =
        sourceBlock?.type === 'source' ? sourceBlock.data : [];
      if (sources.length === 0) {
        const researchBlock = blocks.find(
          (b): b is typeof b & { data: { subSteps?: { type: string; reading?: { metadata?: { title?: string } }[] }[] } } =>
            b.type === 'research' && 'subSteps' in (b.data ?? {}),
        );
        const searchStep = researchBlock?.data?.subSteps?.find(
          (s) => s.type === 'search_results' && Array.isArray(s.reading),
        );
        if (searchStep && 'reading' in searchStep) {
          sources = searchStep.reading ?? [];
        }
      }
      if (sources.length > 0) {
        // Show up to 10 salvaged source titles so the turn is not a dead end.
        const lines = sources.slice(0, 10).map(
          (s: { metadata?: { title?: string } }, i: number) =>
            `${i + 1}. **${s?.metadata?.title ?? 'Источник'}**`,
        );
        session.emitBlock({
          id: crypto.randomUUID(),
          type: 'text',
          data: `## По найденным источникам\n\n${lines.join('\n')}\n\n*Ответ LLM недоступен (400). Проверьте модель gemini-3-flash в Settings или попробуйте другую модель.*`,
        });
      } else {
        session.emitBlock({
          id: crypto.randomUUID(),
          type: 'text',
          data: `Ошибка: ${err instanceof Error ? err.message : String(err)}. Проверьте LLM в Settings.`,
        });
      }
      session.emit('end', {});
    }
  }
  /** Main pipeline: classify → widgets ∥ research → writer stream → persist. */
  private async doSearch(session: SessionManager, input: SearchAgentInput) {
    const lib = input.config.libraryClient;
    if (lib?.enabled) {
      // Record the user message up-front with 'answering' status so the
      // library shows an in-progress turn even if streaming later fails.
      await lib.upsertMessage(input.chatId, input.messageId, input.followUp, {
        backendId: session.id,
        responseBlocks: [],
        status: 'answering',
        sources: (input.config.sources as string[]) ?? [],
      });
    }
    const classification = await classify({
      chatHistory: input.chatHistory,
      enabledSources: input.config.sources,
      query: input.followUp,
      llm: input.config.llm,
      locale: input.config.locale,
    });
    // Widgets emit their blocks as soon as they finish, concurrently with
    // the researcher; the outputs are also kept for the writer's context.
    const widgetPromise = WidgetExecutor.executeAll({
      classification,
      chatHistory: input.chatHistory,
      followUp: input.followUp,
      llm: input.config.llm,
    }).then((widgetOutputs) => {
      widgetOutputs.forEach((o) => {
        session.emitBlock({
          id: crypto.randomUUID(),
          type: 'widget',
          data: {
            widgetType: o.type,
            params: o.data,
          },
        });
      });
      return widgetOutputs;
    });
    let searchPromise: Promise<ResearcherOutput> | null = null;
    if (!classification.classification.skipSearch) {
      const researcher = new Researcher();
      // Unlike APISearchAgent, research streams into the caller's session so
      // the UI sees intermediate research blocks.
      searchPromise = researcher.research(session, {
        chatHistory: input.chatHistory,
        followUp: input.followUp,
        classification: classification,
        config: input.config,
      });
    }
    const [widgetOutputs, searchResults] = await Promise.all([
      widgetPromise,
      searchPromise,
    ]);
    session.emit('data', {
      type: 'researchComplete',
    });
    // Cap what the writer sees to bound prompt size: at most 15 results,
    // each truncated to 180 chars.
    const MAX_RESULTS_FOR_WRITER = 15;
    const MAX_CONTENT_PER_RESULT = 180;
    const findingsForWriter =
      searchResults?.searchFindings.slice(0, MAX_RESULTS_FOR_WRITER) ?? [];
    const finalContext =
      findingsForWriter
        .map((f, index) => {
          const content =
            f.content.length > MAX_CONTENT_PER_RESULT
              ? f.content.slice(0, MAX_CONTENT_PER_RESULT) + '…'
              : f.content;
          // Double quotes in titles are swapped for single quotes so the
          // title="" attribute in the pseudo-XML stays well-formed.
          return `<result index=${index + 1} title="${String(f.metadata.title).replace(/"/g, "'")}">${content}</result>`;
        })
        .join('\n') || '';
    const widgetContext = widgetOutputs
      .map((o) => {
        return `<result>${o.llmContext}</result>`;
      })
      .join('\n-------------\n');
    const finalContextWithWidgets = `<search_results note="These are the search results and assistant can cite these">\n${finalContext}\n</search_results>\n<widgets_result noteForAssistant="Its output is already showed to the user, assistant can use this information to answer the query but do not CITE this as a souce">\n${widgetContext}\n</widgets_result>`;
    const writerPrompt = getWriterPrompt(
      finalContextWithWidgets,
      input.config.systemInstructions,
      input.config.mode,
      input.config.locale,
      input.config.memoryContext,
      input.config.answerMode,
      input.config.responsePrefs,
      input.config.learningMode,
    );
    // Model Council branch: exactly 3 pre-loaded LLMs → parallel answers +
    // synthesis; replaces the single-writer stream below.
    const councilLlms = input.config.councilLlms;
    if (councilLlms && councilLlms.length === 3) {
      await this.runCouncilWritersAndSynthesis(
        session,
        input,
        writerPrompt,
        findingsForWriter,
        councilLlms,
      );
      return;
    }
    const answerStream = input.config.llm.streamText({
      messages: [
        { role: 'system', content: writerPrompt },
        ...input.chatHistory,
        { role: 'user', content: input.followUp },
      ],
      options: { maxTokens: 4096 },
    });
    // Stream into a single text block: first non-empty chunk creates it,
    // subsequent chunks patch it via JSON-Patch-style replace ops.
    let responseBlockId = '';
    let hasContent = false;
    for await (const chunk of answerStream) {
      if (!chunk.contentChunk && !responseBlockId) continue;
      if (!responseBlockId) {
        const block: TextBlock = {
          id: crypto.randomUUID(),
          type: 'text',
          data: chunk.contentChunk,
        };
        session.emitBlock(block);
        responseBlockId = block.id;
        if (chunk.contentChunk) hasContent = true;
      } else {
        const block = session.getBlock(responseBlockId) as TextBlock | null;
        if (block) {
          block.data += chunk.contentChunk;
          if (chunk.contentChunk) hasContent = true;
          session.updateBlock(block.id, [
            { op: 'replace', path: '/data', value: block.data },
          ]);
        }
      }
    }
    // Fallback: LLM produced nothing but search did — show source excerpts.
    if (!hasContent && findingsForWriter.length > 0) {
      const lines = findingsForWriter.slice(0, 10).map((f, i) => {
        const title = f.metadata.title ?? 'Без названия';
        const excerpt =
          f.content.length > 120 ? f.content.slice(0, 120) + '…' : f.content;
        return `${i + 1}. **${title}** — ${excerpt}`;
      });
      session.emitBlock({
        id: crypto.randomUUID(),
        type: 'text',
        data: `## По найденным источникам\n\n${lines.join('\n\n')}\n\n*Ответ LLM недоступен. Проверьте модель в Settings.*`,
      });
    }
    session.emit('end', {});
    await this.persistMessage(session, input.chatId, input.messageId, input.config.libraryClient);
  }
  /** Model Council: run 3 writers in parallel, synthesize, stream synthesis */
  private async runCouncilWritersAndSynthesis(
    session: SessionManager,
    input: SearchAgentInput,
    writerPrompt: string,
    findingsForWriter: { content: string; metadata: { title?: string } }[],
    councilLlms: [BaseLLM<any>, BaseLLM<any>, BaseLLM<any>],
  ) {
    const messages = [
      { role: 'system' as const, content: writerPrompt },
      ...input.chatHistory,
      { role: 'user' as const, content: input.followUp },
    ];
    const writerInput = { messages, options: { maxTokens: 4096 } };
    // Three full (non-streaming) answers in parallel, one per council model.
    const [r1, r2, r3] = await Promise.all([
      councilLlms[0].generateText(writerInput),
      councilLlms[1].generateText(writerInput),
      councilLlms[2].generateText(writerInput),
    ]);
    const answer1 = r1.content ?? '';
    const answer2 = r2.content ?? '';
    const answer3 = r3.content ?? '';
    const synthesisPrompt = getSynthesisPrompt(
      input.followUp,
      answer1,
      answer2,
      answer3,
      input.config.locale,
    );
    // The first council model also performs the synthesis; only the
    // synthesized answer is streamed to the user.
    const synthesisStream = councilLlms[0].streamText({
      messages: [{ role: 'user' as const, content: synthesisPrompt }],
      options: { maxTokens: 4096 },
    });
    // Same create-then-patch streaming pattern as the single-writer path.
    let responseBlockId = '';
    let hasContent = false;
    for await (const chunk of synthesisStream) {
      if (!chunk.contentChunk && !responseBlockId) continue;
      if (!responseBlockId) {
        const block: TextBlock = {
          id: crypto.randomUUID(),
          type: 'text',
          data: chunk.contentChunk ?? '',
        };
        session.emitBlock(block);
        responseBlockId = block.id;
        if (chunk.contentChunk) hasContent = true;
      } else {
        const block = session.getBlock(responseBlockId) as TextBlock | null;
        if (block) {
          block.data += chunk.contentChunk ?? '';
          if (chunk.contentChunk) hasContent = true;
          session.updateBlock(block.id, [
            { op: 'replace', path: '/data', value: block.data },
          ]);
        }
      }
    }
    // Fallback mirrors the single-writer path when synthesis yields nothing.
    if (!hasContent && findingsForWriter.length > 0) {
      const lines = findingsForWriter.slice(0, 10).map((f, i) => {
        const title = f.metadata.title ?? 'Без названия';
        const excerpt = f.content.length > 120 ? f.content.slice(0, 120) + '…' : f.content;
        return `${i + 1}. **${title}** — ${excerpt}`;
      });
      session.emitBlock({
        id: crypto.randomUUID(),
        type: 'text',
        data: `## По найденным источникам\n\n${lines.join('\n\n')}\n\n*Model Council: синтез недоступен. Проверьте модели в Settings.*`,
      });
    }
    session.emit('end', {});
    await this.persistMessage(session, input.chatId, input.messageId, input.config.libraryClient);
  }
  /**
   * Saves the completed turn (all emitted blocks) to the library service.
   * Best-effort: a failed update is logged, never rethrown, so persistence
   * problems cannot break an already-delivered answer.
   */
  private async persistMessage(
    session: SessionManager,
    chatId: string,
    messageId: string,
    libraryClient?: SearchAgentInput['config']['libraryClient'],
  ) {
    if (!libraryClient?.enabled) return;
    try {
      await libraryClient.updateMessage(chatId, messageId, {
        status: 'completed',
        responseBlocks: session.getAllBlocks(),
      });
    } catch (err) {
      console.error('[SearchAgent] LibraryClient update failed:', err);
    }
  }
}
export default SearchAgent;

View File

@@ -1,129 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
import { Chunk, SearchResultsResearchBlock } from '../../../../types.js';
import { searchSearxng } from '../../../../searxng.js';
/** Tool-call output schema: a list of academic search query strings. */
const schema = z.object({
  queries: z.array(z.string()).describe('List of academic search queries'),
});
/**
 * Long-form description of the academic_search action, injected into the
 * researcher's tool prompt. Runtime string — kept verbatim.
 */
const academicSearchDescription = `
Use this tool to perform academic searches for scholarly articles, papers, and research studies relevant to the user's query. Provide a list of concise search queries that will help gather comprehensive academic information on the topic at hand.
You can provide up to 3 queries at a time. Make sure the queries are specific and relevant to the user's needs.
For example, if the user is interested in recent advancements in renewable energy, your queries could be:
1. "Recent advancements in renewable energy 2024"
2. "Cutting-edge research on solar power technologies"
3. "Innovations in wind energy systems"
If this tool is present and no other tools are more relevant, you MUST use this tool to get the needed academic information.
`;
/**
 * academic_search — runs the model-supplied queries against SearXNG academic
 * engines (arxiv, google scholar, pubmed) and streams progress into the
 * active 'research' block's subSteps.
 */
const academicSearchAction: ResearchAction<typeof schema> = {
  name: 'academic_search',
  schema: schema,
  getDescription: () => academicSearchDescription,
  getToolDescription: () =>
    "Use this tool to perform academic searches for scholarly articles, papers, and research studies relevant to the user's query. Provide a list of concise search queries that will help gather comprehensive academic information on the topic at hand.",
  // Enabled only when the user selected the 'academic' source AND the
  // classifier both allowed searching and flagged the query as academic.
  enabled: (config) =>
    config.sources.includes('academic') &&
    config.classification.classification.skipSearch === false &&
    config.classification.classification.academicSearch === true,
  execute: async (input, additionalConfig) => {
    // Enforce the documented cap of 3 queries per call.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(
      additionalConfig.researchBlockId,
    );
    // Surface the queries in the UI as a 'searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        type: 'searching',
        id: crypto.randomUUID(),
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        {
          op: 'replace',
          path: '/data/subSteps',
          value: researchBlock.data.subSteps,
        },
      ]);
    }
    // One shared 'search_results' sub-step is created lazily by whichever
    // query finishes first; later queries append to it. The flag is flipped
    // synchronously after the check (no await in between), so concurrent
    // callbacks cannot both create the sub-step.
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    let results: Chunk[] = [];
    const search = async (q: string) => {
      const res = await searchSearxng(q, {
        engines: ['arxiv', 'google scholar', 'pubmed'],
      });
      // Fall back to the title when the engine returns no snippet.
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: {
          title: r.title,
          url: r.url,
        },
      }));
      results.push(...resultChunks);
      if (
        !searchResultsEmitted &&
        researchBlock &&
        researchBlock.type === 'research'
      ) {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          {
            op: 'replace',
            path: '/data/subSteps',
            value: researchBlock.data.subSteps,
          },
        ]);
      } else if (
        searchResultsEmitted &&
        researchBlock &&
        researchBlock.type === 'research'
      ) {
        // Append to the sub-step created by an earlier query.
        const subStepIndex = researchBlock.data.subSteps.findIndex(
          (step) => step.id === searchResultsBlockId,
        );
        const subStep = researchBlock.data.subSteps[
          subStepIndex
        ] as SearchResultsResearchBlock;
        subStep.reading.push(...resultChunks);
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          {
            op: 'replace',
            path: '/data/subSteps',
            value: researchBlock.data.subSteps,
          },
        ]);
      }
    };
    // Run all queries concurrently; `results` accumulates in completion order.
    await Promise.all(input.queries.map(search));
    return {
      type: 'search_results',
      results,
    };
  },
};
export default academicSearchAction;

View File

@@ -1,24 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
// Prompt text for the terminal `done` tool: the model must call it to end
// the research loop rather than emitting a final answer directly.
const actionDescription = `
Use this action ONLY when you have completed all necessary research and are ready to provide a final answer to the user. This indicates that you have gathered sufficient information from previous steps and are concluding the research process.
YOU MUST CALL THIS ACTION TO SIGNAL COMPLETION; DO NOT OUTPUT FINAL ANSWERS DIRECTLY TO THE USER.
IT WILL BE AUTOMATICALLY TRIGGERED IF MAXIMUM ITERATIONS ARE REACHED SO IF YOU'RE LOW ON ITERATIONS, DON'T CALL IT AND INSTEAD FOCUS ON GATHERING ESSENTIAL INFO FIRST.
`;
/**
 * Terminal action: signals that research is complete and the writer phase
 * may begin. Always enabled; accepts no arguments and yields no findings.
 */
const doneAction: ResearchAction<any> = {
  name: 'done',
  schema: z.object({}),
  enabled: () => true,
  getDescription: () => actionDescription,
  getToolDescription: () =>
    'Only call this after __reasoning_preamble AND after any other needed tool calls when you truly have enough to answer. Do not call if information is still missing.',
  execute: async (_params, _config) => ({
    type: 'done',
  }),
};
export default doneAction;

View File

@@ -1,18 +0,0 @@
import academicSearchAction from './academicSearch.js';
import doneAction from './done.js';
import planAction from './plan.js';
import ActionRegistry from './registry.js';
import scrapeURLAction from './scrapeURL.js';
import socialSearchAction from './socialSearch.js';
import uploadsSearchAction from './uploadsSearch.js';
import webSearchAction from './webSearch.js';
// Wire up every research action exactly once at module load.
// NOTE: ActionRegistry stores actions in a Map, so this registration order
// is the order tools are listed to the model — do not reorder casually.
ActionRegistry.register(webSearchAction);
ActionRegistry.register(doneAction);
ActionRegistry.register(planAction);
ActionRegistry.register(scrapeURLAction);
ActionRegistry.register(uploadsSearchAction);
ActionRegistry.register(academicSearchAction);
ActionRegistry.register(socialSearchAction);
export { ActionRegistry };

View File

@@ -1,40 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
// Tool-call payload: a one-paragraph natural-language plan the model states
// before taking any other action on a turn.
const schema = z.object({
  plan: z
    .string()
    .describe(
      'A concise natural-language plan in one short paragraph. Open with a short intent phrase (e.g., "Okay, the user wants to...", "Searching for...", "Looking into...") and lay out the steps you will take.',
    ),
});
// Long-form tool description for the researcher prompt.
const actionDescription = `
Use this tool FIRST on every turn to state your plan in natural language before any other action. Keep it short, action-focused, and tailored to the current query.
Make sure to not include reference to any tools or actions you might take, just the plan itself. The user isn't aware about tools, but they love to see your thought process.
Here are some examples of good plans:
<examples>
- "Okay, the user wants to know the latest advancements in renewable energy. I will start by looking for recent articles and studies on this topic, then summarize the key points." -> "I have gathered enough information to provide a comprehensive answer."
- "The user is asking about the health benefits of a Mediterranean diet. I will search for scientific studies and expert opinions on this diet, then compile the findings into a clear summary." -> "I have gathered information about the Mediterranean diet and its health benefits, I will now look up for any recent studies to ensure the information is current."
</examples>
YOU CAN NEVER CALL ANY OTHER TOOL BEFORE CALLING THIS ONE FIRST, IF YOU DO, THAT CALL WOULD BE IGNORED.
`;
const planAction: ResearchAction<typeof schema> = {
name: '__reasoning_preamble',
schema: schema,
getToolDescription: () =>
'Use this FIRST on every turn to state your plan in natural language before any other action. Keep it short, action-focused, and tailored to the current query.',
getDescription: () => actionDescription,
enabled: (config) => config.mode !== 'speed',
execute: async (input, _) => {
return {
type: 'reasoning',
reasoning: input.plan,
};
},
};
export default planAction;

View File

@@ -1,108 +0,0 @@
import { Tool, ToolCall } from '../../../../models/types.js';
import {
ActionOutput,
AdditionalConfig,
ClassifierOutput,
ResearchAction,
SearchAgentConfig,
SearchSources,
} from '../../types.js';
/**
 * ActionRegistry — static registry of research actions (tools).
 *
 * Each action self-describes (zod schema + prompt text) and declares an
 * `enabled` predicate; the registry filters actions per request and executes
 * the tool calls the model emits.
 */
class ActionRegistry {
  // Registered actions keyed by tool name. Map preserves insertion order,
  // which is the order tools are presented to the model.
  private static actions: Map<string, ResearchAction> = new Map();

  /** Register (or overwrite) an action under its `name`. */
  static register(action: ResearchAction<any>) {
    this.actions.set(action.name, action);
  }

  /** Look up a single action by tool name. */
  static get(name: string): ResearchAction | undefined {
    return this.actions.get(name);
  }

  /** Actions whose `enabled` predicate accepts the current request config. */
  static getAvailableActions(config: {
    classification: ClassifierOutput;
    fileIds: string[];
    mode: SearchAgentConfig['mode'];
    sources: SearchSources[];
    hasEmbedding?: boolean;
  }): ResearchAction[] {
    return Array.from(this.actions.values()).filter((action) =>
      action.enabled(config),
    );
  }

  /** Enabled actions converted into LLM tool definitions. */
  static getAvailableActionTools(config: {
    classification: ClassifierOutput;
    fileIds: string[];
    mode: SearchAgentConfig['mode'];
    sources: SearchSources[];
    hasEmbedding?: boolean;
  }): Tool[] {
    const availableActions = this.getAvailableActions(config);
    return availableActions.map((action) => ({
      name: action.name,
      description: action.getToolDescription({ mode: config.mode }),
      schema: action.schema,
    }));
  }

  /** Enabled actions rendered as <tool> blocks for the researcher prompt. */
  static getAvailableActionsDescriptions(config: {
    classification: ClassifierOutput;
    fileIds: string[];
    mode: SearchAgentConfig['mode'];
    sources: SearchSources[];
    hasEmbedding?: boolean;
  }): string {
    const availableActions = this.getAvailableActions(config);
    return availableActions
      .map(
        (action) =>
          `<tool name="${action.name}">\n${action.getDescription({ mode: config.mode })}\n</tool>`,
      )
      .join('\n\n');
  }

  /**
   * Execute one tool call by name.
   * @throws Error when no action is registered under `name`.
   */
  static async execute(
    name: string,
    params: any,
    additionalConfig: AdditionalConfig & {
      researchBlockId: string;
      fileIds: string[];
    },
  ) {
    const action = this.actions.get(name);
    if (!action) {
      throw new Error(`Action with name ${name} not found`);
    }
    return action.execute(params, additionalConfig);
  }

  /**
   * Execute all tool calls concurrently, returning results in the SAME
   * order as `actions`.
   *
   * Bug fix: the previous implementation push()ed each result as it
   * completed, so output order depended on completion order — while the
   * caller (Researcher) pairs `results[i]` with `finalToolCalls[i]` by
   * index, meaning a slow tool could get another tool's result attached to
   * its tool-call id. Promise.all preserves input order by contract.
   */
  static async executeAll(
    actions: ToolCall[],
    additionalConfig: AdditionalConfig & {
      researchBlockId: string;
      fileIds: string[];
    },
  ): Promise<ActionOutput[]> {
    return Promise.all(
      actions.map((actionConfig) =>
        this.execute(
          actionConfig.name,
          actionConfig.arguments,
          additionalConfig,
        ),
      ),
    );
  }
}
export default ActionRegistry;

View File

@@ -1,139 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
import { Chunk, ReadingResearchBlock } from '../../../../types.js';
import TurnDown from 'turndown';
import path from 'path';
// Shared HTML→Markdown converter (third-party `turndown`); constructed once
// at module load and reused across all scrape calls.
const turndownService = new TurnDown();
// Tool-call payload: URLs to fetch. execute() caps the list at 3.
const schema = z.object({
  urls: z.array(z.string()).describe('A list of URLs to scrape content from.'),
});
// Long-form tool description for the researcher prompt.
const actionDescription = `
Use this tool to scrape and extract content from the provided URLs. This is useful when you the user has asked you to extract or summarize information from specific web pages. You can provide up to 3 URLs at a time. NEVER CALL THIS TOOL EXPLICITLY YOURSELF UNLESS INSTRUCTED TO DO SO BY THE USER.
You should only call this tool when the user has specifically requested information from certain web pages, never call this yourself to get extra information without user instruction.
For example, if the user says "Please summarize the content of https://example.com/article", you can call this tool with that URL to get the content and then provide the summary or "What does X mean according to https://example.com/page", you can call this tool with that URL to get the content and provide the explanation.
`;
/**
 * scrape_url — fetches user-specified pages, converts them to Markdown via
 * turndown, and streams per-URL progress into the research block as a
 * 'reading' sub-step. Fetch failures are captured as error chunks rather
 * than thrown, so one bad URL never aborts the batch.
 */
const scrapeURLAction: ResearchAction<typeof schema> = {
  name: 'scrape_url',
  schema: schema,
  getToolDescription: () =>
    'Use this tool to scrape and extract content from the provided URLs. This is useful when you the user has asked you to extract or summarize information from specific web pages. You can provide up to 3 URLs at a time. NEVER CALL THIS TOOL EXPLICITLY YOURSELF UNLESS INSTRUCTED TO DO SO BY THE USER.',
  getDescription: () => actionDescription,
  enabled: (_) => true,
  execute: async (params, additionalConfig) => {
    // Enforce the documented cap of 3 URLs per call.
    params.urls = params.urls.slice(0, 3);
    let readingBlockId = crypto.randomUUID();
    let readingEmitted = false;
    const researchBlock = additionalConfig.session.getBlock(
      additionalConfig.researchBlockId,
    );
    const results: Chunk[] = [];
    // Fetch all URLs concurrently; the first to respond creates the shared
    // 'reading' sub-step, later ones append to it (flag flip is synchronous
    // after the check, so concurrent callbacks cannot both create it).
    await Promise.all(
      params.urls.map(async (url) => {
        try {
          // NOTE(review): plain fetch with no timeout, and a naive regex
          // that misses <title> tags carrying attributes — confirm acceptable.
          const res = await fetch(url);
          const text = await res.text();
          const title =
            text.match(/<title>(.*?)<\/title>/i)?.[1] || `Content from ${url}`;
          if (
            !readingEmitted &&
            researchBlock &&
            researchBlock.type === 'research'
          ) {
            readingEmitted = true;
            researchBlock.data.subSteps.push({
              id: readingBlockId,
              type: 'reading',
              reading: [
                {
                  content: '',
                  metadata: {
                    url,
                    title: title,
                  },
                },
              ],
            });
            additionalConfig.session.updateBlock(
              additionalConfig.researchBlockId,
              [
                {
                  op: 'replace',
                  path: '/data/subSteps',
                  value: researchBlock.data.subSteps,
                },
              ],
            );
          } else if (
            readingEmitted &&
            researchBlock &&
            researchBlock.type === 'research'
          ) {
            // Append this URL to the sub-step created by an earlier fetch.
            const subStepIndex = researchBlock.data.subSteps.findIndex(
              (step: any) => step.id === readingBlockId,
            );
            const subStep = researchBlock.data.subSteps[
              subStepIndex
            ] as ReadingResearchBlock;
            subStep.reading.push({
              content: '',
              metadata: {
                url,
                title: title,
              },
            });
            additionalConfig.session.updateBlock(
              additionalConfig.researchBlockId,
              [
                {
                  op: 'replace',
                  path: '/data/subSteps',
                  value: researchBlock.data.subSteps,
                },
              ],
            );
          }
          // Convert the raw HTML to Markdown for the writer.
          const markdown = turndownService.turndown(text);
          results.push({
            content: markdown,
            metadata: {
              url,
              title: title,
            },
          });
        } catch (error) {
          // Keep the failure visible to the model instead of aborting.
          results.push({
            content: `Failed to fetch content from ${url}: ${error}`,
            metadata: {
              url,
              title: `Error fetching ${url}`,
            },
          });
        }
      }),
    );
    return {
      type: 'search_results',
      results,
    };
  },
};
export default scrapeURLAction;

View File

@@ -1,129 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
import { Chunk, SearchResultsResearchBlock } from '../../../../types.js';
import { searchSearxng } from '../../../../searxng.js';
// Tool-call payload: social/discussion search queries; execute() caps at 3.
const schema = z.object({
  queries: z.array(z.string()).describe('List of social search queries'),
});
// Long-form tool description for the researcher prompt.
const socialSearchDescription = `
Use this tool to perform social media searches for relevant posts, discussions, and trends related to the user's query. Provide a list of concise search queries that will help gather comprehensive social media information on the topic at hand.
You can provide up to 3 queries at a time. Make sure the queries are specific and relevant to the user's needs.
For example, if the user is interested in public opinion on electric vehicles, your queries could be:
1. "Electric vehicles public opinion 2024"
2. "Social media discussions on EV adoption"
3. "Trends in electric vehicle usage"
If this tool is present and no other tools are more relevant, you MUST use this tool to get the needed social media information.
`;
/**
 * social_search — runs the model-supplied queries against SearXNG's reddit
 * engine and streams progress into the active 'research' block's subSteps.
 * Structure mirrors academic_search with a different engine set.
 */
const socialSearchAction: ResearchAction<typeof schema> = {
  name: 'social_search',
  schema: schema,
  getDescription: () => socialSearchDescription,
  getToolDescription: () =>
    "Use this tool to perform social media searches for relevant posts, discussions, and trends related to the user's query. Provide a list of concise search queries that will help gather comprehensive social media information on the topic at hand.",
  // Enabled only when the user selected 'discussions' AND the classifier
  // both allowed searching and flagged the query as a discussion search.
  enabled: (config) =>
    config.sources.includes('discussions') &&
    config.classification.classification.skipSearch === false &&
    config.classification.classification.discussionSearch === true,
  execute: async (input, additionalConfig) => {
    // Enforce the documented cap of 3 queries per call.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(
      additionalConfig.researchBlockId,
    );
    // Surface the queries in the UI as a 'searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        type: 'searching',
        id: crypto.randomUUID(),
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        {
          op: 'replace',
          path: '/data/subSteps',
          value: researchBlock.data.subSteps,
        },
      ]);
    }
    // One shared 'search_results' sub-step is created lazily by the first
    // query to finish; later queries append to it (flag flip is synchronous
    // after the check, so concurrent callbacks cannot both create it).
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    let results: Chunk[] = [];
    const search = async (q: string) => {
      const res = await searchSearxng(q, {
        engines: ['reddit'],
      });
      // Fall back to the title when the engine returns no snippet.
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: {
          title: r.title,
          url: r.url,
        },
      }));
      results.push(...resultChunks);
      if (
        !searchResultsEmitted &&
        researchBlock &&
        researchBlock.type === 'research'
      ) {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          {
            op: 'replace',
            path: '/data/subSteps',
            value: researchBlock.data.subSteps,
          },
        ]);
      } else if (
        searchResultsEmitted &&
        researchBlock &&
        researchBlock.type === 'research'
      ) {
        // Append to the sub-step created by an earlier query.
        const subStepIndex = researchBlock.data.subSteps.findIndex(
          (step) => step.id === searchResultsBlockId,
        );
        const subStep = researchBlock.data.subSteps[
          subStepIndex
        ] as SearchResultsResearchBlock;
        subStep.reading.push(...resultChunks);
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          {
            op: 'replace',
            path: '/data/subSteps',
            value: researchBlock.data.subSteps,
          },
        ]);
      }
    };
    // Run all queries concurrently; `results` accumulates in completion order.
    await Promise.all(input.queries.map(search));
    return {
      type: 'search_results',
      results,
    };
  },
};
export default socialSearchAction;

View File

@@ -1,107 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
import UploadStore from '../../../../uploads/store.js';
// Tool-call payload: up to 3 queries to run against the user's uploaded
// files (execute() enforces the cap).
const schema = z.object({
  queries: z
    .array(z.string())
    .describe(
      'A list of queries to search in user uploaded files. Can be a maximum of 3 queries.',
    ),
});
/**
 * uploads_search — vector search over the user's uploaded files via
 * UploadStore, deduplicated by source URL, with progress streamed into the
 * research block ('upload_searching' / 'upload_search_results' sub-steps).
 */
const uploadsSearchAction: ResearchAction<typeof schema> = {
  name: 'uploads_search',
  // Requires an embedding model and at least one attached file.
  // Simplified from `(classification.personalSearch && fileIds.length > 0)
  // || fileIds.length > 0`, which reduces to `fileIds.length > 0` for any
  // value of the classifier flag — the redundant branch is dropped.
  enabled: (config) =>
    config.hasEmbedding !== false && config.fileIds.length > 0,
  schema,
  getToolDescription: () =>
    `Use this tool to perform searches over the user's uploaded files. This is useful when you need to gather information from the user's documents to answer their questions. You can provide up to 3 queries at a time. You will have to use this every single time if this is present and relevant.`,
  getDescription: () => `
Use this tool to perform searches over the user's uploaded files. This is useful when you need to gather information from the user's documents to answer their questions. You can provide up to 3 queries at a time. You will have to use this every single time if this is present and relevant.
Always ensure that the queries you use are directly relevant to the user's request and pertain to the content of their uploaded files.
For example, if the user says "Please find information about X in my uploaded documents", you can call this tool with a query related to X to retrieve the relevant information from their files.
Never use this tool to search the web or for information that is not contained within the user's uploaded files.
`,
  execute: async (input, additionalConfig) => {
    // Without an embedding model vector search is impossible; degrade to an
    // empty result set rather than throwing mid-research.
    if (!additionalConfig.embedding) {
      return { type: 'search_results' as const, results: [] };
    }
    // Enforce the documented cap of 3 queries per call.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(
      additionalConfig.researchBlockId,
    );
    // Surface the queries in the UI as an 'upload_searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'upload_searching',
        queries: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        {
          op: 'replace',
          path: '/data/subSteps',
          value: researchBlock.data.subSteps,
        },
      ]);
    }
    const uploadStore = new UploadStore({
      embeddingModel: additionalConfig.embedding,
      fileIds: additionalConfig.fileIds,
    });
    const results = await uploadStore.query(input.queries, 10);
    // Deduplicate by source URL: the first chunk per URL is kept; later
    // chunks with the same URL get merged (appended) into that first chunk.
    // Chunks without a URL pass through untouched.
    const seenIds = new Map<string, number>();
    const filteredSearchResults = results
      .map((result, index) => {
        const url = result.metadata?.url as string | undefined;
        if (url && !seenIds.has(url)) {
          seenIds.set(url, index);
          return result;
        } else if (url && seenIds.has(url)) {
          const existingIndex = seenIds.get(url)!;
          const existingResult = results[existingIndex];
          existingResult.content += `\n\n${result.content}`;
          return undefined;
        }
        return result;
      })
      .filter((r) => r !== undefined);
    // Show the deduplicated hits in the UI.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'upload_search_results',
        results: filteredSearchResults,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        {
          op: 'replace',
          path: '/data/subSteps',
          value: researchBlock.data.subSteps,
        },
      ]);
    }
    return {
      type: 'search_results',
      results: filteredSearchResults,
    };
  },
};
export default uploadsSearchAction;

View File

@@ -1,182 +0,0 @@
import z from 'zod';
import { ResearchAction } from '../../types.js';
import { searchSearxng } from '../../../../searxng.js';
import { Chunk, SearchResultsResearchBlock } from '../../../../types.js';
// Tool-call payload: web search queries; execute() caps the list at 3.
const actionSchema = z.object({
  type: z.literal('web_search'),
  queries: z
    .array(z.string())
    .describe('An array of search queries to perform web searches for.'),
});
// Mode-specific tool descriptions: speed allows a single call, balanced
// allows a few iterative calls, quality demands multiple iterations.
const speedModePrompt = `
Use this tool to perform web searches based on the provided queries. This is useful when you need to gather information from the web to answer the user's questions. You can provide up to 3 queries at a time. You will have to use this every single time if this is present and relevant.
You are currently on speed mode, meaning you would only get to call this tool once. Make sure to prioritize the most important queries that are likely to get you the needed information in one go.
Your queries should be very targeted and specific to the information you need, avoid broad or generic queries.
Your queries shouldn't be sentences but rather keywords that are SEO friendly and can be used to search the web for information.
For example, if the user is asking about the features of a new technology, you might use queries like "GPT-5.1 features", "GPT-5.1 release date", "GPT-5.1 improvements" rather than a broad query like "Tell me about GPT-5.1".
You can search for 3 queries in one go, make sure to utilize all 3 queries to maximize the information you can gather. If a question is simple, then split your queries to cover different aspects or related topics to get a comprehensive understanding.
If this tool is present and no other tools are more relevant, you MUST use this tool to get the needed information.
`;
const balancedModePrompt = `
Use this tool to perform web searches based on the provided queries. This is useful when you need to gather information from the web to answer the user's questions. You can provide up to 3 queries at a time. You will have to use this every single time if this is present and relevant.
You can call this tool several times if needed to gather enough information.
Start initially with broader queries to get an overview, then narrow down with more specific queries based on the results you receive.
Your queries shouldn't be sentences but rather keywords that are SEO friendly and can be used to search the web for information.
For example if the user is asking about Tesla, your actions should be like:
1. __reasoning_preamble "The user is asking about Tesla. I will start with broader queries to get an overview of Tesla, then narrow down with more specific queries based on the results I receive." then
2. web_search ["Tesla", "Tesla latest news", "Tesla stock price"] then
3. __reasoning_preamble "Based on the previous search results, I will now narrow down my queries to focus on Tesla's recent developments and stock performance." then
4. web_search ["Tesla Q2 2025 earnings", "Tesla new model 2025", "Tesla stock analysis"] then done.
5. __reasoning_preamble "I have gathered enough information to provide a comprehensive answer."
6. done.
You can search for 3 queries in one go, make sure to utilize all 3 queries to maximize the information you can gather. If a question is simple, then split your queries to cover different aspects or related topics to get a comprehensive understanding.
If this tool is present and no other tools are more relevant, you MUST use this tool to get the needed information. You can call this tools, multiple times as needed.
`;
const qualityModePrompt = `
Use this tool to perform web searches based on the provided queries. This is useful when you need to gather information from the web to answer the user's questions. You can provide up to 3 queries at a time. You will have to use this every single time if this is present and relevant.
You have to call this tool several times to gather enough information unless the question is very simple (like greeting questions or basic facts).
Start initially with broader queries to get an overview, then narrow down with more specific queries based on the results you receive.
Never stop before at least 5-6 iterations of searches unless the user question is very simple.
Your queries shouldn't be sentences but rather keywords that are SEO friendly and can be used to search the web for information.
You can search for 3 queries in one go, make sure to utilize all 3 queries to maximize the information you can gather. If a question is simple, then split your queries to cover different aspects or related topics to get a comprehensive understanding.
If this tool is present and no other tools are more relevant, you MUST use this tool to get the needed information. You can call this tools, multiple times as needed.
`;
/**
 * web_search — runs the model-supplied queries against SearXNG's default
 * engines and streams progress into the active 'research' block's subSteps.
 * The tool description shown to the model varies with the research mode.
 */
const webSearchAction: ResearchAction<typeof actionSchema> = {
  name: 'web_search',
  schema: actionSchema,
  getToolDescription: () =>
    "Use this tool to perform web searches based on the provided queries. This is useful when you need to gather information from the web to answer the user's questions. You can provide up to 3 queries at a time. You will have to use this every single time if this is present and relevant.",
  getDescription: (config) => {
    // Pick the mode-specific prompt; unknown modes fall back to speed.
    let prompt = '';
    switch (config.mode) {
      case 'speed':
        prompt = speedModePrompt;
        break;
      case 'balanced':
        prompt = balancedModePrompt;
        break;
      case 'quality':
        prompt = qualityModePrompt;
        break;
      default:
        prompt = speedModePrompt;
        break;
    }
    return prompt;
  },
  // Enabled when the 'web' source is selected and the classifier allowed search.
  enabled: (config) =>
    config.sources.includes('web') &&
    config.classification.classification.skipSearch === false,
  execute: async (input, additionalConfig) => {
    // Enforce the documented cap of 3 queries per call.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(
      additionalConfig.researchBlockId,
    );
    // Surface the queries in the UI as a 'searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        {
          op: 'replace',
          path: '/data/subSteps',
          value: researchBlock.data.subSteps,
        },
      ]);
    }
    // One shared 'search_results' sub-step is created lazily by the first
    // query to finish; later queries append to it (flag flip is synchronous
    // after the check, so concurrent callbacks cannot both create it).
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    let results: Chunk[] = [];
    const search = async (q: string) => {
      const res = await searchSearxng(q);
      // Fall back to the title when the engine returns no snippet.
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: {
          title: r.title,
          url: r.url,
        },
      }));
      results.push(...resultChunks);
      if (
        !searchResultsEmitted &&
        researchBlock &&
        researchBlock.type === 'research'
      ) {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          {
            op: 'replace',
            path: '/data/subSteps',
            value: researchBlock.data.subSteps,
          },
        ]);
      } else if (
        searchResultsEmitted &&
        researchBlock &&
        researchBlock.type === 'research'
      ) {
        // Append to the sub-step created by an earlier query.
        const subStepIndex = researchBlock.data.subSteps.findIndex(
          (step) => step.id === searchResultsBlockId,
        );
        const subStep = researchBlock.data.subSteps[
          subStepIndex
        ] as SearchResultsResearchBlock;
        subStep.reading.push(...resultChunks);
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          {
            op: 'replace',
            path: '/data/subSteps',
            value: researchBlock.data.subSteps,
          },
        ]);
      }
    };
    // Run all queries concurrently; `results` accumulates in completion order.
    await Promise.all(input.queries.map(search));
    return {
      type: 'search_results',
      results,
    };
  },
};
export default webSearchAction;

View File

@@ -1,225 +0,0 @@
import { ActionOutput, ResearcherInput, ResearcherOutput } from '../types.js';
import { ActionRegistry } from './actions/index.js';
import { getResearcherPrompt } from '../../../prompts/search/researcher.js';
import SessionManager from '../../../session.js';
import { Message, ReasoningResearchBlock } from '../../../types.js';
import formatChatHistoryAsString from '../../../utils/formatHistory.js';
import { ToolCall } from '../../../models/types.js';
/**
 * Researcher — the agentic tool-calling loop of the search pipeline.
 *
 * Repeatedly prompts the LLM with the enabled tools, executes emitted tool
 * calls via ActionRegistry, feeds results back as tool messages, and stops
 * when the model calls `done` or the iteration budget is exhausted. Emits a
 * 'research' block whose subSteps mirror progress for the UI, and finally a
 * 'source' block with URL-deduplicated search results.
 */
class Researcher {
  async research(
    session: SessionManager,
    input: ResearcherInput,
  ): Promise<ResearcherOutput> {
    let actionOutput: ActionOutput[] = [];
    // Iteration budget scales with the requested research depth.
    let maxIteration =
      input.config.mode === 'speed'
        ? 2
        : input.config.mode === 'balanced'
          ? 6
          : 25;
    const availableTools = ActionRegistry.getAvailableActionTools({
      classification: input.classification,
      fileIds: input.config.fileIds,
      mode: input.config.mode,
      sources: input.config.sources,
      hasEmbedding: !!input.config.embedding,
    });
    const availableActionsDescription =
      ActionRegistry.getAvailableActionsDescriptions({
        classification: input.classification,
        fileIds: input.config.fileIds,
        mode: input.config.mode,
        sources: input.config.sources,
        hasEmbedding: !!input.config.embedding,
      });
    // Root research block; every sub-step below is patched into it.
    const researchBlockId = crypto.randomUUID();
    session.emitBlock({
      id: researchBlockId,
      type: 'research',
      data: {
        subSteps: [],
      },
    });
    // Seed the agent transcript with the recent conversation (last 10 turns).
    const agentMessageHistory: Message[] = [
      {
        role: 'user',
        content: `
        <conversation>
          ${formatChatHistoryAsString(input.chatHistory.slice(-10))}
          User: ${input.followUp} (Standalone question: ${input.classification.standaloneFollowUp})
        </conversation>
        `,
      },
    ];
    for (let i = 0; i < maxIteration; i++) {
      // Prompt is rebuilt each iteration so it can reflect remaining budget.
      const researcherPrompt = getResearcherPrompt(
        availableActionsDescription,
        input.config.mode,
        i,
        maxIteration,
        input.config.fileIds,
        input.config.locale,
      );
      const actionStream = input.config.llm.streamText({
        messages: [
          {
            role: 'system',
            content: researcherPrompt,
          },
          ...agentMessageHistory,
        ],
        tools: availableTools,
      });
      const block = session.getBlock(researchBlockId);
      let reasoningEmitted = false;
      let reasoningId = crypto.randomUUID();
      let finalToolCalls: ToolCall[] = [];
      // Stream partial tool calls: the __reasoning_preamble plan is shown
      // live (created once, then updated in place as its text grows).
      for await (const partialRes of actionStream) {
        if (partialRes.toolCallChunk.length > 0) {
          partialRes.toolCallChunk.forEach((tc) => {
            if (
              tc.name === '__reasoning_preamble' &&
              tc.arguments['plan'] &&
              !reasoningEmitted &&
              block &&
              block.type === 'research'
            ) {
              reasoningEmitted = true;
              block.data.subSteps.push({
                id: reasoningId,
                type: 'reasoning',
                reasoning: tc.arguments['plan'],
              });
              session.updateBlock(researchBlockId, [
                {
                  op: 'replace',
                  path: '/data/subSteps',
                  value: block.data.subSteps,
                },
              ]);
            } else if (
              tc.name === '__reasoning_preamble' &&
              tc.arguments['plan'] &&
              reasoningEmitted &&
              block &&
              block.type === 'research'
            ) {
              const subStepIndex = block.data.subSteps.findIndex(
                (step: any) => step.id === reasoningId,
              );
              if (subStepIndex !== -1) {
                const subStep = block.data.subSteps[
                  subStepIndex
                ] as ReasoningResearchBlock;
                subStep.reasoning = tc.arguments['plan'];
                session.updateBlock(researchBlockId, [
                  {
                    op: 'replace',
                    path: '/data/subSteps',
                    value: block.data.subSteps,
                  },
                ]);
              }
            }
            // Accumulate the latest argument snapshot per tool-call id.
            const existingIndex = finalToolCalls.findIndex(
              (ftc) => ftc.id === tc.id,
            );
            if (existingIndex !== -1) {
              finalToolCalls[existingIndex].arguments = tc.arguments;
            } else {
              finalToolCalls.push(tc);
            }
          });
        }
      }
      // No tool calls → the model has nothing more to do.
      if (finalToolCalls.length === 0) {
        break;
      }
      // Explicit `done` as the last call ends the loop without executing it.
      if (finalToolCalls[finalToolCalls.length - 1].name === 'done') {
        break;
      }
      agentMessageHistory.push({
        role: 'assistant',
        content: '',
        tool_calls: finalToolCalls,
      });
      const actionResults = await ActionRegistry.executeAll(finalToolCalls, {
        llm: input.config.llm,
        embedding: input.config.embedding,
        session: session,
        researchBlockId: researchBlockId,
        fileIds: input.config.fileIds,
      });
      actionOutput.push(...actionResults);
      // NOTE(review): pairs result i with finalToolCalls[i] by index — this
      // relies on ActionRegistry.executeAll returning results in input
      // order; a push-on-completion implementation would mismatch ids.
      actionResults.forEach((action, i) => {
        agentMessageHistory.push({
          role: 'tool',
          id: finalToolCalls[i].id,
          name: finalToolCalls[i].name,
          content: JSON.stringify(action),
        });
      });
    }
    const searchResults = actionOutput
      .filter((a) => a.type === 'search_results')
      .flatMap((a) => a.results);
    // Deduplicate by source URL: keep the first chunk per URL and merge
    // later same-URL chunks into it; chunks without a URL pass through.
    const seenUrls = new Map<string, number>();
    const filteredSearchResults = searchResults
      .map((result, index) => {
        if (result.metadata?.url as string && !seenUrls.has(result.metadata?.url as string)) {
          seenUrls.set(result.metadata?.url as string, index);
          return result;
        } else if (result.metadata?.url as string && seenUrls.has(result.metadata?.url as string)) {
          const existingIndex = seenUrls.get(result.metadata?.url as string)!;
          const existingResult = searchResults[existingIndex];
          existingResult.content += `\n\n${result.content}`;
          return undefined;
        }
        return result;
      })
      .filter((r) => r !== undefined);
    // Publish the deduplicated sources for the UI citation panel.
    session.emitBlock({
      id: crypto.randomUUID(),
      type: 'source',
      data: filteredSearchResults,
    });
    return {
      findings: actionOutput,
      searchFindings: filteredSearchResults,
    };
  }
}
export default Researcher;

View File

@@ -1,141 +0,0 @@
import z from 'zod';
import BaseLLM from '../../models/base/llm.js';
import BaseEmbedding from '../../models/base/embedding.js';
import SessionManager from '../../session.js';
import { ChatTurnMessage, Chunk } from '../../types.js';
/** Search verticals the orchestrator may query. */
export type SearchSources = 'web' | 'discussions' | 'academic';
/** Answer mode — answer vertical (travel, finance, academic, etc.) */
export type AnswerMode = 'standard' | 'focus' | 'academic' | 'writing' | 'travel' | 'finance';
/** locale from geo (e.g. ru, en) — language of the answer */
export type SearchAgentConfig = {
  sources: SearchSources[];
  fileIds: string[];
  llm: BaseLLM<any>;
  embedding: BaseEmbedding<any> | null;
  mode: 'speed' | 'balanced' | 'quality';
  systemInstructions: string;
  locale?: string;
  /** Memory context from memory-svc (Pro) — user preferences, facts */
  memoryContext?: string;
  /** Answer mode — vertical focus (travel, finance, academic) */
  answerMode?: AnswerMode;
  /** Response preferences from user settings */
  responsePrefs?: { format?: string; length?: string; tone?: string };
  /** Step-by-step Learning — explain step by step, expose the reasoning */
  learningMode?: boolean;
  /** Model Council (Max): 3 LLMs for parallel writer + synthesis */
  councilLlms?: [BaseLLM<any>, BaseLLM<any>, BaseLLM<any>];
  /** Library client for persisting messages (instead of SQLite) */
  libraryClient?: { upsertMessage: (threadId: string, msgId: string, query: string, opts?: object) => Promise<void>; updateMessage: (threadId: string, msgId: string, opts: object) => Promise<void>; enabled: boolean };
};
/** One turn of work for the search agent: history + new user follow-up. */
export type SearchAgentInput = {
  chatHistory: ChatTurnMessage[];
  followUp: string;
  config: SearchAgentConfig;
  chatId: string;
  messageId: string;
};
/** Input handed to each widget's execute(). */
export type WidgetInput = {
  chatHistory: ChatTurnMessage[];
  followUp: string;
  classification: ClassifierOutput;
  llm: BaseLLM<any>;
};
/** Contract every widget (weather/stock/calculation) implements. */
export type Widget = {
  type: string;
  shouldExecute: (classification: ClassifierOutput) => boolean;
  execute: (input: WidgetInput) => Promise<WidgetOutput | void>;
};
/** Widget result: llmContext feeds the writer prompt, data feeds the UI. */
export type WidgetOutput = {
  type: string;
  llmContext: string;
  data: any;
};
/** Input for the query classifier. */
export type ClassifierInput = {
  llm: BaseLLM<any>;
  enabledSources: SearchSources[];
  query: string;
  chatHistory: ChatTurnMessage[];
  locale?: string;
};
/** Classifier decision flags plus the rewritten standalone query. */
export type ClassifierOutput = {
  classification: {
    skipSearch: boolean;
    personalSearch: boolean;
    academicSearch: boolean;
    discussionSearch: boolean;
    showWeatherWidget: boolean;
    showStockWidget: boolean;
    showCalculationWidget: boolean;
  };
  standaloneFollowUp: string;
};
/** Runtime services made available to research actions. */
export type AdditionalConfig = {
  llm: BaseLLM<any>;
  embedding: BaseEmbedding<any> | null;
  session: SessionManager;
};
export type ResearcherInput = {
  chatHistory: ChatTurnMessage[];
  followUp: string;
  classification: ClassifierOutput;
  config: SearchAgentConfig;
};
export type ResearcherOutput = {
  findings: ActionOutput[];
  searchFindings: Chunk[];
};
export type SearchActionOutput = {
  type: 'search_results';
  results: Chunk[];
};
export type DoneActionOutput = {
  type: 'done';
};
export type ReasoningResearchAction = {
  type: 'reasoning';
  reasoning: string;
};
/** Discriminated union of everything a research action may return. */
export type ActionOutput =
  | SearchActionOutput
  | DoneActionOutput
  | ReasoningResearchAction;
/** A tool-call action the researcher LLM can invoke during a research loop. */
export interface ResearchAction<
  TSchema extends z.ZodObject<any> = z.ZodObject<any>,
> {
  name: string;
  schema: z.ZodObject<any>;
  getToolDescription: (config: { mode: SearchAgentConfig['mode'] }) => string;
  getDescription: (config: { mode: SearchAgentConfig['mode'] }) => string;
  enabled: (config: {
    classification: ClassifierOutput;
    fileIds: string[];
    mode: SearchAgentConfig['mode'];
    sources: SearchSources[];
    hasEmbedding?: boolean;
  }) => boolean;
  execute: (
    params: z.infer<TSchema>,
    additionalConfig: AdditionalConfig & {
      researchBlockId: string;
      fileIds: string[];
    },
  ) => Promise<ActionOutput>;
}

View File

@@ -1,71 +0,0 @@
import z from 'zod';
import { Widget } from '../types.js';
import formatChatHistoryAsString from '../../../utils/formatHistory.js';
import { exp, evaluate as mathEval } from 'mathjs';
// Structured-output schema for the expression-extraction LLM call below.
const schema = z.object({
  expression: z
    .string()
    .describe('Mathematical expression to calculate or evaluate.'),
  notPresent: z
    .boolean()
    .describe('Whether there is any need for the calculation widget.'),
});
// Extraction prompt: pull a mathjs-evaluable expression out of the
// conversation, or flag notPresent when there is nothing to compute.
const system = `
<role>
Assistant is a calculation expression extractor. You will recieve a user follow up and a conversation history.
Your task is to determine if there is a mathematical expression that needs to be calculated or evaluated. If there is, extract the expression and return it. If there is no need for any calculation, set notPresent to true.
</role>
<instructions>
Make sure that the extracted expression is valid and can be used to calculate the result with Math JS library (https://mathjs.org/). If the expression is not valid, set notPresent to true.
If you feel like you cannot extract a valid expression, set notPresent to true.
</instructions>
<output_format>
You must respond in the following JSON format without any extra text, explanations or filler sentences:
{
  "expression": string,
  "notPresent": boolean
}
</output_format>
`;
/**
 * Calculation widget: asks the LLM to extract a mathjs expression from the
 * conversation and evaluates it locally.
 * Returns nothing when no calculation is needed or the expression is invalid.
 */
const calculationWidget: Widget = {
  type: 'calculationWidget',
  shouldExecute: (classification) =>
    classification.classification.showCalculationWidget,
  execute: async (input) => {
    const output = await input.llm.generateObject<typeof schema>({
      messages: [
        {
          role: 'system',
          content: system,
        },
        {
          role: 'user',
          content: `<conversation_history>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation_history>\n<user_follow_up>\n${input.followUp}\n</user_follow_up>`,
        },
      ],
      schema,
    });
    if (output.notPresent) {
      return;
    }
    // The LLM is instructed to emit only mathjs-valid expressions, but that
    // is not guaranteed — evaluate defensively and treat failures as
    // "no widget" instead of letting the error bubble to the executor.
    let result;
    try {
      result = mathEval(output.expression);
    } catch {
      return;
    }
    return {
      type: 'calculation_result',
      llmContext: `The result of the calculation for the expression "${output.expression}" is: ${result}`,
      data: {
        expression: output.expression,
        result,
      },
    };
  },
};
export default calculationWidget;

View File

@@ -1,36 +0,0 @@
import { Widget, WidgetInput, WidgetOutput } from '../types.js';
/**
 * Static registry and runner for UI widgets (weather/stock/calculation).
 * Widgets register once at module load; executeAll runs every widget whose
 * shouldExecute matches the classification, in parallel, isolating failures
 * so one broken widget never blocks the others.
 */
class WidgetExecutor {
  // Keyed by widget.type; later registrations with the same type overwrite.
  static widgets = new Map<string, Widget>();

  static register(widget: Widget) {
    this.widgets.set(widget.type, widget);
  }

  static getWidget(type: string): Widget | undefined {
    return this.widgets.get(type);
  }

  /**
   * Run all applicable widgets concurrently and collect their outputs.
   * Note: results arrive in completion order, not registration order.
   */
  static async executeAll(input: WidgetInput): Promise<WidgetOutput[]> {
    const results: WidgetOutput[] = [];
    await Promise.all(
      Array.from(this.widgets.values()).map(async (widget) => {
        try {
          if (widget.shouldExecute(input.classification)) {
            const output = await widget.execute(input);
            if (output) {
              results.push(output);
            }
          }
        } catch (e) {
          // console.error (was console.log) so failures reach error streams,
          // consistent with logging elsewhere in the service.
          console.error(`Error executing widget ${widget.type}:`, e);
        }
      }),
    );
    return results;
  }
}
export default WidgetExecutor;

View File

@@ -1,10 +0,0 @@
import calculationWidget from './calculationWidget.js';
import WidgetExecutor from './executor.js';
import weatherWidget from './weatherWidget.js';
import stockWidget from './stockWidget.js';
// Register built-in widgets at module load; SearchOrchestrator triggers them
// via WidgetExecutor.executeAll based on classifier flags.
WidgetExecutor.register(weatherWidget);
WidgetExecutor.register(calculationWidget);
WidgetExecutor.register(stockWidget);
export { WidgetExecutor };

View File

@@ -1,434 +0,0 @@
import z from 'zod';
import { Widget } from '../types.js';
import YahooFinance from 'yahoo-finance2';
import formatChatHistoryAsString from '../../../utils/formatHistory.js';
// Shared Yahoo Finance client; survey notice suppressed to keep logs clean.
const yf = new YahooFinance({
  suppressNotices: ['yahooSurvey'],
});
// Structured-output schema for the ticker-extraction LLM call below.
const schema = z.object({
  name: z
    .string()
    .describe(
      "The stock name for example Nvidia, Google, Apple, Microsoft etc. You can also return ticker if you're aware of it otherwise just use the name.",
    ),
  comparisonNames: z
    .array(z.string())
    .max(3)
    .describe(
      "Optional array of up to 3 stock names to compare against the base name (e.g., ['Microsoft', 'GOOGL', 'Meta']). Charts will show percentage change comparison.",
    ),
  notPresent: z
    .boolean()
    .describe('Whether there is no need for the stock widget.'),
});
// Extraction prompt: identify the primary stock plus optional comparisons.
const systemPrompt = `
<role>
You are a stock ticker/name extractor. You will receive a user follow up and a conversation history.
Your task is to determine if the user is asking about stock information and extract the stock name(s) they want data for.
</role>
<instructions>
- If the user is asking about a stock, extract the primary stock name or ticker.
- If the user wants to compare stocks, extract up to 3 comparison stock names in comparisonNames.
- You can use either stock names (e.g., "Nvidia", "Apple") or tickers (e.g., "NVDA", "AAPL").
- If you cannot determine a valid stock or the query is not stock-related, set notPresent to true.
- If no comparison is needed, set comparisonNames to an empty array.
</instructions>
<output_format>
You must respond in the following JSON format without any extra text, explanations or filler sentences:
{
  "name": string,
  "comparisonNames": string[],
  "notPresent": boolean
}
</output_format>
`;
const DAY_MS = 24 * 60 * 60 * 1000;

/**
 * Chart ranges rendered by the UI. `bounded` ranges also pass period2=now
 * (intraday ranges), matching the original per-range fetch parameters.
 * The same set is used for the base ticker and every comparison ticker.
 */
const CHART_RANGES = [
  { label: '1D', lookbackMs: 2 * DAY_MS, interval: '5m', bounded: true },
  { label: '5D', lookbackMs: 6 * DAY_MS, interval: '15m', bounded: true },
  { label: '1M', lookbackMs: 30 * DAY_MS, interval: '1d', bounded: false },
  { label: '3M', lookbackMs: 90 * DAY_MS, interval: '1d', bounded: false },
  { label: '6M', lookbackMs: 180 * DAY_MS, interval: '1d', bounded: false },
  { label: '1Y', lookbackMs: 365 * DAY_MS, interval: '1d', bounded: false },
  { label: 'MAX', lookbackMs: 10 * 365 * DAY_MS, interval: '1wk', bounded: false },
] as const;

/** Fetch all seven ranges for one ticker in parallel; a failed range → null. */
const fetchCharts = (ticker: string) =>
  Promise.all(
    CHART_RANGES.map((r) =>
      yf
        .chart(ticker, {
          period1: new Date(Date.now() - r.lookbackMs),
          ...(r.bounded ? { period2: new Date() } : {}),
          interval: r.interval,
        })
        .catch(() => null),
    ),
  );

/** Convert one raw chart into { timestamps, prices } (or null if missing). */
const toSeries = (chart: any) =>
  chart
    ? {
        timestamps: chart.quotes.map((q: any) => q.date.getTime()),
        prices: chart.quotes.map((q: any) => q.close),
      }
    : null;

/** Build the { '1D' … 'MAX' } chartData map from a fetchCharts result. */
const toChartData = (charts: any[]) =>
  Object.fromEntries(CHART_RANGES.map((r, i) => [r.label, toSeries(charts[i])]));

/**
 * Stock widget: extracts the ticker(s) via LLM, then fetches quote + charts
 * from Yahoo Finance, optionally with up to 3 comparison tickers.
 * On fetch failure returns an error payload instead of throwing.
 */
const stockWidget: Widget = {
  type: 'stockWidget',
  shouldExecute: (classification) =>
    classification.classification.showStockWidget,
  execute: async (input) => {
    const output = await input.llm.generateObject<typeof schema>({
      messages: [
        {
          role: 'system',
          content: systemPrompt,
        },
        {
          role: 'user',
          content: `<conversation_history>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation_history>\n<user_follow_up>\n${input.followUp}\n</user_follow_up>`,
        },
      ],
      schema,
    });
    if (output.notPresent) {
      return;
    }
    const params = output;
    try {
      // Resolve a free-form name ("Nvidia") to a concrete ticker symbol.
      const name = params.name;
      const findings = await yf.search(name);
      if (findings.quotes.length === 0)
        throw new Error(`Failed to find quote for name/symbol: ${name}`);
      const ticker = findings.quotes[0].symbol as string;
      const quote: any = await yf.quote(ticker);
      const charts = await fetchCharts(ticker);
      if (!quote) {
        throw new Error(`No data found for ticker: ${ticker}`);
      }
      // Comparison tickers: resolved/fetched in parallel, failures dropped.
      let comparisonData: any = null;
      if (params.comparisonNames.length > 0) {
        const compResults = await Promise.all(
          params.comparisonNames.slice(0, 3).map(async (compName) => {
            try {
              const compFindings = await yf.search(compName);
              if (compFindings.quotes.length === 0) return null;
              const compTicker = compFindings.quotes[0].symbol as string;
              const compQuote = await yf.quote(compTicker);
              const compCharts = await fetchCharts(compTicker);
              return {
                ticker: compTicker,
                name: compQuote.shortName || compTicker,
                charts: compCharts,
              };
            } catch (error) {
              console.error(
                `Failed to fetch comparison ticker ${compName}:`,
                error,
              );
              return null;
            }
          }),
        );
        comparisonData = compResults.filter((r) => r !== null);
      }
      const stockData = {
        symbol: quote.symbol,
        shortName: quote.shortName || quote.longName || ticker,
        longName: quote.longName,
        exchange: quote.fullExchangeName || quote.exchange,
        currency: quote.currency,
        quoteType: quote.quoteType,
        marketState: quote.marketState,
        regularMarketTime: quote.regularMarketTime,
        postMarketTime: quote.postMarketTime,
        preMarketTime: quote.preMarketTime,
        regularMarketPrice: quote.regularMarketPrice,
        regularMarketChange: quote.regularMarketChange,
        regularMarketChangePercent: quote.regularMarketChangePercent,
        regularMarketPreviousClose: quote.regularMarketPreviousClose,
        regularMarketOpen: quote.regularMarketOpen,
        regularMarketDayHigh: quote.regularMarketDayHigh,
        regularMarketDayLow: quote.regularMarketDayLow,
        postMarketPrice: quote.postMarketPrice,
        postMarketChange: quote.postMarketChange,
        postMarketChangePercent: quote.postMarketChangePercent,
        preMarketPrice: quote.preMarketPrice,
        preMarketChange: quote.preMarketChange,
        preMarketChangePercent: quote.preMarketChangePercent,
        regularMarketVolume: quote.regularMarketVolume,
        averageDailyVolume3Month: quote.averageDailyVolume3Month,
        averageDailyVolume10Day: quote.averageDailyVolume10Day,
        bid: quote.bid,
        bidSize: quote.bidSize,
        ask: quote.ask,
        askSize: quote.askSize,
        fiftyTwoWeekLow: quote.fiftyTwoWeekLow,
        fiftyTwoWeekHigh: quote.fiftyTwoWeekHigh,
        fiftyTwoWeekChange: quote.fiftyTwoWeekChange,
        fiftyTwoWeekChangePercent: quote.fiftyTwoWeekChangePercent,
        marketCap: quote.marketCap,
        trailingPE: quote.trailingPE,
        forwardPE: quote.forwardPE,
        priceToBook: quote.priceToBook,
        bookValue: quote.bookValue,
        earningsPerShare: quote.epsTrailingTwelveMonths,
        epsForward: quote.epsForward,
        dividendRate: quote.dividendRate,
        dividendYield: quote.dividendYield,
        exDividendDate: quote.exDividendDate,
        trailingAnnualDividendRate: quote.trailingAnnualDividendRate,
        trailingAnnualDividendYield: quote.trailingAnnualDividendYield,
        beta: quote.beta,
        fiftyDayAverage: quote.fiftyDayAverage,
        fiftyDayAverageChange: quote.fiftyDayAverageChange,
        fiftyDayAverageChangePercent: quote.fiftyDayAverageChangePercent,
        twoHundredDayAverage: quote.twoHundredDayAverage,
        twoHundredDayAverageChange: quote.twoHundredDayAverageChange,
        twoHundredDayAverageChangePercent:
          quote.twoHundredDayAverageChangePercent,
        sector: quote.sector,
        industry: quote.industry,
        website: quote.website,
        chartData: toChartData(charts),
        comparisonData: comparisonData
          ? comparisonData.map((comp: any) => ({
              ticker: comp.ticker,
              name: comp.name,
              chartData: toChartData(comp.charts),
            }))
          : null,
      };
      return {
        type: 'stock',
        llmContext: `Current price of ${stockData.shortName} (${stockData.symbol}) is ${stockData.regularMarketPrice} ${stockData.currency}. Other details: ${JSON.stringify(
          {
            marketState: stockData.marketState,
            regularMarketChange: stockData.regularMarketChange,
            regularMarketChangePercent: stockData.regularMarketChangePercent,
            marketCap: stockData.marketCap,
            peRatio: stockData.trailingPE,
            dividendYield: stockData.dividendYield,
          },
        )}`,
        data: stockData,
      };
    } catch (error: any) {
      return {
        type: 'stock',
        llmContext: 'Failed to fetch stock data.',
        data: {
          error: `Error fetching stock data: ${error.message || error}`,
          ticker: params.name,
        },
      };
    }
  },
};
export default stockWidget;

View File

@@ -1,203 +0,0 @@
import z from 'zod';
import { Widget } from '../types.js';
import formatChatHistoryAsString from '../../../utils/formatHistory.js';
// Structured-output schema for the location-extraction LLM call below.
// The model provides EITHER a location name OR lat/lon, never both.
const schema = z.object({
  location: z
    .string()
    .describe(
      'Human-readable location name (e.g., "New York, NY, USA", "London, UK"). Use this OR lat/lon coordinates, never both. Leave empty string if providing coordinates.',
    ),
  lat: z
    .number()
    .describe(
      'Latitude coordinate in decimal degrees (e.g., 40.7128). Only use when location name is empty.',
    ),
  lon: z
    .number()
    .describe(
      'Longitude coordinate in decimal degrees (e.g., -74.0060). Only use when location name is empty.',
    ),
  notPresent: z
    .boolean()
    .describe('Whether there is no need for the weather widget.'),
});
// Extraction prompt: pull a resolvable location out of the conversation.
const systemPrompt = `
<role>
You are a location extractor for weather queries. You will receive a user follow up and a conversation history.
Your task is to determine if the user is asking about weather and extract the location they want weather for.
</role>
<instructions>
- If the user is asking about weather, extract the location name OR coordinates (never both).
- If using location name, set lat and lon to 0.
- If using coordinates, set location to empty string.
- If you cannot determine a valid location or the query is not weather-related, set notPresent to true.
- Location should be specific (city, state/region, country) for best results.
- You have to give the location so that it can be used to fetch weather data, it cannot be left empty unless notPresent is true.
- Make sure to infer short forms of location names (e.g., "NYC" -> "New York City", "LA" -> "Los Angeles").
</instructions>
<output_format>
You must respond in the following JSON format without any extra text, explanations or filler sentences:
{
  "location": string,
  "lat": number,
  "lon": number,
  "notPresent": boolean
}
</output_format>
`;
// Headers shared by all Nominatim / Open-Meteo requests.
const WEATHER_HEADERS = {
  'User-Agent': 'GooSeek',
  'Content-Type': 'application/json',
};

/** Open-Meteo forecast URL for given coordinates (7-day, current+hourly+daily). */
const forecastUrl = (lat: number | string, lon: number | string) =>
  `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lon}&current=temperature_2m,relative_humidity_2m,apparent_temperature,is_day,precipitation,rain,showers,snowfall,weather_code,cloud_cover,pressure_msl,surface_pressure,wind_speed_10m,wind_direction_10m,wind_gusts_10m&hourly=temperature_2m,precipitation_probability,precipitation,weather_code&daily=weather_code,temperature_2m_max,temperature_2m_min,precipitation_sum,precipitation_probability_max&timezone=auto&forecast_days=7`;

/**
 * Shape the Open-Meteo response into the widget output. The hourly series is
 * truncated to the next 24 hours; daily/timezone are passed through.
 */
const shapeWeatherOutput = (
  location: string,
  latitude: number | string,
  longitude: number | string,
  weatherData: any,
) => ({
  type: 'weather',
  llmContext: `Weather in ${location} is ${JSON.stringify(weatherData.current)}`,
  data: {
    location,
    latitude,
    longitude,
    current: weatherData.current,
    hourly: {
      time: weatherData.hourly.time.slice(0, 24),
      temperature_2m: weatherData.hourly.temperature_2m.slice(0, 24),
      precipitation_probability:
        weatherData.hourly.precipitation_probability.slice(0, 24),
      precipitation: weatherData.hourly.precipitation.slice(0, 24),
      weather_code: weatherData.hourly.weather_code.slice(0, 24),
    },
    daily: weatherData.daily,
    timezone: weatherData.timezone,
  },
});

/**
 * Weather widget: extracts a location (name or coordinates) via LLM, then
 * geocodes through Nominatim and fetches a 7-day forecast from Open-Meteo.
 * On any failure returns an error payload instead of throwing.
 */
const weatherWidget: Widget = {
  type: 'weatherWidget',
  shouldExecute: (classification) =>
    classification.classification.showWeatherWidget,
  execute: async (input) => {
    const output = await input.llm.generateObject<typeof schema>({
      messages: [
        {
          role: 'system',
          content: systemPrompt,
        },
        {
          role: 'user',
          content: `<conversation_history>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation_history>\n<user_follow_up>\n${input.followUp}\n</user_follow_up>`,
        },
      ],
      schema,
    });
    if (output.notPresent) {
      return;
    }
    const params = output;
    try {
      if (
        params.location === '' &&
        (params.lat === undefined || params.lon === undefined)
      ) {
        throw new Error(
          'Either location name or both latitude and longitude must be provided.',
        );
      }
      if (params.location !== '') {
        // Name branch: forward-geocode first, then fetch the forecast.
        const openStreetMapUrl = `https://nominatim.openstreetmap.org/search?q=${encodeURIComponent(params.location)}&format=json&limit=1`;
        const locationRes = await fetch(openStreetMapUrl, {
          headers: WEATHER_HEADERS,
        });
        const data = await locationRes.json();
        const location = data[0];
        if (!location) {
          throw new Error(
            `Could not find coordinates for location: ${params.location}`,
          );
        }
        const weatherRes = await fetch(forecastUrl(location.lat, location.lon), {
          headers: WEATHER_HEADERS,
        });
        const weatherData = await weatherRes.json();
        return shapeWeatherOutput(
          params.location,
          location.lat,
          location.lon,
          weatherData,
        );
      } else if (params.lat !== undefined && params.lon !== undefined) {
        // Coordinate branch: forecast and reverse-geocode run in parallel.
        const [weatherRes, locationRes] = await Promise.all([
          fetch(forecastUrl(params.lat, params.lon), {
            headers: WEATHER_HEADERS,
          }),
          fetch(
            `https://nominatim.openstreetmap.org/reverse?lat=${params.lat}&lon=${params.lon}&format=json`,
            {
              headers: WEATHER_HEADERS,
            },
          ),
        ]);
        const weatherData = await weatherRes.json();
        const locationData = await locationRes.json();
        return shapeWeatherOutput(
          locationData.display_name,
          params.lat,
          params.lon,
          weatherData,
        );
      }
      return {
        type: 'weather',
        llmContext: 'No valid location or coordinates provided.',
        data: null,
      };
    } catch (err) {
      return {
        type: 'weather',
        llmContext: 'Failed to fetch weather data.',
        data: {
          error: `Error fetching weather data: ${err}`,
        },
      };
    }
  },
};
export default weatherWidget;

View File

@@ -1,5 +1,6 @@
import type { ModelProviderUISection } from './types.js';
import { getModelProvidersUIConfigSection } from '../models/providers/index.js';
/** Provider UI config comes from llm-svc API (GET /api/v1/config). Empty for local init. */
export function loadModelProvidersUIConfigSection(): ModelProviderUISection[] {
return getModelProvidersUIConfigSection();
return [];
}

View File

@@ -1,4 +1,4 @@
import { Model } from '../models/types.js';
export type Model = { name: string; key: string };
type BaseUIConfigField = {
name: string;

View File

@@ -0,0 +1,43 @@
/**
* EmbeddingClient — HTTP-клиент к llm-svc для эмбеддингов
* Используется когда LLM_SVC_URL задан. Заменяет локальные embedding-модели.
*/
// llm-svc base URL, captured once at module load.
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';

/** Minimal embedding interface mirroring the local BaseEmbedding contract. */
export interface EmbeddingClient {
  embedText(texts: string[]): Promise<number[][]>;
  embedChunks(chunks: { content: string; metadata: Record<string, unknown> }[]): Promise<number[][]>;
}

/** Resolve the llm-svc base URL (no trailing slash); throws when unset. */
function getBaseUrl(): string {
  if (!LLM_SVC_URL) throw new Error('LLM_SVC_URL is required for EmbeddingClient');
  return LLM_SVC_URL.replace(/\/$/, '');
}

/**
 * Build an EmbeddingClient bound to one model. POSTs batches of texts to
 * llm-svc /api/v1/embeddings with a 60s timeout; chunks are embedded by
 * their content only.
 */
export function createEmbeddingClient(model: { providerId: string; key: string }): EmbeddingClient {
  const base = getBaseUrl();

  const embedText = async (texts: string[]): Promise<number[][]> => {
    // Short-circuit empty batches — no network round-trip needed.
    if (texts.length === 0) {
      return [];
    }
    const response = await fetch(`${base}/api/v1/embeddings`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, texts }),
      signal: AbortSignal.timeout(60000),
    });
    if (!response.ok) {
      const err = await response.text();
      throw new Error(`llm-svc embeddings failed: ${response.status} ${err}`);
    }
    const payload = (await response.json()) as { embeddings: number[][] };
    return payload.embeddings;
  };

  return {
    embedText,
    embedChunks: (chunks) => embedText(chunks.map((c) => c.content)),
  };
}

View File

@@ -1,97 +0,0 @@
/**
* library-svc client — сохранение threads и messages вместо SQLite
*/
// library-svc base URL, captured once at module load; empty disables the client.
const LIBRARY_SVC_URL = process.env.LIBRARY_SVC_URL ?? '';
/** One response block of a stored message (shape defined by library-svc). */
export interface Block {
  type: string;
  id?: string;
  data?: unknown;
}
/**
 * HTTP client for library-svc: persists chat threads/messages in place of
 * the former SQLite store. All calls are best-effort — failures are logged
 * and never thrown, so persistence problems cannot break the chat flow.
 */
export class LibraryClient {
  constructor(
    private baseUrl: string,
    private authHeader: string | undefined,
  ) {}

  /** Usable only when both a base URL and an auth header are configured. */
  get enabled(): boolean {
    return !!this.baseUrl && !!this.authHeader;
  }

  /**
   * Shared request helper: sends a JSON body with auth + 5s timeout and logs
   * (but never throws) on any failure. `label` tags log lines per caller.
   */
  private async send(
    method: string,
    path: string,
    body: unknown,
    label: string,
  ): Promise<void> {
    if (!this.enabled) return;
    try {
      const res = await fetch(`${this.baseUrl.replace(/\/$/, '')}${path}`, {
        method,
        headers: {
          'Content-Type': 'application/json',
          Authorization: this.authHeader!,
        },
        body: JSON.stringify(body),
        signal: AbortSignal.timeout(5000),
      });
      if (!res.ok) {
        const err = await res.text();
        console.error(`[LibraryClient] ${label} failed:`, res.status, err);
      }
    } catch (err) {
      console.error(`[LibraryClient] ${label} error:`, err);
    }
  }

  /** Create/overwrite one message in a thread (defaults: status 'answering'). */
  async upsertMessage(
    threadId: string,
    messageId: string,
    query: string,
    opts: {
      backendId?: string;
      responseBlocks?: Block[];
      status?: string;
      sources?: string[];
      files?: { fileId: string; name: string }[];
    } = {},
  ): Promise<void> {
    await this.send(
      'POST',
      `/api/v1/library/threads/${encodeURIComponent(threadId)}/messages`,
      {
        messageId,
        query,
        backendId: opts.backendId ?? '',
        responseBlocks: opts.responseBlocks ?? [],
        status: opts.status ?? 'answering',
        sources: opts.sources ?? [],
        files: opts.files ?? [],
      },
      'upsertMessage',
    );
  }

  /** Patch blocks/status of an existing message. */
  async updateMessage(
    threadId: string,
    messageId: string,
    opts: { responseBlocks?: Block[]; status?: string },
  ): Promise<void> {
    await this.send(
      'PATCH',
      `/api/v1/library/threads/${encodeURIComponent(threadId)}/messages/${encodeURIComponent(messageId)}`,
      opts,
      'updateMessage',
    );
  }

  /** Factory bound to the LIBRARY_SVC_URL env var. */
  static create(authHeader: string | undefined): LibraryClient {
    return new LibraryClient(LIBRARY_SVC_URL, authHeader);
  }
}

View File

@@ -1,9 +0,0 @@
import { Chunk } from '../../types.js';
/**
 * Abstract base for embedding models. CONFIG is the provider-specific
 * configuration (API key, base URL, model name, …).
 */
abstract class BaseEmbedding<CONFIG> {
  constructor(protected config: CONFIG) {}
  // Embed raw strings; one vector per input text, order preserved.
  abstract embedText(texts: string[]): Promise<number[][]>;
  // Embed document chunks (content + metadata); one vector per chunk.
  abstract embedChunks(chunks: Chunk[]): Promise<number[][]>;
}
export default BaseEmbedding;

View File

@@ -1,22 +0,0 @@
import z from 'zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../types.js';
/**
 * Abstract base for chat LLMs. CONFIG is the provider-specific configuration
 * (API key, base URL, model name, …). Implementations provide blocking and
 * streaming text generation plus schema-constrained object generation.
 */
abstract class BaseLLM<CONFIG> {
  constructor(protected config: CONFIG) {}
  abstract generateText(input: GenerateTextInput): Promise<GenerateTextOutput>;
  abstract streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput>;
  // T must be a zod schema type: z.infer<T> is only defined for ZodType
  // subtypes, so the previously unconstrained T did not type-check.
  // Call sites already pass `typeof schema` of zod objects, so this is
  // backward compatible.
  abstract generateObject<T extends z.ZodTypeAny>(
    input: GenerateObjectInput,
  ): Promise<z.infer<T>>;
  abstract streamObject<T extends z.ZodTypeAny>(
    input: GenerateObjectInput,
  ): AsyncGenerator<Partial<z.infer<T>>>;
}
export default BaseLLM;

View File

@@ -1,45 +0,0 @@
import { ModelList, ProviderMetadata } from '../types.js';
import { UIConfigField } from '../../config/types.js';
import BaseLLM from './llm.js';
import BaseEmbedding from './embedding.js';
/**
 * Abstract base for model providers (OpenAI, Anthropic, Gemini, …).
 * A provider lists its models and instantiates chat/embedding model objects.
 */
abstract class BaseModelProvider<CONFIG> {
  constructor(
    protected id: string,
    protected name: string,
    protected config: CONFIG,
  ) {}
  // Built-in model catalog (typically fetched from the provider's API).
  abstract getDefaultModels(): Promise<ModelList>;
  // Default models merged with user-configured extras.
  abstract getModelList(): Promise<ModelList>;
  abstract loadChatModel(modelName: string): Promise<BaseLLM<any>>;
  abstract loadEmbeddingModel(modelName: string): Promise<BaseEmbedding<any>>;
  // Static contract: concrete providers must shadow these three.
  static getProviderConfigFields(): UIConfigField[] {
    throw new Error('Method not implemented.');
  }
  static getProviderMetadata(): ProviderMetadata {
    throw new Error('Method not Implemented.');
  }
  static parseAndValidate(raw: any): any {
    /* Static methods can't access class type parameters */
    throw new Error('Method not Implemented.');
  }
}
/** Constructor-side view of a provider class, used by the registry/factory. */
export type ProviderConstructor<CONFIG> = {
  new (id: string, name: string, config: CONFIG): BaseModelProvider<CONFIG>;
  parseAndValidate(raw: any): CONFIG;
  getProviderConfigFields: () => UIConfigField[];
  getProviderMetadata: () => ProviderMetadata;
};
/**
 * Validate raw config via the provider's own parser, then construct the
 * provider instance with it.
 */
export const createProviderInstance = <P extends ProviderConstructor<any>>(
  Provider: P,
  id: string,
  name: string,
  rawConfig: unknown,
): InstanceType<P> => {
  const validatedConfig = Provider.parseAndValidate(rawConfig);
  const instance = new Provider(id, name, validatedConfig);
  return instance as InstanceType<P>;
};
export default BaseModelProvider;

View File

@@ -1,5 +0,0 @@
import OpenAILLM from '../openai/openaiLLM.js';
// Intentionally empty subclass: the provider points this at
// https://api.anthropic.com/v1 and reuses the OpenAI-compatible LLM
// implementation unchanged. NOTE(review): presumably relies on Anthropic's
// OpenAI-compatibility layer — confirm against the provider wiring.
class AnthropicLLM extends OpenAILLM {}
export default AnthropicLLM;

View File

@@ -1,115 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import AnthropicLLM from './anthropicLLM.js';
/** Static configuration for the Anthropic provider — only an API key. */
interface AnthropicConfig {
  apiKey: string;
}
// Settings-UI fields; the key can also come from ANTHROPIC_API_KEY.
const providerConfigFields: UIConfigField[] = [
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your Anthropic API key',
    required: true,
    placeholder: 'Anthropic API Key',
    env: 'ANTHROPIC_API_KEY',
    scope: 'server',
  },
];
/**
 * Model provider backed by the Anthropic API. Chat models are listed live
 * from GET /v1/models; embedding models are not supported.
 */
class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
  constructor(id: string, name: string, config: AnthropicConfig) {
    super(id, name, config);
  }
  /** Fetch the live chat-model catalog from the Anthropic models endpoint. */
  async getDefaultModels(): Promise<ModelList> {
    const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
      method: 'GET',
      headers: {
        'x-api-key': this.config.apiKey,
        'anthropic-version': '2023-06-01',
        'Content-type': 'application/json',
      },
    });
    if (!res.ok) {
      throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
    }
    const data = (await res.json()).data;
    const models: Model[] = data.map((m: any) => {
      return {
        key: m.id,
        name: m.display_name,
      };
    });
    return {
      embedding: [],
      chat: models,
    };
  }
  /** Live catalog merged with user-configured extra chat models. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /**
   * Instantiate a chat model by key. Validates the key against the model
   * list (one extra network fetch per load) before constructing the client.
   */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Anthropic Chat Model. Invalid Model Selected',
      );
    }
    return new AnthropicLLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://api.anthropic.com/v1',
    });
  }
  // Anthropic has no embedding API; always throws.
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    throw new Error('Anthropic provider does not support embedding models.');
  }
  /** Validate raw settings into a typed config; throws on missing key. */
  static parseAndValidate(raw: any): AnthropicConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');
    return {
      apiKey: String(raw.apiKey),
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'anthropic',
      name: 'Anthropic',
    };
  }
}
export default AnthropicProvider;

View File

@@ -1,5 +0,0 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';
// Thin alias: Gemini embeddings go through the OpenAI-compatible client;
// GeminiProvider.loadEmbeddingModel supplies the /v1beta/openai baseURL.
class GeminiEmbedding extends OpenAIEmbedding {}
export default GeminiEmbedding;

View File

@@ -1,5 +0,0 @@
import OpenAILLM from '../openai/openaiLLM.js';
// Thin alias: Gemini chat goes through the OpenAI-compatible client;
// GeminiProvider.loadChatModel supplies the /v1beta/openai baseURL.
class GeminiLLM extends OpenAILLM {}
export default GeminiLLM;

View File

@@ -1,144 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import GeminiEmbedding from './geminiEmbedding.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import GeminiLLM from './geminiLLM.js';
interface GeminiConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Gemini API key',
required: true,
placeholder: 'Gemini API Key',
env: 'GEMINI_API_KEY',
scope: 'server',
},
];
/**
 * Model provider for Google Gemini. The catalogue is read from the native
 * Generative Language REST API; chat and embedding inference go through
 * Gemini's OpenAI-compatible endpoint (see loadChatModel/loadEmbeddingModel).
 */
class GeminiProvider extends BaseModelProvider<GeminiConfig> {
  constructor(id: string, name: string, config: GeminiConfig) {
    super(id, name, config);
  }
  /**
   * Fetches the model catalogue and partitions it: models advertising
   * embedText/embedContent become embedding models, models advertising
   * generateContent become chat models.
   */
  async getDefaultModels(): Promise<ModelList> {
    const res = await fetch(
      `https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
      {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
        },
      },
    );
    // Fail fast on HTTP errors (e.g. invalid API key). Without this check the
    // code below would read `data.models` from an error payload and crash
    // with a far less helpful TypeError.
    if (!res.ok) {
      throw new Error(`Failed to fetch Gemini models: ${res.statusText}`);
    }
    const data = await res.json();
    const defaultEmbeddingModels: Model[] = [];
    const defaultChatModels: Model[] = [];
    data.models.forEach((m: any) => {
      if (
        m.supportedGenerationMethods.some(
          (genMethod: string) =>
            genMethod === 'embedText' || genMethod === 'embedContent',
        )
      ) {
        defaultEmbeddingModels.push({
          key: m.name,
          name: m.displayName,
        });
      } else if (m.supportedGenerationMethods.includes('generateContent')) {
        defaultChatModels.push({
          key: m.name,
          name: m.displayName,
        });
      }
    });
    return {
      embedding: defaultEmbeddingModels,
      chat: defaultChatModels,
    };
  }
  /** Remote models merged with models configured for this provider id. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /** Loads a chat model by key; throws when the key is not in the model list. */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Gemini Chat Model. Invalid Model Selected',
      );
    }
    return new GeminiLLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
    });
  }
  /** Loads an embedding model by key; throws when the key is unknown. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Gemini Embedding Model. Invalid Model Selected.',
      );
    }
    return new GeminiEmbedding({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
    });
  }
  /** Validates raw config; only apiKey is required. */
  static parseAndValidate(raw: any): GeminiConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');
    return {
      apiKey: String(raw.apiKey),
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'gemini',
      name: 'Gemini',
    };
  }
}
export default GeminiProvider;

View File

@@ -1,5 +0,0 @@
import OpenAILLM from '../openai/openaiLLM.js';
// Thin alias: Groq serves an OpenAI-compatible API (api.groq.com/openai/v1,
// see GroqProvider.loadChatModel), so the OpenAI client is reused unchanged.
class GroqLLM extends OpenAILLM {}
export default GroqLLM;

View File

@@ -1,113 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import GroqLLM from './groqLLM.js';
interface GroqConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Groq API key',
required: true,
placeholder: 'Groq API Key',
env: 'GROQ_API_KEY',
scope: 'server',
},
];
/**
 * Model provider for Groq's OpenAI-compatible API. Chat-only by default:
 * the remote catalogue never contributes embedding models, though
 * locally-configured embedding models are still merged in getModelList.
 */
class GroqProvider extends BaseModelProvider<GroqConfig> {
  constructor(id: string, name: string, config: GroqConfig) {
    super(id, name, config);
  }
  /** Fetches the chat-model catalogue; model ids double as display names. */
  async getDefaultModels(): Promise<ModelList> {
    const res = await fetch(`https://api.groq.com/openai/v1/models`, {
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.config.apiKey}`,
      },
    });
    // Fail fast on HTTP errors (e.g. bad API key); otherwise the error body
    // would be parsed and `data.data.forEach` below would crash confusingly.
    if (!res.ok) {
      throw new Error(`Failed to fetch Groq models: ${res.statusText}`);
    }
    const data = await res.json();
    const defaultChatModels: Model[] = [];
    data.data.forEach((m: any) => {
      defaultChatModels.push({
        key: m.id,
        name: m.id,
      });
    });
    return {
      embedding: [],
      chat: defaultChatModels,
    };
  }
  /** Remote models merged with models configured for this provider id. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /** Loads a chat model by key; throws when the key is not in the model list. */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
    }
    return new GroqLLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://api.groq.com/openai/v1',
    });
  }
  /** Groq exposes no embedding endpoint here; always throws. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    throw new Error('Groq Provider does not support embedding models.');
  }
  /** Validates raw config; only apiKey is required. */
  static parseAndValidate(raw: any): GroqConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');
    return {
      apiKey: String(raw.apiKey),
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'groq',
      name: 'Groq',
    };
  }
}
export default GroqProvider;

View File

@@ -1,37 +0,0 @@
import { ModelProviderUISection } from '../../config/types.js';
import { ProviderConstructor } from '../base/provider.js';
import OpenAIProvider from './openai/index.js';
import OllamaProvider from './ollama/index.js';
import TimewebProvider from './timeweb/index.js';
import GeminiProvider from './gemini/index.js';
import TransformersProvider from './transformers/index.js';
import GroqProvider from './groq/index.js';
import LemonadeProvider from './lemonade/index.js';
import AnthropicProvider from './anthropic/index.js';
import LMStudioProvider from './lmstudio/index.js';
/** Registry of every available provider class, keyed by provider key. */
export const providers: Record<string, ProviderConstructor<any>> = {
  openai: OpenAIProvider,
  ollama: OllamaProvider,
  timeweb: TimewebProvider,
  gemini: GeminiProvider,
  transformers: TransformersProvider,
  groq: GroqProvider,
  lemonade: LemonadeProvider,
  anthropic: AnthropicProvider,
  lmstudio: LMStudioProvider,
};

/**
 * Builds the UI configuration section list from the registry: one entry per
 * provider, combining its config fields with its display metadata.
 */
export const getModelProvidersUIConfigSection =
  (): ModelProviderUISection[] =>
    Object.entries(providers).map(([providerKey, providerClass]) => ({
      fields: providerClass.getProviderConfigFields(),
      key: providerKey,
      name: providerClass.getProviderMetadata().name,
    }));

View File

@@ -1,153 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseLLM from '../../base/llm.js';
import LemonadeLLM from './lemonadeLLM.js';
import BaseEmbedding from '../../base/embedding.js';
import LemonadeEmbedding from './lemonadeEmbedding.js';
interface LemonadeConfig {
baseURL: string;
apiKey?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for Lemonade API',
required: true,
placeholder: 'https://api.lemonade.ai/v1',
env: 'LEMONADE_BASE_URL',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Lemonade API key (optional)',
required: false,
placeholder: 'Lemonade API Key',
env: 'LEMONADE_API_KEY',
scope: 'server',
},
];
/**
 * Model provider for a Lemonade server. The API key is optional; the same
 * remote catalogue is exposed for both chat and embedding models.
 */
class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
  constructor(id: string, name: string, config: LemonadeConfig) {
    super(id, name, config);
  }
  /**
   * Fetches the model catalogue from {baseURL}/models. Only models with
   * recipe === 'llamacpp' are exposed; the filtered list is used for both
   * chat and embedding.
   */
  async getDefaultModels(): Promise<ModelList> {
    try {
      const res = await fetch(`${this.config.baseURL}/models`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
          ...(this.config.apiKey
            ? { Authorization: `Bearer ${this.config.apiKey}` }
            : {}),
        },
      });
      // Fail fast on HTTP errors; otherwise `data.data.filter` below would
      // crash on an error payload with a confusing TypeError.
      if (!res.ok) {
        throw new Error(`Failed to fetch Lemonade models: ${res.statusText}`);
      }
      const data = await res.json();
      const models: Model[] = data.data
        .filter((m: any) => m.recipe === 'llamacpp')
        .map((m: any) => {
          return {
            name: m.id,
            key: m.id,
          };
        });
      return {
        embedding: models,
        chat: models,
      };
    } catch (err) {
      // fetch rejects with TypeError on network-level failures.
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
        );
      }
      throw err;
    }
  }
  /** Remote models merged with models configured for this provider id. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /** Loads a chat model by key; throws when the key is not in the model list. */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Lemonade Chat Model. Invalid Model Selected',
      );
    }
    return new LemonadeLLM({
      apiKey: this.config.apiKey || 'not-needed',
      model: key,
      baseURL: this.config.baseURL,
    });
  }
  /** Loads an embedding model by key; throws when the key is unknown. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
      );
    }
    return new LemonadeEmbedding({
      apiKey: this.config.apiKey || 'not-needed',
      model: key,
      baseURL: this.config.baseURL,
    });
  }
  /** Validates raw config; baseURL is required, apiKey optional. */
  static parseAndValidate(raw: any): LemonadeConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');
    return {
      baseURL: String(raw.baseURL),
      apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'lemonade',
      name: 'Lemonade',
    };
  }
}
export default LemonadeProvider;

View File

@@ -1,5 +0,0 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';
// Thin alias: Lemonade embeddings reuse the OpenAI-compatible client;
// LemonadeProvider.loadEmbeddingModel supplies the baseURL and API key.
class LemonadeEmbedding extends OpenAIEmbedding {}
export default LemonadeEmbedding;

View File

@@ -1,5 +0,0 @@
import OpenAILLM from '../openai/openaiLLM.js';
// Thin alias: Lemonade chat reuses the OpenAI-compatible client;
// LemonadeProvider.loadChatModel supplies the baseURL and API key.
class LemonadeLLM extends OpenAILLM {}
export default LemonadeLLM;

View File

@@ -1,143 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import LMStudioLLM from './lmstudioLLM.js';
import BaseLLM from '../../base/llm.js';
import BaseEmbedding from '../../base/embedding.js';
import LMStudioEmbedding from './lmstudioEmbedding.js';
interface LMStudioConfig {
baseURL: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for LM Studio server',
required: true,
placeholder: 'http://localhost:1234',
env: 'LM_STUDIO_BASE_URL',
scope: 'server',
},
];
/**
 * Model provider for a local LM Studio server (OpenAI-compatible API).
 * The configured base URL is normalized to always end in /v1, and the same
 * remote catalogue is exposed for both chat and embedding models.
 */
class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
  constructor(id: string, name: string, config: LMStudioConfig) {
    super(id, name, config);
  }
  /** Trims trailing slashes and appends /v1 unless it is already present. */
  private normalizeBaseURL(url: string): string {
    const trimmed = url.trim().replace(/\/+$/, '');
    return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
  }
  /** Fetches the model catalogue; used for both chat and embedding lists. */
  async getDefaultModels(): Promise<ModelList> {
    try {
      const baseURL = this.normalizeBaseURL(this.config.baseURL);
      const res = await fetch(`${baseURL}/models`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
        },
      });
      // Fail fast on HTTP errors; otherwise `data.data.map` below would
      // crash on an error payload with a confusing TypeError.
      if (!res.ok) {
        throw new Error(`Failed to fetch LM Studio models: ${res.statusText}`);
      }
      const data = await res.json();
      const models: Model[] = data.data.map((m: any) => {
        return {
          name: m.id,
          key: m.id,
        };
      });
      return {
        embedding: models,
        chat: models,
      };
    } catch (err) {
      // fetch rejects with TypeError on network-level failures.
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
        );
      }
      throw err;
    }
  }
  /** Remote models merged with models configured for this provider id. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /** Loads a chat model by key; throws when the key is not in the model list. */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading LM Studio Chat Model. Invalid Model Selected',
      );
    }
    return new LMStudioLLM({
      apiKey: 'lm-studio',
      model: key,
      baseURL: this.normalizeBaseURL(this.config.baseURL),
    });
  }
  /** Loads an embedding model by key; throws when the key is unknown. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
      );
    }
    return new LMStudioEmbedding({
      apiKey: 'lm-studio',
      model: key,
      baseURL: this.normalizeBaseURL(this.config.baseURL),
    });
  }
  /** Validates raw config; only baseURL is required. */
  static parseAndValidate(raw: any): LMStudioConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');
    return {
      baseURL: String(raw.baseURL),
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'lmstudio',
      name: 'LM Studio',
    };
  }
}
export default LMStudioProvider;

View File

@@ -1,5 +0,0 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';
// Thin alias: LM Studio serves an OpenAI-compatible API, so embeddings reuse
// the OpenAI client; LMStudioProvider supplies the normalized /v1 baseURL.
class LMStudioEmbedding extends OpenAIEmbedding {}
export default LMStudioEmbedding;

View File

@@ -1,5 +0,0 @@
import OpenAILLM from '../openai/openaiLLM.js';
// Thin alias: LM Studio serves an OpenAI-compatible API, so chat reuses the
// OpenAI client; LMStudioProvider supplies the normalized /v1 baseURL.
class LMStudioLLM extends OpenAILLM {}
export default LMStudioLLM;

View File

@@ -1,139 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseLLM from '../../base/llm.js';
import BaseEmbedding from '../../base/embedding.js';
import OllamaLLM from './ollamaLLM.js';
import OllamaEmbedding from './ollamaEmbedding.js';
interface OllamaConfig {
baseURL: string;
embeddingBaseURL?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for the Ollama',
required: true,
placeholder: process.env.DOCKER
? 'http://host.docker.internal:11434'
: 'http://localhost:11434',
env: 'OLLAMA_BASE_URL',
scope: 'server',
},
];
/**
 * Model provider for a local Ollama server. An optional embeddingBaseURL
 * allows embeddings to be served from a different Ollama instance than chat.
 */
class OllamaProvider extends BaseModelProvider<OllamaConfig> {
  constructor(id: string, name: string, config: OllamaConfig) {
    super(id, name, config);
  }
  /** Lists installed models via Ollama's /api/tags endpoint. */
  private async fetchModels(baseURL: string): Promise<Model[]> {
    const res = await fetch(`${baseURL}/api/tags`, {
      method: 'GET',
      headers: { 'Content-type': 'application/json' },
    });
    // Fail fast on HTTP errors instead of mapping over an error payload.
    if (!res.ok) {
      throw new Error(`Failed to fetch Ollama models: ${res.statusText}`);
    }
    const data = await res.json();
    return (data.models ?? []).map((m: { name?: string; model?: string }) => ({
      name: m.model ?? m.name ?? '',
      key: m.model ?? m.name ?? '',
    }));
  }
  /** Fetches chat and embedding catalogues (possibly from two servers) in parallel. */
  async getDefaultModels(): Promise<ModelList> {
    try {
      const [chatModels, embeddingModels] = await Promise.all([
        this.fetchModels(this.config.baseURL),
        this.fetchModels(
          this.config.embeddingBaseURL ?? this.config.baseURL,
        ),
      ]);
      return { chat: chatModels, embedding: embeddingModels };
    } catch (err) {
      // fetch rejects with TypeError on network-level failures.
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to Ollama API. Please ensure the base URL is correct and the Ollama server is running.',
        );
      }
      throw err;
    }
  }
  /** Remote models merged with models configured for this provider id. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /** Loads a chat model by key; throws when the key is not in the model list. */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Ollama Chat Model. Invalid Model Selected',
      );
    }
    return new OllamaLLM({
      baseURL: this.config.baseURL,
      model: key,
    });
  }
  /** Loads an embedding model by key, preferring the dedicated embedding server. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Ollama Embedding Model. Invalid Model Selected.',
      );
    }
    return new OllamaEmbedding({
      model: key,
      baseURL: this.config.embeddingBaseURL ?? this.config.baseURL,
    });
  }
  /** Validates raw config; baseURL is required, embeddingBaseURL optional. */
  static parseAndValidate(raw: unknown): OllamaConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    const obj = raw as Record<string, unknown>;
    if (!obj.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');
    return {
      baseURL: String(obj.baseURL),
      embeddingBaseURL: obj.embeddingBaseURL
        ? String(obj.embeddingBaseURL)
        : undefined,
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'ollama',
      name: 'Ollama',
    };
  }
}
export default OllamaProvider;

View File

@@ -1,40 +0,0 @@
import { Ollama } from 'ollama';
import BaseEmbedding from '../../base/embedding.js';
import { Chunk } from '../../../types.js';
type OllamaConfig = {
model: string;
baseURL?: string;
};
/**
 * Embedding backend driven by the Ollama JS client.
 * Chunk embedding is just text embedding over each chunk's content.
 */
class OllamaEmbedding extends BaseEmbedding<OllamaConfig> {
  ollamaClient: Ollama;

  constructor(protected config: OllamaConfig) {
    super(config);
    this.ollamaClient = new Ollama({
      host: this.config.baseURL || 'http://localhost:11434',
    });
  }

  /** Embeds a batch of strings; returns one vector per input string. */
  async embedText(texts: string[]): Promise<number[][]> {
    const { embeddings } = await this.ollamaClient.embed({
      model: this.config.model,
      input: texts,
    });
    return embeddings;
  }

  /** Embeds chunk contents — same call as embedText on each chunk's text. */
  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    return this.embedText(chunks.map((chunk) => chunk.content));
  }
}
export default OllamaEmbedding;

View File

@@ -1,261 +0,0 @@
import z from 'zod';
import BaseLLM from '../../base/llm.js';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types.js';
import { Ollama, Tool as OllamaTool, Message as OllamaMessage } from 'ollama';
import { parse } from 'partial-json';
import crypto from 'crypto';
import { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';
type OllamaConfig = {
baseURL: string;
model: string;
options?: GenerateOptions;
};
const reasoningModels = [
'gpt-oss',
'deepseek-r1',
'qwen3',
'deepseek-v3.1',
'magistral',
'nemotron-3-nano',
];
/**
 * Chat backend driven by the Ollama JS client. Implements blocking and
 * streaming text generation plus schema-constrained object generation.
 * NOTE(review): num_ctx is hard-coded to 32000 in generateText/streamText
 * but not set in generateObject/streamObject — confirm this asymmetry is
 * intentional.
 */
class OllamaLLM extends BaseLLM<OllamaConfig> {
  ollamaClient: Ollama;
  constructor(protected config: OllamaConfig) {
    super(config);
    this.ollamaClient = new Ollama({
      host: this.config.baseURL || 'http://localhost:11434',
    });
  }
  /**
   * Maps internal Message objects to the Ollama wire shape: tool results
   * carry tool_name, assistant messages carry indexed tool_calls; other
   * roles pass through unchanged.
   */
  convertToOllamaMessages(messages: Message[]): OllamaMessage[] {
    return messages.map((msg) => {
      if (msg.role === 'tool') {
        return {
          role: 'tool',
          tool_name: msg.name,
          content: msg.content,
        } as OllamaMessage;
      } else if (msg.role === 'assistant') {
        return {
          role: 'assistant',
          content: msg.content,
          tool_calls:
            msg.tool_calls?.map((tc, i) => ({
              function: {
                index: i,
                name: tc.name,
                arguments: tc.arguments,
              },
            })) || [],
        };
      }
      return msg;
    });
  }
  /**
   * Single (non-streaming) chat completion. Per-call options override the
   * instance-level config options; temperature defaults to 0.7.
   * For models listed in reasoningModels, `think` is disabled.
   */
  async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
    const ollamaTools: OllamaTool[] = [];
    input.tools?.forEach((tool) => {
      ollamaTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema).properties,
        },
      });
    });
    const res = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      tools: ollamaTools.length > 0 ? ollamaTools : undefined,
      ...(reasoningModels.find((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        num_ctx: 32000,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });
    return {
      content: res.message.content,
      // Ollama does not return tool-call ids here, so fresh UUIDs are minted.
      toolCalls:
        res.message.tool_calls?.map((tc) => ({
          id: crypto.randomUUID(),
          name: tc.function.name,
          arguments: tc.function.arguments,
        })) || [],
      additionalInfo: {
        reasoning: res.message.thinking,
      },
    };
  }
  /**
   * Streaming variant of generateText; yields one chunk per Ollama stream
   * event, with `done` signalling the final chunk.
   */
  async *streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput> {
    const ollamaTools: OllamaTool[] = [];
    input.tools?.forEach((tool) => {
      ollamaTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema) as any,
        },
      });
    });
    const stream = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      stream: true,
      ...(reasoningModels.find((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      tools: ollamaTools.length > 0 ? ollamaTools : undefined,
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_ctx: 32000,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });
    for await (const chunk of stream) {
      yield {
        contentChunk: chunk.message.content,
        toolCallChunk:
          chunk.message.tool_calls?.map((tc, i) => ({
            id: crypto
              .createHash('sha256')
              .update(
                `${i}-${tc.function.name}`,
              ) /* Ollama currently doesn't return a tool call ID so we're creating one based on the index and tool call name */
              .digest('hex'),
            name: tc.function.name,
            arguments: tc.function.arguments,
          })) || [],
        done: chunk.done,
        additionalInfo: {
          reasoning: chunk.message.thinking,
        },
      };
    }
  }
  /**
   * Generates a JSON object constrained by a zod schema: the schema is sent
   * as Ollama's `format`, the reply is repaired with repairJson and then
   * validated through schema.parse. Throws on any parse failure.
   */
  async generateObject<T>(input: GenerateObjectInput): Promise<T> {
    const response = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      format: z.toJSONSchema(input.schema),
      ...(reasoningModels.find((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });
    try {
      return input.schema.parse(
        JSON.parse(
          repairJson(response.message.content, {
            extractJson: true,
          }) as string,
        ),
      ) as T;
    } catch (err) {
      throw new Error(`Error parsing response from Ollama: ${err}`);
    }
  }
  /**
   * Streaming variant of generateObject: accumulates raw content and yields
   * a best-effort partial object (via partial-json) after every chunk.
   * A chunk that cannot be partially parsed yields {} rather than aborting.
   */
  async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
    let recievedObj: string = '';
    const stream = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      format: z.toJSONSchema(input.schema),
      stream: true,
      ...(reasoningModels.find((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });
    for await (const chunk of stream) {
      recievedObj += chunk.message.content;
      try {
        yield parse(recievedObj) as T;
      } catch (err) {
        console.log('Error parsing partial object from Ollama:', err);
        yield {} as T;
      }
    }
  }
}
export default OllamaLLM;

View File

@@ -1,226 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import OpenAIEmbedding from './openaiEmbedding.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import OpenAILLM from './openaiLLM.js';
interface OpenAIConfig {
apiKey: string;
baseURL: string;
}
const defaultChatModels: Model[] = [
{
name: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
name: 'GPT-4',
key: 'gpt-4',
},
{
name: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
name: 'GPT-4 omni',
key: 'gpt-4o',
},
{
name: 'GPT-4o (2024-05-13)',
key: 'gpt-4o-2024-05-13',
},
{
name: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
{
name: 'GPT 4.1 nano',
key: 'gpt-4.1-nano',
},
{
name: 'GPT 4.1 mini',
key: 'gpt-4.1-mini',
},
{
name: 'GPT 4.1',
key: 'gpt-4.1',
},
{
name: 'GPT 5 nano',
key: 'gpt-5-nano',
},
{
name: 'GPT 5',
key: 'gpt-5',
},
{
name: 'GPT 5 Mini',
key: 'gpt-5-mini',
},
{
name: 'GPT 5 Pro',
key: 'gpt-5-pro',
},
{
name: 'GPT 5.1',
key: 'gpt-5.1',
},
{
name: 'GPT 5.2',
key: 'gpt-5.2',
},
{
name: 'GPT 5.2 Pro',
key: 'gpt-5.2-pro',
},
{
name: 'o1',
key: 'o1',
},
{
name: 'o3',
key: 'o3',
},
{
name: 'o3 Mini',
key: 'o3-mini',
},
{
name: 'o4 Mini',
key: 'o4-mini',
},
];
const defaultEmbeddingModels: Model[] = [
{
name: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
name: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your OpenAI API key',
required: true,
placeholder: 'OpenAI API Key',
env: 'OPENAI_API_KEY',
scope: 'server',
},
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for the OpenAI API',
required: true,
placeholder: 'OpenAI Base URL',
default: 'https://api.openai.com/v1',
env: 'OPENAI_BASE_URL',
scope: 'server',
},
];
/**
 * Model provider for OpenAI (or any OpenAI-compatible endpoint via a custom
 * baseURL). The built-in model catalogue is only offered for the official
 * endpoint; custom endpoints rely entirely on user-configured models.
 */
class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
  constructor(id: string, name: string, config: OpenAIConfig) {
    super(id, name, config);
  }
  /**
   * Returns the static catalogue (module-level defaultChatModels /
   * defaultEmbeddingModels) only when baseURL is the official API; an empty
   * catalogue otherwise.
   */
  async getDefaultModels(): Promise<ModelList> {
    if (this.config.baseURL === 'https://api.openai.com/v1') {
      return {
        embedding: defaultEmbeddingModels,
        chat: defaultChatModels,
      };
    }
    return {
      embedding: [],
      chat: [],
    };
  }
  /** Default models merged with models configured for this provider id. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }
  /** Loads a chat model by key; throws when the key is not in the model list. */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading OpenAI Chat Model. Invalid Model Selected',
      );
    }
    return new OpenAILLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: this.config.baseURL,
    });
  }
  /** Loads an embedding model by key; throws when the key is unknown. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading OpenAI Embedding Model. Invalid Model Selected.',
      );
    }
    return new OpenAIEmbedding({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: this.config.baseURL,
    });
  }
  /** Validates raw config; both apiKey and baseURL are required. */
  static parseAndValidate(raw: any): OpenAIConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey || !raw.baseURL)
      throw new Error(
        'Invalid config provided. API key and base URL must be provided',
      );
    return {
      apiKey: String(raw.apiKey),
      baseURL: String(raw.baseURL),
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'openai',
      name: 'OpenAI',
    };
  }
}
export default OpenAIProvider;

View File

@@ -1,42 +0,0 @@
import OpenAI from 'openai';
import BaseEmbedding from '../../base/embedding.js';
import { Chunk } from '../../../types.js';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
};
/**
 * Embedding backend driven by the OpenAI SDK (works against any
 * OpenAI-compatible endpoint via baseURL).
 * Chunk embedding is just text embedding over each chunk's content.
 */
class OpenAIEmbedding extends BaseEmbedding<OpenAIConfig> {
  openAIClient: OpenAI;

  constructor(protected config: OpenAIConfig) {
    super(config);
    this.openAIClient = new OpenAI({
      apiKey: config.apiKey,
      baseURL: config.baseURL,
    });
  }

  /** Embeds a batch of strings; returns one vector per input string. */
  async embedText(texts: string[]): Promise<number[][]> {
    const res = await this.openAIClient.embeddings.create({
      model: this.config.model,
      input: texts,
    });
    return res.data.map((item) => item.embedding);
  }

  /** Embeds chunk contents — same call as embedText on each chunk's text. */
  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    return this.embedText(chunks.map((chunk) => chunk.content));
  }
}
export default OpenAIEmbedding;

View File

@@ -1,275 +0,0 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm.js';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
ToolCall,
} from '../../types.js';
import { parse } from 'partial-json';
import z from 'zod';
import {
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionToolMessageParam,
} from 'openai/resources/index.mjs';
import { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
options?: GenerateOptions;
};
/**
 * LLM backed by the OpenAI chat-completions (and Responses) API.
 * Adapts the official SDK to the internal BaseLLM contract: single-shot
 * text, streamed text with incremental tool-call parsing, and structured
 * object generation via zod schemas.
 */
class OpenAILLM extends BaseLLM<OpenAIConfig> {
  openAIClient: OpenAI;
  constructor(protected config: OpenAIConfig) {
    super(config);
    this.openAIClient = new OpenAI({
      apiKey: this.config.apiKey,
      baseURL: this.config.baseURL || 'https://api.openai.com/v1',
    });
  }
  /**
   * Maps internal Message objects to OpenAI chat message params.
   * Tool results carry tool_call_id; assistant messages re-serialize any
   * tool calls; other roles pass through unchanged.
   */
  convertToOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[] {
    return messages.map((msg) => {
      if (msg.role === 'tool') {
        return {
          role: 'tool',
          tool_call_id: msg.id,
          content: msg.content,
        } as ChatCompletionToolMessageParam;
      } else if (msg.role === 'assistant') {
        return {
          role: 'assistant',
          content: msg.content,
          ...(msg.tool_calls &&
            msg.tool_calls.length > 0 && {
              tool_calls: msg.tool_calls?.map((tc) => ({
                id: tc.id,
                type: 'function',
                function: {
                  name: tc.name,
                  arguments: JSON.stringify(tc.arguments),
                },
              })),
            }),
        } as ChatCompletionAssistantMessageParam;
      }
      return msg;
    });
  }
  /** Single-shot generation; returns content plus any parsed tool calls. */
  async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];
    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });
    const response = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      messages: this.convertToOpenAIMessages(input.messages),
      // Per-call options win over provider-level config defaults.
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
    });
    if (response.choices && response.choices.length > 0) {
      return {
        // content is null (not a string) for pure tool-call responses, so
        // coalesce instead of asserting non-null.
        content: response.choices[0].message.content ?? '',
        toolCalls:
          response.choices[0].message.tool_calls
            ?.map((tc) => {
              if (tc.type === 'function') {
                return {
                  name: tc.function.name,
                  id: tc.id,
                  arguments: JSON.parse(tc.function.arguments),
                };
              }
            })
            .filter((tc) => tc !== undefined) || [],
        additionalInfo: {
          finishReason: response.choices[0].finish_reason,
        },
      };
    }
    throw new Error('No response from OpenAI');
  }
  /**
   * Streams content deltas. Tool-call argument fragments are accumulated per
   * call and re-parsed with a partial-JSON parser on every chunk so callers
   * always see the best-effort current arguments object.
   */
  async *streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];
    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });
    const stream = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      messages: this.convertToOpenAIMessages(input.messages),
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      stream: true,
    });
    const receivedToolCalls: { name: string; id: string; arguments: string }[] =
      [];
    for await (const chunk of stream) {
      if (chunk.choices && chunk.choices.length > 0) {
        const toolCalls = chunk.choices[0].delta.tool_calls;
        yield {
          contentChunk: chunk.choices[0].delta.content || '',
          toolCallChunk:
            toolCalls?.map((tc) => {
              if (!receivedToolCalls[tc.index]) {
                const call = {
                  name: tc.function?.name ?? '',
                  id: tc.id ?? '',
                  arguments: tc.function?.arguments || '',
                };
                // Store by tc.index, not push(): deltas for parallel tool
                // calls are keyed by index and may interleave, so appending
                // would accumulate arguments into the wrong call.
                receivedToolCalls[tc.index] = call;
                return { ...call, arguments: parse(call.arguments || '{}') };
              } else {
                const existingCall = receivedToolCalls[tc.index];
                existingCall.arguments += tc.function?.arguments || '';
                return {
                  ...existingCall,
                  arguments: parse(existingCall.arguments),
                };
              }
            }) || [],
          done: chunk.choices[0].finish_reason !== null,
          additionalInfo: {
            finishReason: chunk.choices[0].finish_reason,
          },
        };
      }
    }
  }
  /**
   * Generates a zod-validated object. The raw model output is passed through
   * repairJson first to tolerate minor formatting defects before parsing.
   * @throws Error when the API returns nothing or the output fails the schema.
   */
  async generateObject<T>(input: GenerateObjectInput): Promise<T> {
    const response = await this.openAIClient.chat.completions.parse({
      messages: this.convertToOpenAIMessages(input.messages),
      model: this.config.model,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      response_format: zodResponseFormat(input.schema, 'object'),
    });
    if (response.choices && response.choices.length > 0) {
      try {
        return input.schema.parse(
          JSON.parse(
            repairJson(response.choices[0].message.content!, {
              extractJson: true,
            }) as string,
          ),
        ) as T;
      } catch (err) {
        throw new Error(`Error parsing response from OpenAI: ${err}`);
      }
    }
    throw new Error('No response from OpenAI');
  }
  /**
   * Streams partial objects via the Responses API, yielding a best-effort
   * partial parse on every delta and the final parse when the output is done.
   * NOTE(review): the sampling params mirror chat.completions; some (e.g.
   * max_completion_tokens, stop) may not be honored by the Responses API —
   * confirm against the SDK before relying on them here.
   */
  async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
    let receivedObj: string = '';
    const stream = this.openAIClient.responses.stream({
      model: this.config.model,
      input: input.messages,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      text: {
        format: zodTextFormat(input.schema, 'object'),
      },
    });
    for await (const chunk of stream) {
      if (chunk.type === 'response.output_text.delta' && chunk.delta) {
        receivedObj += chunk.delta;
        try {
          yield parse(receivedObj) as T;
        } catch (err) {
          // Partial JSON may be momentarily unparseable; emit an empty
          // placeholder rather than aborting the stream.
          console.log('Error parsing partial object from OpenAI:', err);
          yield {} as T;
        }
      } else if (chunk.type === 'response.output_text.done' && chunk.text) {
        try {
          yield parse(chunk.text) as T;
        } catch (err) {
          throw new Error(`Error parsing response from OpenAI: ${err}`);
        }
      }
    }
  }
}
export default OpenAILLM;

View File

@@ -1,172 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseLLM from '../../base/llm.js';
import BaseEmbedding from '../../base/embedding.js';
import TimewebLLM from './timewebLLM.js';
interface TimewebConfig {
baseURL: string;
agentAccessId: string;
apiKey: string;
model: string;
xProxySource?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'API Base URL',
key: 'baseURL',
description: 'Timeweb Cloud AI API base URL',
required: true,
placeholder: 'https://api.timeweb.cloud',
env: 'TIMEWEB_API_BASE_URL',
scope: 'server',
},
{
type: 'string',
name: 'Agent Access ID',
key: 'agentAccessId',
description: 'Agent access ID from Timeweb Cloud AI',
required: true,
placeholder: '',
env: 'TIMEWEB_AGENT_ACCESS_ID',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Bearer token for Timeweb Cloud AI',
required: true,
placeholder: '',
env: 'TIMEWEB_API_KEY',
scope: 'server',
},
{
type: 'string',
name: 'Model',
key: 'model',
description: 'Model key (e.g. gpt-4)',
required: true,
placeholder: 'gpt-4',
env: 'LLM_CHAT_MODEL',
scope: 'server',
},
{
type: 'string',
name: 'X-Proxy-Source',
key: 'xProxySource',
description: 'Optional header for Timeweb API',
required: false,
placeholder: '',
env: 'TIMEWEB_X_PROXY_SOURCE',
scope: 'server',
},
];
/**
 * Provider for Timeweb Cloud AI agents. Exposes chat models only; the
 * platform offers no embedding endpoint (see loadEmbeddingModel).
 */
class TimewebProvider extends BaseModelProvider<TimewebConfig> {
  constructor(id: string, name: string, config: TimewebConfig) {
    super(id, name, config);
  }
  /** Agent-scoped OpenAI-compatible base URL for this provider's config. */
  getTimewebBaseURL(): string {
    const base = this.config.baseURL.replace(/\/$/, '');
    return `${base}/api/v1/cloud-ai/agents/${this.config.agentAccessId}/v1`;
  }
  /**
   * Fetches the model list from the Timeweb API. On any failure (network,
   * auth, bad payload) falls back to the single statically configured model
   * so the provider stays usable offline.
   */
  async getDefaultModels(): Promise<ModelList> {
    try {
      const url = `${this.getTimewebBaseURL()}/models`;
      const res = await fetch(url, {
        headers: {
          Authorization: `Bearer ${this.config.apiKey}`,
          'Content-Type': 'application/json',
          ...(this.config.xProxySource && {
            'x-proxy-source': this.config.xProxySource,
          }),
        },
      });
      if (!res.ok) {
        throw new Error(`Timeweb API error: ${res.status} ${res.statusText}`);
      }
      const data = (await res.json()) as { data?: { id: string }[] };
      const models: Model[] = (data.data ?? []).map((m) => ({
        name: m.id,
        key: m.id,
      }));
      // If the API returned an empty list, use the configured model.
      const chat =
        models.length > 0
          ? models
          : [{ name: this.config.model, key: this.config.model }];
      return {
        chat,
        embedding: [],
      };
    } catch (err) {
      // Previously swallowed silently; log so misconfiguration is visible,
      // then fall back to the configured model.
      console.error(`[Timeweb] Failed to fetch model list: ${err}`);
      return {
        chat: [{ name: this.config.model, key: this.config.model }],
        embedding: [],
      };
    }
  }
  async getModelList(): Promise<ModelList> {
    return this.getDefaultModels();
  }
  /** Builds a TimewebLLM; `key` is not validated against the remote list. */
  async loadChatModel(key: string): Promise<BaseLLM<unknown>> {
    return new TimewebLLM({
      apiKey: this.config.apiKey,
      baseURL: this.getTimewebBaseURL(),
      model: key,
      defaultHeaders: this.config.xProxySource
        ? { 'x-proxy-source': this.config.xProxySource }
        : undefined,
    });
  }
  async loadEmbeddingModel(_key: string): Promise<BaseEmbedding<unknown>> {
    throw new Error(
      'Timeweb Cloud AI does not provide embedding models. Use Ollama for embeddings.',
    );
  }
  /**
   * Validates raw provider config; baseURL, agentAccessId and apiKey are
   * mandatory, model defaults to 'gpt-4', xProxySource is optional.
   */
  static parseAndValidate(raw: unknown): TimewebConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    const obj = raw as Record<string, unknown>;
    if (!obj.baseURL || !obj.agentAccessId || !obj.apiKey)
      throw new Error(
        'Invalid config. baseURL, agentAccessId and apiKey are required',
      );
    return {
      baseURL: String(obj.baseURL),
      agentAccessId: String(obj.agentAccessId),
      apiKey: String(obj.apiKey),
      model: String(obj.model || 'gpt-4'),
      xProxySource: obj.xProxySource ? String(obj.xProxySource) : undefined,
    };
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'timeweb',
      name: 'Timeweb Cloud AI',
    };
  }
}
export default TimewebProvider;

View File

@@ -1,265 +0,0 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm.js';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types.js';
import { parse } from 'partial-json';
import z from 'zod';
import {
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionToolMessageParam,
} from 'openai/resources/index.mjs';
import { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';
type TimewebConfig = {
apiKey: string;
baseURL: string;
model: string;
options?: GenerateOptions;
defaultHeaders?: Record<string, string>;
};
/**
 * LLM backed by a Timeweb Cloud AI agent's OpenAI-compatible endpoint.
 * Reuses the official OpenAI SDK pointed at the agent base URL, with
 * optional extra default headers (x-proxy-source).
 */
class TimewebLLM extends BaseLLM<TimewebConfig> {
  openAIClient: OpenAI;
  constructor(protected config: TimewebConfig) {
    super(config);
    this.openAIClient = new OpenAI({
      apiKey: this.config.apiKey,
      baseURL: this.config.baseURL,
      defaultHeaders: this.config.defaultHeaders,
    });
  }
  /**
   * Maps internal Message objects to OpenAI chat message params: tool
   * results carry tool_call_id, assistant messages re-serialize tool calls,
   * other roles pass through unchanged.
   */
  convertToOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[] {
    return messages.map((msg) => {
      if (msg.role === 'tool') {
        return {
          role: 'tool',
          tool_call_id: msg.id,
          content: msg.content,
        } as ChatCompletionToolMessageParam;
      } else if (msg.role === 'assistant') {
        return {
          role: 'assistant',
          content: msg.content,
          ...(msg.tool_calls &&
            msg.tool_calls.length > 0 && {
              tool_calls: msg.tool_calls?.map((tc) => ({
                id: tc.id,
                type: 'function' as const,
                function: {
                  name: tc.name,
                  arguments: JSON.stringify(tc.arguments),
                },
              })),
            }),
        } as ChatCompletionAssistantMessageParam;
      }
      return msg;
    });
  }
  /** Single-shot generation; returns content plus any parsed tool calls. */
  async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];
    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });
    // Per-call options win over provider-level config defaults.
    const response = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      messages: this.convertToOpenAIMessages(input.messages),
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
    });
    if (response.choices && response.choices.length > 0) {
      return {
        content: response.choices[0].message.content ?? '',
        toolCalls:
          response.choices[0].message.tool_calls
            ?.map((tc) => {
              if (tc.type === 'function') {
                return {
                  name: tc.function.name,
                  id: tc.id!,
                  arguments: JSON.parse(tc.function.arguments ?? '{}'),
                };
              }
              return undefined;
            })
            .filter((tc): tc is NonNullable<typeof tc> => tc !== undefined) ?? [],
        additionalInfo: {
          finishReason: response.choices[0].finish_reason ?? undefined,
        },
      };
    }
    throw new Error('No response from Timeweb');
  }
  /**
   * Streams content deltas and incrementally-parsed tool-call arguments.
   * Tool-call fragments are accumulated per delta index and re-parsed with a
   * partial-JSON parser on every chunk. Stream-open failures are logged with
   * whatever status/error detail the SDK error exposes, then rethrown.
   */
  async *streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];
    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });
    let stream;
    try {
      stream = await this.openAIClient.chat.completions.create({
        model: this.config.model,
        messages: this.convertToOpenAIMessages(input.messages),
        tools: openaiTools.length > 0 ? openaiTools : undefined,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
        top_p: input.options?.topP ?? this.config.options?.topP,
        max_completion_tokens:
          input.options?.maxTokens ?? this.config.options?.maxTokens,
        stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ?? this.config.options?.presencePenalty,
        stream: true,
      });
    } catch (err: unknown) {
      // Collect whichever diagnostic fields this SDK error happens to carry;
      // body is truncated to keep log lines bounded.
      const e = err as {
        status?: number;
        error?: { message?: string; code?: string };
        response?: { status?: number; body?: unknown };
      };
      const details = [
        e?.status != null && `status=${e.status}`,
        e?.error?.message && `error=${e.error.message}`,
        e?.error?.code && `code=${e.error.code}`,
        e?.response?.body != null &&
          `body=${JSON.stringify(e.response.body).slice(0, 300)}`,
      ]
        .filter(Boolean)
        .join(', ');
      console.error(
        `[Timeweb] streamText failed: ${details || String(err)}`,
        err,
      );
      throw err;
    }
    const receivedToolCalls: { name: string; id: string; arguments: string }[] =
      [];
    for await (const chunk of stream) {
      if (chunk.choices && chunk.choices.length > 0) {
        const toolCalls = chunk.choices[0].delta.tool_calls;
        yield {
          contentChunk: chunk.choices[0].delta.content ?? '',
          toolCallChunk:
            toolCalls?.map((tc) => {
              if (!receivedToolCalls[tc.index!]) {
                const call = {
                  name: tc.function?.name ?? '',
                  id: tc.id ?? '',
                  arguments: tc.function?.arguments ?? '',
                };
                // Keyed by tc.index so interleaved deltas for parallel tool
                // calls accumulate into the right slot.
                receivedToolCalls[tc.index!] = call;
                return { ...call, arguments: parse(call.arguments || '{}') };
              } else {
                const existingCall = receivedToolCalls[tc.index!];
                existingCall.arguments += tc.function?.arguments ?? '';
                return {
                  ...existingCall,
                  arguments: parse(existingCall.arguments),
                };
              }
            }) || [],
          done: chunk.choices[0].finish_reason !== null,
          additionalInfo: {
            finishReason: chunk.choices[0].finish_reason ?? undefined,
          },
        };
      }
    }
  }
  /**
   * Generates a zod-validated object. The raw output goes through repairJson
   * first to tolerate minor formatting defects before schema parsing.
   * @throws Error when the API returns nothing or the output fails the schema.
   */
  async generateObject<T>(input: GenerateObjectInput): Promise<T> {
    const response = await this.openAIClient.chat.completions.create({
      messages: this.convertToOpenAIMessages(input.messages),
      model: this.config.model,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      response_format: zodResponseFormat(input.schema, 'object'),
    });
    if (response.choices && response.choices.length > 0) {
      try {
        return input.schema.parse(
          JSON.parse(
            repairJson(response.choices[0].message.content ?? '{}', {
              extractJson: true,
            }) as string,
          ),
        ) as T;
      } catch (err) {
        throw new Error(`Error parsing response from Timeweb: ${err}`);
      }
    }
    throw new Error('No response from Timeweb');
  }
  /**
   * NOTE: not a true stream — Timeweb has no Responses API here, so this
   * generates the complete object once and yields it as a single chunk.
   */
  async *streamObject<T>(
    input: GenerateObjectInput,
  ): AsyncGenerator<Partial<T>> {
    const result = await this.generateObject<T>(input);
    yield result;
  }
}
export default TimewebLLM;

View File

@@ -1,88 +0,0 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import BaseEmbedding from '../../base/embedding.js';
import TransformerEmbedding from './transformerEmbedding.js';
interface TransformersConfig {}
const defaultEmbeddingModels: Model[] = [
{
name: 'all-MiniLM-L6-v2',
key: 'Xenova/all-MiniLM-L6-v2',
},
{
name: 'mxbai-embed-large-v1',
key: 'mixedbread-ai/mxbai-embed-large-v1',
},
{
name: 'nomic-embed-text-v1',
key: 'Xenova/nomic-embed-text-v1',
},
];
const providerConfigFields: UIConfigField[] = [];
/**
 * Provider for local HuggingFace Transformers embedding models.
 * Embedding-only: chat model requests are rejected.
 */
class TransformersProvider extends BaseModelProvider<TransformersConfig> {
  constructor(id: string, name: string, config: TransformersConfig) {
    super(id, name, config);
  }
  /** Built-in local embedding models; this provider has no chat models. */
  async getDefaultModels(): Promise<ModelList> {
    return {
      embedding: [...defaultEmbeddingModels],
      chat: [],
    };
  }
  /** Default models plus any embedding models added via configuration. */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [],
    };
  }
  async loadChatModel(_key: string): Promise<BaseLLM<any>> {
    throw new Error('Transformers Provider does not support chat models.');
  }
  /**
   * Resolves `key` against the embedding model list and returns a local
   * TransformerEmbedding for it.
   * @throws Error when `key` is not a known embedding model.
   */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      // Fixed: message previously said "OpenAI" (copy-paste from the OpenAI
      // provider); name the actual provider and include the offending key.
      throw new Error(
        `Error loading Transformers embedding model. Invalid model selected: ${key}`,
      );
    }
    return new TransformerEmbedding({
      model: key,
    });
  }
  /** Config is empty for this provider; nothing to validate. */
  static parseAndValidate(_raw: any): TransformersConfig {
    return {};
  }
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'transformers',
      name: 'Transformers',
    };
  }
}
export default TransformersProvider;

View File

@@ -1,41 +0,0 @@
import { Chunk } from '../../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import type { FeatureExtractionPipeline } from '@huggingface/transformers';
type TransformerConfig = {
model: string;
};
/**
 * Local embedding model using @huggingface/transformers feature extraction.
 * The pipeline is created lazily on first use and cached for all later calls.
 */
class TransformerEmbedding extends BaseEmbedding<TransformerConfig> {
  private pipelinePromise: Promise<FeatureExtractionPipeline> | null = null;
  constructor(protected config: TransformerConfig) {
    super(config);
  }
  /** Embeds raw strings; one mean-pooled, normalized vector per input. */
  async embedText(texts: string[]): Promise<number[][]> {
    return this.embed(texts);
  }
  /** Embeds chunk contents; one vector per chunk, in order. */
  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    return this.embed(chunks.map((chunk) => chunk.content));
  }
  /** Runs the cached pipeline over the inputs. */
  private async embed(inputs: string[]) {
    const pipe = await this.getPipeline();
    const output = await pipe(inputs, { pooling: 'mean', normalize: true });
    return output.tolist() as number[][];
  }
  /** Lazily imports the library and builds the pipeline exactly once. */
  private getPipeline(): Promise<FeatureExtractionPipeline> {
    if (this.pipelinePromise === null) {
      this.pipelinePromise = (async () => {
        const { pipeline } = await import('@huggingface/transformers');
        const extractor = await pipeline(
          'feature-extraction',
          this.config.model,
          { dtype: 'fp32' },
        );
        return extractor as FeatureExtractionPipeline;
      })();
    }
    return this.pipelinePromise;
  }
}

View File

@@ -1,256 +0,0 @@
import { ConfigModelProvider } from '../config/types.js';
import BaseModelProvider, { createProviderInstance } from './base/provider.js';
import { getConfiguredModelProviders } from '../config/serverRegistry.js';
import { providers } from './providers/index.js';
import { MinimalProvider, ModelList } from './types.js';
import configManager from '../config/index.js';
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';

/**
 * Holds the set of active model providers and resolves chat/embedding models
 * by (providerId, modelKey). Providers come from local config by default, or
 * are lazily fetched from llm-svc when LLM_SVC_URL is set.
 */
class ModelRegistry {
  activeProviders: (ConfigModelProvider & {
    provider: BaseModelProvider<any>;
  })[] = [];
  // Guards against re-fetching the provider list from llm-svc.
  private llmSvcInitialized = false;
  constructor() {
    // Without llm-svc the registry is populated synchronously from config.
    if (!LLM_SVC_URL) {
      this.initializeActiveProviders();
    }
  }
  /** Fetches provider configs from llm-svc once and instantiates them. */
  async initFromLlmSvc(): Promise<void> {
    if (this.llmSvcInitialized || this.activeProviders.length > 0) return;
    const base = LLM_SVC_URL.replace(/\/$/, '');
    const res = await fetch(`${base}/api/v1/providers?internal=1`, {
      signal: AbortSignal.timeout(10000),
    });
    if (!res.ok) throw new Error(`llm-svc providers fetch failed: ${res.status}`);
    const data = (await res.json()) as { providers: ConfigModelProvider[] };
    const configuredProviders = data.providers ?? [];
    configuredProviders.forEach((p) => {
      try {
        const providerCtor = providers[p.type];
        if (!providerCtor) throw new Error(`Invalid provider type: ${p.type}`);
        this.activeProviders.push({
          ...p,
          provider: createProviderInstance(providerCtor, p.id, p.name, p.config),
        });
      } catch (err) {
        // One bad provider must not block the rest.
        console.error(
          `Failed to initialize provider from llm-svc. Type: ${p.type}, ID: ${p.id}, Error: ${err}`,
        );
      }
    });
    this.llmSvcInitialized = true;
  }
  /** Instantiates all providers from the local configuration. */
  private initializeActiveProviders(): void {
    const configuredProviders = getConfiguredModelProviders();
    configuredProviders.forEach((p) => {
      try {
        const providerCtor = providers[p.type];
        if (!providerCtor) throw new Error('Invalid provider type');
        this.activeProviders.push({
          ...p,
          provider: createProviderInstance(providerCtor, p.id, p.name, p.config),
        });
      } catch (err) {
        console.error(
          `Failed to initialize provider. Type: ${p.type}, ID: ${p.id}, Config: ${JSON.stringify(p.config)}, Error: ${err}`,
        );
      }
    });
  }
  /**
   * Returns all active providers with their model lists. A provider whose
   * model-list fetch fails is reported with a sentinel 'error' chat model
   * instead of being dropped.
   */
  async getActiveProviders(): Promise<MinimalProvider[]> {
    if (LLM_SVC_URL) await this.initFromLlmSvc();
    // Named `result` to avoid shadowing the imported `providers` map.
    const result: MinimalProvider[] = [];
    await Promise.all(
      this.activeProviders.map(async (p) => {
        let m: ModelList = { chat: [], embedding: [] };
        try {
          m = await p.provider.getModelList();
        } catch (err: any) {
          console.error(
            `Failed to get model list. Type: ${p.type}, ID: ${p.id}, Error: ${err.message}`,
          );
          m = {
            chat: [
              {
                key: 'error',
                name: err.message,
              },
            ],
            embedding: [],
          };
        }
        result.push({
          id: p.id,
          name: p.name,
          chatModels: m.chat,
          embeddingModels: m.embedding,
        });
      }),
    );
    return result;
  }
  /** Loads a chat model from the provider with the given id. */
  async loadChatModel(providerId: string, modelName: string) {
    if (LLM_SVC_URL) await this.initFromLlmSvc();
    const provider = this.activeProviders.find((p) => p.id === providerId);
    if (!provider) throw new Error('Invalid provider id');
    return provider.provider.loadChatModel(modelName);
  }
  /** Loads an embedding model from the provider with the given id. */
  async loadEmbeddingModel(providerId: string, modelName: string) {
    if (LLM_SVC_URL) await this.initFromLlmSvc();
    const provider = this.activeProviders.find((p) => p.id === providerId);
    if (!provider) throw new Error('Invalid provider id');
    return provider.provider.loadEmbeddingModel(modelName);
  }
  /**
   * Persists and activates a new provider. Model-list failures are reported
   * via a sentinel 'error' chat model rather than failing the add.
   */
  async addProvider(
    type: string,
    name: string,
    config: Record<string, any>,
  ): Promise<ConfigModelProvider> {
    const providerCtor = providers[type];
    if (!providerCtor) throw new Error('Invalid provider type');
    const newProvider = configManager.addModelProvider(type, name, config);
    const instance = createProviderInstance(
      providerCtor,
      newProvider.id,
      newProvider.name,
      newProvider.config,
    );
    const m = await this.safeGetModelList(instance, type, newProvider.id, 'newly added');
    this.activeProviders.push({
      ...newProvider,
      provider: instance,
    });
    return {
      ...newProvider,
      chatModels: m.chat || [],
      embeddingModels: m.embedding || [],
    };
  }
  /** Removes a provider from persistent config and the active set. */
  async removeProvider(providerId: string): Promise<void> {
    configManager.removeModelProvider(providerId);
    this.activeProviders = this.activeProviders.filter(
      (p) => p.id !== providerId,
    );
    return;
  }
  /** Updates a provider's name/config and swaps in a fresh instance. */
  async updateProvider(
    providerId: string,
    name: string,
    config: any,
  ): Promise<ConfigModelProvider> {
    const updated = await configManager.updateModelProvider(
      providerId,
      name,
      config,
    );
    const instance = createProviderInstance(
      providers[updated.type],
      providerId,
      name,
      config,
    );
    const m = await this.safeGetModelList(instance, updated.type, updated.id, 'updated');
    // Fix: replace the stale entry instead of appending a duplicate with the
    // same id — otherwise find()-by-id keeps resolving the old instance.
    this.activeProviders = this.activeProviders.filter(
      (p) => p.id !== providerId,
    );
    this.activeProviders.push({
      ...updated,
      provider: instance,
    });
    return {
      ...updated,
      chatModels: m.chat || [],
      embeddingModels: m.embedding || [],
    };
  }
  /** Gets a provider's model list, degrading to a sentinel error model. */
  private async safeGetModelList(
    instance: BaseModelProvider<any>,
    type: string,
    id: string,
    context: string,
  ): Promise<ModelList> {
    try {
      return await instance.getModelList();
    } catch (err: any) {
      console.error(
        `Failed to get model list for ${context} provider. Type: ${type}, ID: ${id}, Error: ${err.message}`,
      );
      return {
        chat: [
          {
            key: 'error',
            name: err.message,
          },
        ],
        embedding: [],
      };
    }
  }
  /* Using async here because maybe in the future we might want to add some validation?? */
  async addProviderModel(
    providerId: string,
    type: 'embedding' | 'chat',
    model: any,
  ): Promise<any> {
    const addedModel = configManager.addProviderModel(providerId, type, model);
    return addedModel;
  }
  async removeProviderModel(
    providerId: string,
    type: 'embedding' | 'chat',
    modelKey: string,
  ): Promise<void> {
    configManager.removeProviderModel(providerId, type, modelKey);
    return;
  }
}
export default ModelRegistry;

View File

@@ -1,103 +0,0 @@
import z from 'zod';
import { Message } from '../types.js';
/** A model entry as exposed to the UI: display name + provider-specific key. */
type Model = {
  name: string;
  key: string;
};
/** Models a provider exposes, grouped by capability. */
type ModelList = {
  embedding: Model[];
  chat: Model[];
};
/** Static identity of a provider implementation (registry key + label). */
type ProviderMetadata = {
  name: string;
  key: string;
};
/** Provider view stripped to what model pickers need. */
type MinimalProvider = {
  id: string;
  name: string;
  chatModels: Model[];
  embeddingModels: Model[];
};
/** Reference to a model scoped to the provider that owns it. */
type ModelWithProvider = {
  key: string;
  providerId: string;
};
/** Sampling options shared by all generate/stream calls. */
type GenerateOptions = {
  temperature?: number;
  maxTokens?: number;
  topP?: number;
  stopSequences?: string[];
  frequencyPenalty?: number;
  presencePenalty?: number;
};
/** Tool definition offered to the model; arguments described by a zod schema. */
type Tool = {
  name: string;
  description: string;
  schema: z.ZodObject<any>;
};
/** A tool invocation requested by the model, with parsed arguments. */
type ToolCall = {
  id: string;
  name: string;
  arguments: Record<string, any>;
};
/** Input for text generation: conversation, optional tools and options. */
type GenerateTextInput = {
  messages: Message[];
  tools?: Tool[];
  options?: GenerateOptions;
};
/** Complete text result: content, tool calls, provider-specific extras. */
type GenerateTextOutput = {
  content: string;
  toolCalls: ToolCall[];
  additionalInfo?: Record<string, any>;
};
/** One streaming delta: content fragment + current tool-call state. */
type StreamTextOutput = {
  contentChunk: string;
  toolCallChunk: ToolCall[];
  additionalInfo?: Record<string, any>;
  done?: boolean;
};
/** Input for structured generation validated against a zod schema. */
type GenerateObjectInput = {
  schema: z.ZodTypeAny;
  messages: Message[];
  options?: GenerateOptions;
};
/** Complete structured result. */
type GenerateObjectOutput<T> = {
  object: T;
  additionalInfo?: Record<string, any>;
};
/** One streaming delta of a structured result (partial object). */
type StreamObjectOutput<T> = {
  objectChunk: Partial<T>;
  additionalInfo?: Record<string, any>;
  done?: boolean;
};
export type {
  Model,
  ModelList,
  ProviderMetadata,
  MinimalProvider,
  ModelWithProvider,
  GenerateOptions,
  GenerateTextInput,
  GenerateTextOutput,
  StreamTextOutput,
  GenerateObjectInput,
  GenerateObjectOutput,
  StreamObjectOutput,
  Tool,
  ToolCall,
};

View File

@@ -1,52 +0,0 @@
/** Maps a locale's primary language code to the language name used in LLM prompts. */
const LOCALE_TO_LANGUAGE: Record<string, string> = {
  ru: 'Russian',
  en: 'English',
  de: 'German',
  fr: 'French',
  es: 'Spanish',
  it: 'Italian',
  pt: 'Portuguese',
  uk: 'Ukrainian',
  pl: 'Polish',
  zh: 'Chinese',
  ja: 'Japanese',
  ko: 'Korean',
  ar: 'Arabic',
  tr: 'Turkish',
  be: 'Belarusian',
  kk: 'Kazakh',
  sv: 'Swedish',
  nb: 'Norwegian',
  da: 'Danish',
  fi: 'Finnish',
  cs: 'Czech',
  sk: 'Slovak',
  hu: 'Hungarian',
  ro: 'Romanian',
  bg: 'Bulgarian',
  hr: 'Croatian',
  sr: 'Serbian',
  el: 'Greek',
  hi: 'Hindi',
  th: 'Thai',
  vi: 'Vietnamese',
  id: 'Indonesian',
  ms: 'Malay',
  he: 'Hebrew',
  fa: 'Persian',
};
/**
 * Returns a system-prompt instruction telling the LLM which language to
 * answer in. Append to the system prompt of every request (classifier,
 * researcher, writer).
 *
 * Accepts BCP-47 ("ru-RU") and POSIX ("ru_RU") separators, matching the
 * language code case-insensitively; unknown codes fall back to the raw code.
 * Returns '' when no locale is provided.
 */
export function getLocaleInstruction(locale?: string): string {
  if (!locale) return '';
  const primary = locale.split(/[-_]/)[0];
  const languageName = LOCALE_TO_LANGUAGE[primary.toLowerCase()] ?? primary;
  return `
<response_language>
User's locale is ${locale}. Always format your response in ${languageName}, regardless of the language of the query or search results. Even when the discussed content is in another language, respond in ${languageName}.
</response_language>`;
}

View File

@@ -1,356 +0,0 @@
import BaseEmbedding from '../../models/base/embedding.js';
import UploadStore from '../../uploads/store.js';
import { getLocaleInstruction } from '../locale.js';
/**
 * System prompt for 'speed' mode: pure tool orchestration with no visible
 * reasoning preamble — the model calls tools directly and finishes with `done`.
 *
 * @param actionDesc   Rendered descriptions of the tools available this turn.
 * @param i            Zero-based index of the current research iteration.
 * @param maxIteration Total iteration budget for this turn.
 * @param fileDesc     Rendered `<file>` blocks for user uploads ('' when none).
 */
const getSpeedPrompt = (
  actionDesc: string,
  i: number,
  maxIteration: number,
  fileDesc: string,
) => {
  // Human-readable date (e.g. "February 23, 2026") to ground time-sensitive queries.
  const today = new Date().toLocaleDateString('en-US', {
    year: 'numeric',
    month: 'long',
    day: 'numeric',
  });
  return `
Assistant is an action orchestrator. Your job is to fulfill user requests by selecting and executing the available tools—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations so act efficiently.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Fulfill the user's request as quickly as possible using the available tools.
Call tools to gather information or perform tasks as needed.
</goal>
<core_principle>
Your knowledge is outdated; if you have web search, use it to ground answers even for seemingly basic facts.
</core_principle>
<examples>
## Example 1: Unknown Subject
User: "What is Kimi K2?"
Action: web_search ["Kimi K2", "Kimi K2 AI"] then done.
## Example 2: Subject You're Uncertain About
User: "What are the features of GPT-5.1?"
Action: web_search ["GPT-5.1", "GPT-5.1 features", "GPT-5.1 release"] then done.
## Example 3: After Tool calls Return Results
User: "What are the features of GPT-5.1?"
[Previous tool calls returned the needed info]
Action: done.
</examples>
<available_tools>
${actionDesc}
</available_tools>
<mistakes_to_avoid>
1. **Over-assuming**: Don't assume things exist or don't exist - just look them up
2. **Verification obsession**: Don't waste tool calls "verifying existence" - just search for the thing directly
3. **Endless loops**: If 2-3 tool calls don't find something, it probably doesn't exist - report that and move on
4. **Ignoring task context**: If user wants a calendar event, don't just search - create the event
5. **Overthinking**: Keep reasoning simple and tool calls focused
</mistakes_to_avoid>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Choose the appropriate tools based on the action descriptions provided above.
- Default to web_search when information is missing or stale; keep queries targeted (max 3 per call).
- Call done when you have gathered enough to answer or performed the required actions.
- Do not invent tools. Do not return JSON.
</response_protocol>
${
  fileDesc.length > 0
    ? `<user_uploaded_files>
The user has uploaded the following files which may be relevant to their request:
${fileDesc}
You can use the uploaded files search tool to look for information within these documents if needed.
</user_uploaded_files>`
    : ''
}
`;
};
/**
 * System prompt for 'balanced' mode: each tool call must be preceded by a
 * `__reasoning_preamble` call, with a hard cap of 6 tool calls per turn.
 *
 * @param actionDesc   Rendered descriptions of the tools available this turn.
 * @param i            Zero-based index of the current research iteration.
 * @param maxIteration Total iteration budget for this turn.
 * @param fileDesc     Rendered `<file>` blocks for user uploads ('' when none).
 */
const getBalancedPrompt = (
  actionDesc: string,
  i: number,
  maxIteration: number,
  fileDesc: string,
) => {
  // Human-readable date to ground time-sensitive queries.
  const today = new Date().toLocaleDateString('en-US', {
    year: 'numeric',
    month: 'long',
    day: 'numeric',
  });
  return `
Assistant is an action orchestrator. Your job is to fulfill user requests by reasoning briefly and executing the available tools—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations so act efficiently.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Fulfill the user's request with concise reasoning plus focused actions.
You must call the __reasoning_preamble tool before every tool call in this assistant turn. Alternate: __reasoning_preamble → tool → __reasoning_preamble → tool ... and finish with __reasoning_preamble → done. Open each __reasoning_preamble with a brief intent phrase (e.g., "Okay, the user wants to...", "Searching for...", "Looking into...") and lay out your reasoning for the next step. Keep it natural language, no tool names.
</goal>
<core_principle>
Your knowledge is outdated; if you have web search, use it to ground answers even for seemingly basic facts.
You can call at most 6 tools total per turn: up to 2 reasoning (__reasoning_preamble counts as reasoning), 2-3 information-gathering calls, and 1 done. If you hit the cap, stop after done.
Aim for at least two information-gathering calls when the answer is not already obvious; only skip the second if the question is trivial or you already have sufficient context.
Do not spam searches—pick the most targeted queries.
</core_principle>
<done_usage>
Call done only after the reasoning plus the necessary tool calls are completed and you have enough to answer. If you call done early, stop. If you reach the tool cap, call done to conclude.
</done_usage>
<examples>
## Example 1: Unknown Subject
User: "What is Kimi K2?"
Reason: "Okay, the user wants to know about Kimi K2. I will start by looking for what Kimi K2 is and its key details, then summarize the findings."
Action: web_search ["Kimi K2", "Kimi K2 AI"] then reasoning then done.
## Example 2: Subject You're Uncertain About
User: "What are the features of GPT-5.1?"
Reason: "The user is asking about GPT-5.1 features. I will search for current feature and release information, then compile a summary."
Action: web_search ["GPT-5.1", "GPT-5.1 features", "GPT-5.1 release"] then reasoning then done.
## Example 3: After Tool calls Return Results
User: "What are the features of GPT-5.1?"
[Previous tool calls returned the needed info]
Reason: "I have gathered enough information about GPT-5.1 features; I will now wrap up."
Action: done.
</examples>
<available_tools>
YOU MUST CALL __reasoning_preamble BEFORE EVERY TOOL CALL IN THIS ASSISTANT TURN. IF YOU DO NOT CALL IT, THE TOOL CALL WILL BE IGNORED.
${actionDesc}
</available_tools>
<mistakes_to_avoid>
1. **Over-assuming**: Don't assume things exist or don't exist - just look them up
2. **Verification obsession**: Don't waste tool calls "verifying existence" - just search for the thing directly
3. **Endless loops**: If 2-3 tool calls don't find something, it probably doesn't exist - report that and move on
4. **Ignoring task context**: If user wants a calendar event, don't just search - create the event
5. **Overthinking**: Keep reasoning simple and tool calls focused
6. **Skipping the reasoning step**: Always call __reasoning_preamble first to outline your approach before other actions
</mistakes_to_avoid>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Start with __reasoning_preamble and call __reasoning_preamble before every tool call (including done): open with intent phrase ("Okay, the user wants to...", "Looking into...", etc.) and lay out your reasoning for the next step. No tool names.
- Choose tools based on the action descriptions provided above.
- Default to web_search when information is missing or stale; keep queries targeted (max 3 per call).
- Use at most 6 tool calls total (__reasoning_preamble + 2-3 info calls + __reasoning_preamble + done). If done is called early, stop.
- Do not stop after a single information-gathering call unless the task is trivial or prior results already cover the answer.
- Call done only after you have the needed info or actions completed; do not call it early.
- Do not invent tools. Do not return JSON.
</response_protocol>
${
  fileDesc.length > 0
    ? `<user_uploaded_files>
The user has uploaded the following files which may be relevant to their request:
${fileDesc}
You can use the uploaded files search tool to look for information within these documents if needed.
</user_uploaded_files>`
    : ''
}
`;
};
/**
 * System prompt for 'quality' (deep research) mode: an iterative
 * reason-act loop with up to 10 tool calls and multi-angle coverage
 * before `done` may be called.
 *
 * @param actionDesc   Rendered descriptions of the tools available this turn.
 * @param i            Zero-based index of the current research iteration.
 * @param maxIteration Total iteration budget for this turn.
 * @param fileDesc     Rendered `<file>` blocks for user uploads ('' when none).
 */
const getQualityPrompt = (
  actionDesc: string,
  i: number,
  maxIteration: number,
  fileDesc: string,
) => {
  // Human-readable date to ground time-sensitive queries.
  const today = new Date().toLocaleDateString('en-US', {
    year: 'numeric',
    month: 'long',
    day: 'numeric',
  });
  return `
Assistant is a deep-research orchestrator. Your job is to fulfill user requests with the most thorough, comprehensive research possible—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request with depth and rigor.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations. Use every iteration wisely to gather comprehensive information.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Conduct the deepest, most thorough research possible. Leave no stone unturned.
Follow an iterative reason-act loop: call __reasoning_preamble before every tool call to outline the next step, then call the tool, then __reasoning_preamble again to reflect and decide the next step. Repeat until you have exhaustive coverage.
Open each __reasoning_preamble with a brief intent phrase (e.g., "Okay, the user wants to know about...", "From the results, it looks like...", "Now I need to dig into...") and describe what you'll do next. Keep it natural language, no tool names.
Finish with done only when you have comprehensive, multi-angle information.
</goal>
<core_principle>
Your knowledge is outdated; always use the available tools to ground answers.
This is DEEP RESEARCH mode—be exhaustive. Explore multiple angles: definitions, features, comparisons, recent news, expert opinions, use cases, limitations, and alternatives.
You can call up to 10 tools total per turn. Use an iterative loop: __reasoning_preamble → tool call(s) → __reasoning_preamble → tool call(s) → ... → __reasoning_preamble → done.
Never settle for surface-level answers. If results hint at more depth, reason about your next step and follow up. Cross-reference information from multiple queries.
</core_principle>
<done_usage>
Call done only after you have gathered comprehensive, multi-angle information. Do not call done early—exhaust your research budget first. If you reach the tool cap, call done to conclude.
</done_usage>
<examples>
## Example 1: Unknown Subject - Deep Dive
User: "What is Kimi K2?"
Reason: "Okay, the user wants to know about Kimi K2. I'll start by finding out what it is and its key capabilities."
[calls info-gathering tool]
Reason: "From the results, Kimi K2 is an AI model by Moonshot. Now I need to dig into how it compares to competitors and any recent news."
[calls info-gathering tool]
Reason: "Got comparison info. Let me also check for limitations or critiques to give a balanced view."
[calls info-gathering tool]
Reason: "I now have comprehensive coverage—definition, capabilities, comparisons, and critiques. Wrapping up."
Action: done.
## Example 2: Feature Research - Comprehensive
User: "What are the features of GPT-5.1?"
Reason: "The user wants comprehensive GPT-5.1 feature information. I'll start with core features and specs."
[calls info-gathering tool]
Reason: "Got the basics. Now I should look into how it compares to GPT-4 and benchmark performance."
[calls info-gathering tool]
Reason: "Good comparison data. Let me also gather use cases and expert opinions for depth."
[calls info-gathering tool]
Reason: "I have exhaustive coverage across features, comparisons, benchmarks, and reviews. Done."
Action: done.
## Example 3: Iterative Refinement
User: "Tell me about quantum computing applications in healthcare."
Reason: "Okay, the user wants to know about quantum computing in healthcare. I'll start with an overview of current applications."
[calls info-gathering tool]
Reason: "Results mention drug discovery and diagnostics. Let me dive deeper into drug discovery use cases."
[calls info-gathering tool]
Reason: "Now I'll explore the diagnostics angle and any recent breakthroughs."
[calls info-gathering tool]
Reason: "Comprehensive coverage achieved. Wrapping up."
Action: done.
</examples>
<available_tools>
YOU MUST CALL __reasoning_preamble BEFORE EVERY TOOL CALL IN THIS ASSISTANT TURN. IF YOU DO NOT CALL IT, THE TOOL CALL WILL BE IGNORED.
${actionDesc}
</available_tools>
<research_strategy>
For any topic, consider searching:
1. **Core definition/overview** - What is it?
2. **Features/capabilities** - What can it do?
3. **Comparisons** - How does it compare to alternatives?
4. **Recent news/updates** - What's the latest?
5. **Reviews/opinions** - What do experts say?
6. **Use cases** - How is it being used?
7. **Limitations/critiques** - What are the downsides?
</research_strategy>
<mistakes_to_avoid>
1. **Shallow research**: Don't stop after one or two searches—dig deeper from multiple angles
2. **Over-assuming**: Don't assume things exist or don't exist - just look them up
3. **Missing perspectives**: Search for both positive and critical viewpoints
4. **Ignoring follow-ups**: If results hint at interesting sub-topics, explore them
5. **Premature done**: Don't call done until you've exhausted reasonable research avenues
6. **Skipping the reasoning step**: Always call __reasoning_preamble first to outline your research strategy
</mistakes_to_avoid>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Follow an iterative loop: __reasoning_preamble → tool call → __reasoning_preamble → tool call → ... → __reasoning_preamble → done.
- Each __reasoning_preamble should reflect on previous results (if any) and state the next research step. No tool names in the reasoning.
- Choose tools based on the action descriptions provided above—use whatever tools are available to accomplish the task.
- Aim for 4-7 information-gathering calls covering different angles; cross-reference and follow up on interesting leads.
- Call done only after comprehensive, multi-angle research is complete.
- Do not invent tools. Do not return JSON.
</response_protocol>
${
  fileDesc.length > 0
    ? `<user_uploaded_files>
The user has uploaded the following files which may be relevant to their request:
${fileDesc}
You can use the uploaded files search tool to look for information within these documents if needed.
</user_uploaded_files>`
    : ''
}
`;
};
/**
 * Builds the researcher system prompt for the requested orchestration mode.
 *
 * Renders descriptions of the user's uploaded files, selects the
 * mode-specific prompt builder (unknown modes fall back to 'speed'),
 * and appends the response-language instruction for the given locale.
 *
 * @param actionDesc   Rendered descriptions of the tools available this turn.
 * @param mode         Orchestration mode chosen by the classifier.
 * @param i            Zero-based index of the current research iteration.
 * @param maxIteration Total iteration budget for this turn.
 * @param fileIds      IDs of files the user uploaded for this conversation.
 * @param locale       Optional user locale for the response-language block.
 */
export const getResearcherPrompt = (
  actionDesc: string,
  mode: 'speed' | 'balanced' | 'quality',
  i: number,
  maxIteration: number,
  fileIds: string[],
  locale?: string,
) => {
  // One <file> element per upload so the model knows what it can search.
  const fileDesc = UploadStore.getFileData(fileIds)
    .map(
      (f) =>
        `<file><name>${f.fileName}</name><initial_content>${f.initialContent}</initial_content></file>`,
    )
    .join('\n');

  // Dispatch table instead of a switch; 'speed' is the fallback builder.
  const promptBuilders: Record<
    string,
    (desc: string, iter: number, max: number, files: string) => string
  > = {
    speed: getSpeedPrompt,
    balanced: getBalancedPrompt,
    quality: getQualityPrompt,
  };
  const buildPrompt = promptBuilders[mode] ?? getSpeedPrompt;
  return buildPrompt(actionDesc, i, maxIteration, fileDesc) + getLocaleInstruction(locale);
};

View File

@@ -1,167 +0,0 @@
import { getLocaleInstruction } from '../locale.js';
import type { AnswerMode } from '../../agents/search/types.js';
// Vertical-specific writer guidance, appended to the writer prompt when the
// classifier routes the query to the Travel answer mode.
const TRAVEL_VERTICAL_BLOCK = `
### Answer Mode: Travel
You are answering in Travel vertical. Prioritize:
- Destinations, itineraries, hotels, transport, practical tips
- Travel time, best seasons, visa/border info, local customs
- Specific recommendations (restaurants, attractions, neighborhoods)
- Budget and cost estimates where relevant
Format: clear sections (Where to stay, What to see, Getting there). Include actionable advice.
`;
// Vertical-specific writer guidance for the Finance answer mode.
const FINANCE_VERTICAL_BLOCK = `
### Answer Mode: Finance
You are answering in Finance vertical. Prioritize:
- Market data, company analysis, financial metrics
- Cite sources for numbers and projections
- Consider risk, volatility, regulatory context
`;
/** User-selected response preferences applied by the writer prompt. */
export interface ResponsePrefs {
  // 'bullets' | 'outline'; any other value means paragraphs/prose.
  format?: string;
  // 'short' | 'long'; any other value means medium depth.
  length?: string;
  // 'professional' | 'casual' | 'concise'; any other value means neutral.
  tone?: string;
}
/**
 * Builds the writer system prompt that turns gathered context into the final
 * cited, Markdown-formatted answer.
 *
 * @param context            Search results + widget results wrapped for the model.
 * @param systemInstructions User-provided custom instructions (lower priority).
 * @param mode               'quality' adds the long-form research-report directive.
 * @param locale             Optional user locale for the response-language block.
 * @param memoryContext      Stored user facts/preferences for personalization.
 * @param answerMode         Optional vertical ('travel' | 'finance') guidance.
 * @param responsePrefs      Format/length/tone preferences.
 * @param learningMode       When true, adds step-by-step explanation guidance.
 */
export const getWriterPrompt = (
  context: string,
  systemInstructions: string,
  mode: 'speed' | 'balanced' | 'quality',
  locale?: string,
  memoryContext?: string,
  answerMode?: AnswerMode,
  responsePrefs?: ResponsePrefs,
  learningMode?: boolean,
) => {
  // Personalization block; only included when memory facts exist.
  const memoryBlock = memoryContext?.trim()
    ? `\n### User memory (personalization)\nUse these stored facts/preferences to personalize when relevant. Do NOT cite as source.\n${memoryContext}\n`
    : '';
  // Vertical guidance: only travel and finance have dedicated blocks.
  const verticalBlock =
    answerMode === 'travel'
      ? TRAVEL_VERTICAL_BLOCK
      : answerMode === 'finance'
        ? FINANCE_VERTICAL_BLOCK
        : '';
  // Translate structured preferences into one-line prompt directives.
  const prefs: string[] = [];
  if (responsePrefs?.format) {
    const f = responsePrefs.format;
    if (f === 'bullets') prefs.push('Format: use bullet points where appropriate.');
    else if (f === 'outline') prefs.push('Format: use clear headings and outline structure.');
    else prefs.push('Format: use paragraphs and flowing prose.');
  }
  if (responsePrefs?.length) {
    const l = responsePrefs.length;
    if (l === 'short') prefs.push('Length: keep response concise and brief.');
    else if (l === 'long') prefs.push('Length: provide comprehensive, detailed coverage.');
    else prefs.push('Length: medium depth, balanced.');
  }
  if (responsePrefs?.tone) {
    const t = responsePrefs.tone;
    if (t === 'professional') prefs.push('Tone: formal, professional.');
    else if (t === 'casual') prefs.push('Tone: friendly, conversational.');
    else if (t === 'concise') prefs.push('Tone: direct, to the point.');
    else prefs.push('Tone: neutral.');
  }
  const prefsBlock = prefs.length ? `\n### Response preferences\n${prefs.join(' ')}\n` : '';
  const learningBlock = learningMode
    ? `\n### Step-by-step Learning mode\nYou are in Learning mode. Explain your reasoning step-by-step. Break down complex concepts into manageable parts. Show the logical flow of your answer. When appropriate, use numbered steps or "First... Then... Finally" structure. Help the user understand the "why" behind the facts, not just the facts themselves.\n`
    : '';
  return `
You are GooSeek, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
### Citation Requirements
- Cite every single fact from **search_results** using [number] notation. Citations [1], [2], etc. refer ONLY to sources in search_results.
- **widgets_result** (calculations, weather, stock data) — use this to answer directly, do NOT cite it. This data is already shown to the user; integrate it naturally into your response.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- When search_results exist: ensure every fact from search has a citation. When ONLY widgets_result has relevant data (e.g. calculation), no citations needed — just answer using the widget data.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- **IMPORTANT**: The context contains two sections: \`search_results\` (web search) and \`widgets_result\` (calculations, weather, stocks). If widgets_result has the answer (e.g. "The result of 2+2 is 4"), USE IT. Do not say "no relevant information" when the answer is in widgets_result.
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- Only if BOTH search_results AND widgets_result lack relevant information, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?"
${mode === 'quality' ? "- YOU ARE CURRENTLY SET IN QUALITY MODE, GENERATE VERY DEEP, DETAILED AND COMPREHENSIVE RESPONSES USING THE FULL CONTEXT PROVIDED. ASSISTANT'S RESPONSES SHALL NOT BE LESS THAN AT LEAST 2000 WORDS, COVER EVERYTHING AND FRAME IT LIKE A RESEARCH REPORT." : ''}
${verticalBlock}${prefsBlock}${learningBlock}
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
${systemInstructions}
${memoryBlock}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
<context>
${context}
</context>
Current date & time in ISO format (UTC timezone) is: ${new Date().toISOString()}.
${getLocaleInstruction(locale)}
`;
};
/**
 * Model Council synthesis prompt — combines 3 model answers into one best response
 * docs/architecture: 01-perplexity-analogue-design.md §5.14
 *
 * @param query   The user's original question.
 * @param answer1 First model's complete answer (citations preserved).
 * @param answer2 Second model's complete answer.
 * @param answer3 Third model's complete answer.
 * @param locale  Optional user locale for the response-language block.
 */
export const getSynthesisPrompt = (
  query: string,
  answer1: string,
  answer2: string,
  answer3: string,
  locale?: string,
) => `
You are synthesizing answers from 3 different AI models (Model Council). The user asked a question and received 3 separate answers. Your job is to produce ONE final answer that:
- Combines the BEST parts of each answer (most accurate, most useful, best structured)
- Eliminates redundancy and contradictions
- Preserves all relevant citations [1], [2], etc. from the sources
- Maintains a professional, well-structured format
- Responds fully to the user's query
User query: ${query}
Answer from Model 1:
---
${answer1}
---
Answer from Model 2:
---
${answer2}
---
Answer from Model 3:
---
${answer3}
---
Produce your synthesized answer now. Use Markdown. Preserve citations. Be comprehensive.
${getLocaleInstruction(locale)}
`;

View File

@@ -1,155 +0,0 @@
import { getSearxngURL } from './config/serverRegistry.js';
// Base URL of the dedicated search service; when set it takes priority over
// querying SearXNG instances directly (see searchSearxng).
const SEARCH_SVC_URL = process.env.SEARCH_SVC_URL?.trim() ?? '';
// Public SearXNG instances tried in order when the configured instance is
// unreachable or rate-limited. Overridable via comma-separated SEARXNG_FALLBACK_URL.
const FALLBACK_INSTANCES = (
  process.env.SEARXNG_FALLBACK_URL
    ? process.env.SEARXNG_FALLBACK_URL.split(',').map((u) => u.trim())
    : ['https://searx.tiekoetter.com', 'https://search.sapti.me']
).filter(Boolean);
/** Query options forwarded as-is to SearXNG's /search endpoint. */
interface SearxngSearchOptions {
  // SearXNG categories, e.g. general/images/news (joined with commas).
  categories?: string[];
  // Specific engines to query (joined with commas).
  engines?: string[];
  language?: string;
  // 1-based result page number.
  pageno?: number;
}
/** One result item as returned by SearXNG's JSON API (fields vary by engine). */
export interface SearxngSearchResult {
  title: string;
  url: string;
  img_src?: string;
  thumbnail_src?: string;
  thumbnail?: string;
  content?: string;
  author?: string;
  iframe_src?: string;
}
/**
 * Builds a full SearXNG /search URL for one instance.
 *
 * Always requests JSON output, URL-encodes the query, serializes each
 * option (arrays become comma-joined strings), strips a trailing slash
 * from the base, and prepends "http://" when no scheme is present.
 */
function buildSearchUrl(baseUrl: string, query: string, opts?: SearxngSearchOptions): string {
  const params = new URLSearchParams({ format: 'json', q: query });
  for (const [key, value] of Object.entries(opts ?? {})) {
    if (value == null) continue;
    const serialized = Array.isArray(value) ? value.join(',') : String(value);
    params.append(key, serialized);
  }
  let base = baseUrl.trim().replace(/\/$/, '');
  if (!/^https?:\/\//i.test(base)) base = `http://${base}`;
  return `${base}/search?${params.toString()}`;
}
/**
 * Runs a web search. Two paths:
 * 1. When SEARCH_SVC_URL is set, delegates to the dedicated search service.
 * 2. Otherwise queries SearXNG directly: the configured instance first, then
 *    each public fallback, skipping to the next candidate on network errors
 *    and rate limits (HTTP 429).
 *
 * @param query Search query string.
 * @param opts  Optional SearXNG query options (categories, engines, language, pageno).
 * @returns Results plus optional suggestions from whichever backend answered.
 * @throws The last encountered error when every candidate fails, or a
 *         configuration error when no instance is known at all.
 */
export const searchSearxng = async (
  query: string,
  opts?: SearxngSearchOptions,
) => {
  // Path 1: dedicated search service.
  if (SEARCH_SVC_URL) {
    const params = new URLSearchParams();
    params.set('q', query);
    if (opts?.categories) params.set('categories', Array.isArray(opts.categories) ? opts.categories.join(',') : opts.categories);
    if (opts?.engines) params.set('engines', Array.isArray(opts.engines) ? opts.engines.join(',') : opts.engines);
    if (opts?.language) params.set('language', opts.language);
    if (opts?.pageno != null) params.set('pageno', String(opts.pageno));
    const url = `${SEARCH_SVC_URL.replace(/\/$/, '')}/api/v1/search?${params.toString()}`;
    const res = await fetch(url, { signal: AbortSignal.timeout(15000) });
    if (!res.ok) {
      // Prefer the service's own error message when its body is JSON.
      const err = await res.json().catch(() => ({}));
      throw new Error((err as { error?: string }).error ?? `Search service HTTP ${res.status}`);
    }
    return res.json() as Promise<{ results: SearxngSearchResult[]; suggestions?: string[] }>;
  }
  // Path 2: direct SearXNG. Build the ordered, de-duplicated candidate list —
  // configured instance first, then public fallbacks.
  const searxngURL = getSearxngURL();
  const candidates: string[] = [];
  if (searxngURL?.trim()) {
    let u = searxngURL.trim().replace(/\/$/, '');
    if (!/^https?:\/\//i.test(u)) u = `http://${u}`;
    candidates.push(u);
  }
  FALLBACK_INSTANCES.forEach((u) => {
    const trimmed = u.trim().replace(/\/$/, '');
    if (trimmed && !candidates.includes(trimmed)) candidates.push(trimmed);
  });
  let lastError: Error | null = null;
  const FETCH_TIMEOUT_MS = 15_000;
  for (const baseUrl of candidates) {
    try {
      const fullUrl = buildSearchUrl(baseUrl, query, opts);
      // Manual AbortController so the timer can be cleared on completion.
      const controller = new AbortController();
      const timeoutId = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
      const res = await fetch(fullUrl, { signal: controller.signal });
      clearTimeout(timeoutId);
      const text = await res.text();
      const ok = res.ok;
      // SearXNG may answer with an HTML error page; only JSON bodies are usable.
      const isJson =
        text.trim().startsWith('{') || text.trim().startsWith('[');
      if (res.status === 429) {
        // Tagged so the catch below treats it as retryable on the next candidate.
        const err = new Error(
          `SearXNG ${baseUrl}: лимит запросов (429). Укажите свой инстанс в настройках или попробуйте позже.`,
        );
        err.name = 'SearxngRateLimit';
        throw err;
      }
      if (!isJson) {
        throw new Error(
          `SearXNG ${baseUrl}: ответ не JSON (HTTP ${res.status}). Проверьте URL и поддержку format=json.`,
        );
      }
      const data = JSON.parse(text);
      const results: SearxngSearchResult[] = data.results ?? [];
      const suggestions: string[] = data.suggestions ?? [];
      // A non-2xx status with zero results is a hard failure; partial results win.
      if (!ok && results.length === 0) {
        const errMsg = text.slice(0, 200) || `HTTP ${res.status}`;
        throw new Error(`SearXNG ${baseUrl}: ${errMsg}`);
      }
      return { results, suggestions };
    } catch (err) {
      lastError =
        err instanceof Error ? err : new Error(String(err));
      // Classify the failure: network-level and rate-limit errors are
      // retryable on the next candidate; anything else aborts immediately.
      const cause = (err as { cause?: { code?: string } })?.cause;
      const isAbort = lastError.name === 'AbortError';
      const isNetwork =
        isAbort ||
        lastError.message.includes('fetch failed') ||
        lastError.message.includes('Invalid URL') ||
        cause?.code === 'ECONNREFUSED' ||
        cause?.code === 'ECONNRESET';
      const isRateLimit =
        lastError.name === 'SearxngRateLimit' ||
        lastError.message.includes('429');
      const hasFallback = candidates.indexOf(baseUrl) < candidates.length - 1;
      if (hasFallback && (isNetwork || isRateLimit)) {
        continue;
      }
      if (isAbort) {
        throw new Error(
          `SearXNG ${baseUrl}: таймаут ${FETCH_TIMEOUT_MS / 1000}с. Проверьте подключение или используйте локальный инстанс.`,
        );
      }
      throw lastError;
    }
  }
  // All candidates exhausted (or none configured).
  throw (
    lastError ??
    new Error(
      'SearXNG not configured. Set SEARXNG_API_URL or run Docker (includes SearXNG).',
    )
  );
};

View File

@@ -1,105 +0,0 @@
import { EventEmitter } from 'node:events';
import { applyPatch } from 'rfc6902';
import { Block } from './types.js';
// Module-level session registry. In dev the map is stashed on `global` so
// sessions survive hot-reload module re-evaluation; in production a fresh
// map is created once at startup.
const sessions =
  (global as any)._sessionManagerSessions || new Map<string, SessionManager>();
if (process.env.NODE_ENV !== 'production') {
  (global as any)._sessionManagerSessions = sessions;
}
/**
 * In-memory pub/sub session that buffers and streams block events to clients.
 *
 * Every emitted event is recorded so late subscribers get a full replay of
 * the history before receiving live events. Sessions self-expire from the
 * registry 30 minutes after creation.
 */
class SessionManager {
  private static sessions: Map<string, SessionManager> = sessions;
  readonly id: string;
  private blocks = new Map<string, Block>();
  // Replay buffer: every emitted event, in order, for late subscribers.
  private events: { event: string; data: any }[] = [];
  private emitter = new EventEmitter();
  // Session lifetime before removal from the registry.
  private TTL_MS = 30 * 60 * 1000;

  constructor(id?: string) {
    this.id = id ?? crypto.randomUUID();
    // Auto-expire from the registry after TTL. unref() the timer so a pending
    // expiry does not keep the Node process alive on its own (previously the
    // unreferenced 30-minute timeout blocked clean process shutdown).
    const ttlTimer = setTimeout(() => {
      SessionManager.sessions.delete(this.id);
    }, this.TTL_MS);
    ttlTimer.unref?.();
  }

  /** Looks up a live (non-expired) session by id. */
  static getSession(id: string): SessionManager | undefined {
    return this.sessions.get(id);
  }

  /** Returns every live session in the registry. */
  static getAllSessions(): SessionManager[] {
    return Array.from(this.sessions.values());
  }

  /** Creates a session with a fresh id and registers it. */
  static createSession(): SessionManager {
    const session = new SessionManager();
    this.sessions.set(session.id, session);
    return session;
  }

  /** Detaches all subscribers (does not clear the replay buffer). */
  removeAllListeners() {
    this.emitter.removeAllListeners();
  }

  /** Emits an event to live subscribers and records it for replay. */
  emit(event: string, data: any) {
    this.emitter.emit(event, data);
    this.events.push({ event, data });
  }

  /** Stores a block and broadcasts it as a 'block' data event. */
  emitBlock(block: Block) {
    this.blocks.set(block.id, block);
    this.emit('data', {
      type: 'block',
      block: block,
    });
  }

  getBlock(blockId: string): Block | undefined {
    return this.blocks.get(blockId);
  }

  /**
   * Applies an RFC 6902 JSON patch to a stored block in place and broadcasts
   * the patch (not the whole block). Unknown blockIds are ignored.
   */
  updateBlock(blockId: string, patch: any[]) {
    const block = this.blocks.get(blockId);
    if (block) {
      applyPatch(block, patch);
      this.blocks.set(blockId, block);
      this.emit('data', {
        type: 'updateBlock',
        blockId: blockId,
        patch: patch,
      });
    }
  }

  getAllBlocks() {
    return Array.from(this.blocks.values());
  }

  /**
   * Subscribes to 'data'/'end'/'error' events. All events emitted before the
   * subscription are replayed synchronously first, then live events follow.
   *
   * @returns Unsubscribe function that detaches only this listener.
   */
  subscribe(listener: (event: string, data: any) => void): () => void {
    // Snapshot the buffer length so events emitted during replay are not
    // delivered twice (once via replay, once live).
    const currentEventsLength = this.events.length;
    const handler = (event: string) => (data: any) => listener(event, data);
    const dataHandler = handler('data');
    const endHandler = handler('end');
    const errorHandler = handler('error');
    this.emitter.on('data', dataHandler);
    this.emitter.on('end', endHandler);
    this.emitter.on('error', errorHandler);
    for (let i = 0; i < currentEventsLength; i++) {
      const { event, data } = this.events[i];
      listener(event, data);
    }
    return () => {
      this.emitter.off('data', dataHandler);
      this.emitter.off('end', endHandler);
      this.emitter.off('error', errorHandler);
    };
  }
}
export default SessionManager;

View File

@@ -1,5 +1,4 @@
import path from "path";
import BaseEmbedding from "../models/base/embedding.js"
import crypto from "crypto"
import fs from 'fs';
import { splitText } from '../utils/splitText.js';
@@ -9,8 +8,12 @@ const supportedMimeTypes = ['application/pdf', 'application/vnd.openxmlformats-o
type SupportedMimeType = typeof supportedMimeTypes[number];
export type EmbeddingLike = {
embedText(texts: string[]): Promise<number[][]>;
};
type UploadManagerParams = {
embeddingModel: BaseEmbedding<any>;
embeddingModel: EmbeddingLike;
}
type RecordedFile = {
@@ -32,7 +35,7 @@ const ROOT_DIR = process.env.DATA_DIR
: process.cwd();
class UploadManager {
private embeddingModel: BaseEmbedding<any>;
private embeddingModel: EmbeddingLike;
static uploadsDir = path.join(ROOT_DIR, 'data', 'uploads');
static uploadedFilesRecordPath = path.join(UploadManager.uploadsDir, 'uploaded_files.json');

View File

@@ -1,122 +0,0 @@
import BaseEmbedding from "../models/base/embedding.js";
import UploadManager from "./manager.js";
import computeSimilarity from '../utils/computeSimilarity.js';
import { hashObj } from '../serverUtils.js';
import type { Chunk } from '../types.js';
import fs from 'fs';
type UploadStoreParams = {
embeddingModel: BaseEmbedding<any>;
fileIds: string[];
}
type StoreRecord = {
embedding: number[];
content: string;
fileId: string;
metadata: Record<string, any>
}
/**
 * In-memory similarity store over chunks of previously uploaded files.
 * Chunk embeddings are precomputed by UploadManager; queries are answered
 * by per-query cosine ranking fused across queries with score-weighted
 * Reciprocal Rank Fusion (RRF).
 */
class UploadStore {
  embeddingModel: BaseEmbedding<any>;
  fileIds: string[];
  records: StoreRecord[] = [];
  constructor(private params: UploadStoreParams) {
    this.embeddingModel = params.embeddingModel;
    this.fileIds = params.fileIds;
    this.initializeStore();
  }
  /** Loads every chunk of every file into `records`; throws if a file id is unknown. */
  initializeStore() {
    this.fileIds.forEach((fileId) => {
      const file = UploadManager.getFile(fileId);
      if (!file) {
        throw new Error(`File with ID ${fileId} not found`);
      }
      const chunks = UploadManager.getFileChunks(fileId);
      this.records.push(...chunks.map((chunk) => ({
        embedding: chunk.embedding,
        content: chunk.content,
        fileId: fileId,
        metadata: {
          fileName: file.name,
          title: file.name,
          url: `file_id://${file.id}`,
        }
      })));
    });
  }
  /**
   * Ranks all records against each query by cosine similarity, then fuses
   * the per-query rankings with score-weighted RRF (k = 60) and returns the
   * top `topK` chunks.
   *
   * @param queries natural-language queries to embed and match
   * @param topK maximum number of fused chunks to return
   */
  async query(queries: string[], topK: number): Promise<Chunk[]> {
    const queryEmbeddings = await this.embeddingModel.embedText(queries);
    const results: { chunk: Chunk; score: number }[][] = [];
    const hashResults: string[][] = [];
    await Promise.all(queryEmbeddings.map(async (query) => {
      // Return type annotated on the callback instead of an `as` assertion;
      // the previously declared (and unused) index parameter is dropped.
      const similarities = this.records.map((record): { chunk: Chunk; score: number } => ({
        chunk: {
          content: record.content,
          metadata: {
            ...record.metadata,
            fileId: record.fileId,
          }
        },
        score: computeSimilarity(query, record.embedding),
      })).sort((a, b) => b.score - a.score);
      results.push(similarities);
      hashResults.push(similarities.map(s => hashObj(s)));
    }));
    const chunkMap: Map<string, Chunk> = new Map();
    const scoreMap: Map<string, number> = new Map();
    const k = 60; // conventional RRF damping constant
    for (let i = 0; i < results.length; i++) {
      for (let j = 0; j < results[i].length; j++) {
        const chunkHash = hashResults[i][j];
        chunkMap.set(chunkHash, results[i][j].chunk);
        // Score-weighted RRF: higher-ranked hits (small j) contribute more.
        scoreMap.set(chunkHash, (scoreMap.get(chunkHash) || 0) + results[i][j].score / (j + 1 + k));
      }
    }
    const finalResults = Array.from(scoreMap.entries())
      .sort((a, b) => b[1] - a[1])
      .map(([chunkHash]) => chunkMap.get(chunkHash)!);
    return finalResults.slice(0, topK);
  }
  /** Returns each file's name plus its first three chunks joined as a preview. */
  static getFileData(fileIds: string[]): { fileName: string; initialContent: string }[] {
    const filesData: { fileName: string; initialContent: string }[] = [];
    fileIds.forEach((fileId) => {
      const file = UploadManager.getFile(fileId);
      if (!file) {
        throw new Error(`File with ID ${fileId} not found`);
      }
      const chunks = UploadManager.getFileChunks(fileId);
      filesData.push({
        fileName: file.name,
        initialContent: chunks.slice(0, 3).map(c => c.content).join('\n---\n'),
      });
    });
    return filesData;
  }
}
export default UploadStore

View File

@@ -1,22 +0,0 @@
/**
 * Cosine similarity between two equal-length numeric vectors.
 * Returns 0 when either vector has zero magnitude; throws on length mismatch.
 */
const computeSimilarity = (x: number[], y: number[]): number => {
  if (x.length !== y.length) {
    throw new Error('Vectors must be of the same length');
  }
  let dot = 0;
  let sqSumX = 0;
  let sqSumY = 0;
  x.forEach((xi, i) => {
    const yi = y[i];
    dot += xi * yi;
    sqSumX += xi * xi;
    sqSumY += yi * yi;
  });
  if (sqSumX === 0 || sqSumY === 0) {
    return 0;
  }
  return dot / (Math.sqrt(sqSumX) * Math.sqrt(sqSumY));
};
export default computeSimilarity;

View File

@@ -1,12 +0,0 @@
import type { ChatTurnMessage } from '../types.js';
/** Renders a chat transcript as plain text, one "Speaker: content" line per message. */
const formatChatHistoryAsString = (history: ChatTurnMessage[]) => {
  const lines: string[] = [];
  for (const message of history) {
    const speaker = message.role === 'assistant' ? 'AI' : 'User';
    lines.push(`${speaker}: ${message.content}`);
  }
  return lines.join('\n');
};
export default formatChatHistoryAsString;

View File

@@ -1,7 +1,7 @@
FROM node:22-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
RUN npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
@@ -9,7 +9,7 @@ RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev
RUN npm install --omit=dev
COPY --from=builder /app/dist ./dist
EXPOSE 3002
CMD ["node", "dist/index.js"]

View File

@@ -0,0 +1,15 @@
# syntax=docker/dockerfile:1
# Two-stage build: compile TypeScript, then ship a dev-dependency-free runtime.
FROM node:22-alpine AS builder
WORKDIR /app
COPY package*.json ./
# Cache npm downloads between builds (same pattern as the other service Dockerfiles).
RUN --mount=type=cache,target=/root/.npm \
    npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN --mount=type=cache,target=/root/.npm \
    npm install --omit=dev
COPY --from=builder /app/dist ./dist
# Service listens on PORT (defaults to 4002 in src/index.ts).
EXPOSE 4002
CMD ["node", "dist/index.js"]

View File

@@ -6,7 +6,8 @@
"main": "src/index.ts",
"scripts": {
"dev": "npx tsx watch src/index.ts",
"start": "npx tsx src/index.ts"
"build": "tsc",
"start": "node dist/index.js"
},
"dependencies": {
"cors": "^2.8.5",
@@ -16,6 +17,7 @@
},
"devDependencies": {
"@types/express": "^4.17.21",
"@types/geoip-lite": "^1.4.4",
"@types/ua-parser-js": "^0.7.39",
"tsx": "^4.19.0",
"typescript": "^5.9.3"

View File

@@ -5,7 +5,7 @@ import contextRouter from './routes/context.js';
const app = express();
app.use(cors({ origin: true }));
const PORT = parseInt(process.env.PORT ?? '3015', 10);
const PORT = parseInt(process.env.PORT ?? '4002', 10);
app.use(express.json({ limit: '1mb' }));

View File

@@ -1,7 +1,9 @@
# syntax=docker/dockerfile:1
FROM node:20-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm install
RUN --mount=type=cache,target=/root/.npm \
npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
@@ -9,7 +11,8 @@ RUN npm run build
FROM node:20-alpine
WORKDIR /app
COPY package*.json ./
RUN npm install --omit=dev
RUN --mount=type=cache,target=/root/.npm \
npm install --omit=dev
COPY --from=builder /app/dist ./dist
EXPOSE 3020
ENV DATA_DIR=/app/data

View File

@@ -1,5 +1,5 @@
/**
* llm-svc — LLM provider management microservice
* llm-svc — LLM provider management service (СОА)
* API: GET/POST/PATCH/DELETE /api/v1/providers, GET/POST /api/v1/providers/:id/models
* Config: data/llm-providers.json, envOnlyMode via LLM_PROVIDER=ollama|timeweb
*/
@@ -59,6 +59,22 @@ app.get('/metrics', async (_req, reply) => {
);
});
/* --- Provider UI config (for chat-svc config) --- */
/**
 * GET /api/v1/providers/ui-config — lists every provider's display name and
 * config form fields so a client can render provider-settings UI.
 * The providers module is loaded via dynamic import at request time.
 */
app.get('/api/v1/providers/ui-config', async (_req, reply) => {
  try {
    const { providers } = await import('./lib/models/providers/index.js');
    // One section per provider class: stable key + human name + field specs.
    const sections = Object.entries(providers).map(([key, Provider]) => {
      const configFields = Provider.getProviderConfigFields();
      const metadata = Provider.getProviderMetadata();
      return { key, name: metadata.name, fields: configFields };
    });
    return reply.send({ sections });
  } catch (err) {
    app.log.error(err);
    return reply.status(500).send({ message: 'An error has occurred.' });
  }
});
/* --- Providers --- */
app.get<{ Querystring: { internal?: string } }>('/api/v1/providers', async (req, reply) => {
try {
@@ -257,6 +273,156 @@ app.post<{ Params: { id: string }; Body: unknown }>(
},
);
// Identifies a model: which configured provider plus the model key within it.
const modelSchema = z.object({
  providerId: z.string().min(1),
  key: z.string().min(1),
});
// One chat turn; tool_calls carries structured calls on assistant messages.
const messageSchema = z.object({
  role: z.enum(['user', 'assistant', 'system', 'tool']),
  content: z.string(),
  id: z.string().optional(),
  name: z.string().optional(),
  tool_calls: z.array(z.object({
    id: z.string(),
    name: z.string(),
    arguments: z.record(z.string(), z.unknown()),
  })).optional(),
});
// Tool definition as sent over the wire; `schema` is JSON Schema (converted
// to zod by the route handlers before reaching the provider).
const toolSchema = z.object({
  name: z.string(),
  description: z.string(),
  schema: z.record(z.string(), z.unknown()),
});
// Standard sampling knobs; the whole object is optional.
const generateOptionsSchema = z.object({
  temperature: z.number().optional(),
  maxTokens: z.number().optional(),
  topP: z.number().optional(),
  stopSequences: z.array(z.string()).optional(),
  frequencyPenalty: z.number().optional(),
  presencePenalty: z.number().optional(),
}).optional();
// Body for /api/v1/generate and /api/v1/generate/stream.
const generateSchema = z.object({
  model: modelSchema,
  messages: z.array(messageSchema),
  tools: z.array(toolSchema).optional(),
  options: generateOptionsSchema,
});
// Body for /api/v1/generate/object: `schema` is the desired output's JSON Schema.
const generateObjectSchema = z.object({
  model: modelSchema,
  messages: z.array(messageSchema),
  schema: z.record(z.string(), z.unknown()),
  options: generateOptionsSchema,
});
// Body for /api/v1/embeddings.
const embeddingsSchema = z.object({
  model: modelSchema,
  texts: z.array(z.string()),
});
/* --- Generation API --- */
/**
 * POST /api/v1/generate — single-shot text generation.
 * Validates the body, resolves the requested chat model from the registry,
 * converts JSON-Schema tool definitions to zod, and returns the provider
 * result as JSON. 400 on invalid body, 500 with the error message otherwise.
 */
app.post<{ Body: unknown }>('/api/v1/generate', async (req, reply) => {
  const parsed = generateSchema.safeParse(req.body);
  if (!parsed.success) {
    return reply.status(400).send({ message: 'Invalid request', error: parsed.error.issues });
  }
  try {
    const registry = new ModelRegistry();
    const llm = await registry.loadChatModel(parsed.data.model.providerId, parsed.data.model.key);
    // Tools arrive as JSON Schema; the model layer expects zod schemas.
    const tools = parsed.data.tools?.map((t) => ({
      name: t.name,
      description: t.description,
      schema: z.fromJSONSchema(t.schema as Parameters<typeof z.fromJSONSchema>[0]),
    }));
    const result = await llm.generateText({
      messages: parsed.data.messages as unknown as import('./lib/types.js').Message[],
      tools,
      options: parsed.data.options,
    });
    return reply.send(result);
  } catch (err) {
    app.log.error(err);
    return reply.status(500).send({ message: err instanceof Error ? err.message : 'Generation failed' });
  }
});
/**
 * POST /api/v1/generate/stream — streaming text generation.
 * Same contract as /generate, but provider chunks are forwarded as
 * newline-delimited JSON (NDJSON). Errors raised mid-stream are emitted
 * as a final {"error": ...} line rather than an HTTP error, since the
 * response status has already been sent.
 */
app.post<{ Body: unknown }>('/api/v1/generate/stream', async (req, reply) => {
  const parsed = generateSchema.safeParse(req.body);
  if (!parsed.success) {
    return reply.status(400).send({ message: 'Invalid request', error: parsed.error.issues });
  }
  try {
    const registry = new ModelRegistry();
    const llm = await registry.loadChatModel(parsed.data.model.providerId, parsed.data.model.key);
    const tools = parsed.data.tools?.map((t) => ({
      name: t.name,
      description: t.description,
      schema: z.fromJSONSchema(t.schema as Parameters<typeof z.fromJSONSchema>[0]),
    }));
    const stream = llm.streamText({
      messages: parsed.data.messages as unknown as import('./lib/types.js').Message[],
      tools,
      options: parsed.data.options,
    });
    const encoder = new TextEncoder();
    const readable = new ReadableStream({
      async start(controller) {
        try {
          // One JSON document per line per provider chunk.
          for await (const chunk of stream) {
            controller.enqueue(encoder.encode(JSON.stringify(chunk) + '\n'));
          }
        } catch (e) {
          app.log.error(e);
          controller.enqueue(encoder.encode(JSON.stringify({ error: e instanceof Error ? e.message : String(e) }) + '\n'));
        }
        controller.close();
      },
    });
    return reply
      .header('Content-Type', 'application/x-ndjson')
      .header('Cache-Control', 'no-cache')
      .send(readable);
  } catch (err) {
    app.log.error(err);
    return reply.status(500).send({ message: err instanceof Error ? err.message : 'Stream generation failed' });
  }
});
/**
 * POST /api/v1/generate/object — structured generation.
 * Converts the caller's JSON Schema into a zod schema and asks the model
 * for an object conforming to it; responds { object: result }.
 */
app.post<{ Body: unknown }>('/api/v1/generate/object', async (req, reply) => {
  const parsed = generateObjectSchema.safeParse(req.body);
  if (!parsed.success) {
    return reply.status(400).send({ message: 'Invalid request', error: parsed.error.issues });
  }
  try {
    const registry = new ModelRegistry();
    const llm = await registry.loadChatModel(parsed.data.model.providerId, parsed.data.model.key);
    const zodSchema = z.fromJSONSchema(parsed.data.schema as Parameters<typeof z.fromJSONSchema>[0]);
    const result = await llm.generateObject({
      messages: parsed.data.messages as unknown as import('./lib/types.js').Message[],
      schema: zodSchema,
      options: parsed.data.options,
    });
    return reply.send({ object: result });
  } catch (err) {
    app.log.error(err);
    return reply.status(500).send({ message: err instanceof Error ? err.message : 'Object generation failed' });
  }
});
/**
 * POST /api/v1/embeddings — embeds a batch of texts with the requested
 * embedding model and returns the raw vectors as { embeddings }.
 */
app.post<{ Body: unknown }>('/api/v1/embeddings', async (req, reply) => {
  const parsed = embeddingsSchema.safeParse(req.body);
  if (!parsed.success) {
    return reply.status(400).send({ message: 'Invalid request', error: parsed.error.issues });
  }
  try {
    const registry = new ModelRegistry();
    const embedding = await registry.loadEmbeddingModel(parsed.data.model.providerId, parsed.data.model.key);
    const embeddings = await embedding.embedText(parsed.data.texts);
    return reply.send({ embeddings });
  } catch (err) {
    app.log.error(err);
    return reply.status(500).send({ message: err instanceof Error ? err.message : 'Embeddings failed' });
  }
});
const modelDeleteSchema = z.object({
type: z.enum(['chat', 'embedding']),
key: z.string().min(1),

View File

@@ -2,7 +2,7 @@ import type { ConfigModelProvider } from '../config/types.js';
import BaseModelProvider, {
createProviderInstance,
} from './base/provider.js';
import { getConfiguredModelProviders } from '../config/serverRegistry.js';
import { getConfiguredModelProviders, isEnvOnlyMode } from '../config/serverRegistry.js';
import { providers } from './providers/index.js';
import type { MinimalProvider, ModelList } from './types.js';
import { providersConfig } from '../config/ProvidersConfig.js';
@@ -74,13 +74,25 @@ class ModelRegistry {
}
async loadChatModel(providerId: string, modelName: string) {
const provider = this.activeProviders.find((p) => p.id === providerId);
let provider = this.activeProviders.find((p) => p.id === providerId);
if (!provider && isEnvOnlyMode() && (providerId === 'env' || !providerId)) {
provider = this.activeProviders.find((p) => p.id.startsWith('env-'));
if (provider && modelName === 'default') {
modelName = provider.chatModels[0]?.key ?? modelName;
}
}
if (!provider) throw new Error('Invalid provider id');
return provider.provider.loadChatModel(modelName);
}
async loadEmbeddingModel(providerId: string, modelName: string) {
const provider = this.activeProviders.find((p) => p.id === providerId);
let provider = this.activeProviders.find((p) => p.id === providerId);
if (!provider && isEnvOnlyMode() && (providerId === 'env' || !providerId)) {
provider = this.activeProviders.find((p) => p.id.startsWith('env-'));
if (provider && modelName === 'default') {
modelName = provider.embeddingModels[0]?.key ?? modelName;
}
}
if (!provider) throw new Error('Invalid provider id');
return provider.provider.loadEmbeddingModel(modelName);
}

View File

@@ -0,0 +1,15 @@
# syntax=docker/dockerfile:1
# Two-stage build: compile TypeScript, then ship a dev-dependency-free runtime.
FROM node:22-alpine AS builder
WORKDIR /app
COPY package*.json ./
# Cache npm downloads between builds (same pattern as the other service Dockerfiles).
RUN --mount=type=cache,target=/root/.npm \
    npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN --mount=type=cache,target=/root/.npm \
    npm install --omit=dev
COPY --from=builder /app/dist ./dist
# Service listens on PORT (defaults to 4003 in src/index.ts).
EXPOSE 4003
CMD ["node", "dist/index.js"]

View File

@@ -6,7 +6,8 @@
"main": "src/index.ts",
"scripts": {
"dev": "npx tsx watch src/index.ts",
"start": "npx tsx src/index.ts"
"build": "tsc",
"start": "node dist/index.js"
},
"dependencies": {
"cors": "^2.8.5",

View File

@@ -6,7 +6,7 @@ import translationsRouter from './routes/translations.js';
const app = express();
app.use(cors({ origin: true }));
const PORT = parseInt(process.env.PORT ?? '3016', 10);
const PORT = parseInt(process.env.PORT ?? '4003', 10);
app.use(express.json({ limit: '1mb' }));

View File

@@ -1,7 +1,7 @@
import type { GeoDeviceContext } from '../types.js';
const GEO_DEVICE_URL =
process.env.GEO_DEVICE_SVC_URL ?? process.env.GEO_DEVICE_SERVICE_URL ?? 'http://localhost:3015';
process.env.GEO_DEVICE_SVC_URL ?? process.env.GEO_DEVICE_SERVICE_URL ?? 'http://localhost:4002';
export async function fetchGeoContext(
headers: Record<string, string>,

View File

@@ -0,0 +1,15 @@
# syntax=docker/dockerfile:1
# Two-stage build: compile TypeScript, then ship a dev-dependency-free runtime.
FROM node:22-alpine AS builder
WORKDIR /app
COPY package*.json ./
# Cache npm downloads between builds (same pattern as the other service Dockerfiles).
RUN --mount=type=cache,target=/root/.npm \
    npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN --mount=type=cache,target=/root/.npm \
    npm install --omit=dev
COPY --from=builder /app/dist ./dist
# Service listens on PORT (defaults to 3018 in src/index.ts).
EXPOSE 3018
CMD ["node", "dist/index.js"]

View File

@@ -17,10 +17,14 @@
"ollama": "^0.6.3",
"openai": "^6.9.0",
"partial-json": "^0.1.7",
"rfc6902": "^5.1.2",
"turndown": "^7.2.2",
"yahoo-finance2": "^3.13.0",
"zod": "^4.1.12"
},
"devDependencies": {
"@types/node": "^24.8.1",
"@types/turndown": "^5.0.6",
"tsx": "^4.19.2",
"typescript": "^5.9.3"
}

View File

@@ -1,6 +1,6 @@
/**
* master-agents-svc — Master Agent с динамическими под-агентами и инструментами
* API: POST /api/v1/agents/execute
* master-agents-svc — Master Agent + Search Orchestrator (Perplexity-style)
* API: POST /api/v1/agents/execute, POST /api/v1/agents/search (NDJSON stream)
*/
import Fastify from 'fastify';
@@ -8,8 +8,13 @@ import cors from '@fastify/cors';
import { z } from 'zod';
import { loadChatModel } from './lib/models/registry.js';
import { runMasterAgent } from './lib/agent/master.js';
import { createLlmClient } from './lib/llm-client.js';
import SessionManager from './lib/session.js';
import { runSearchOrchestrator } from './lib/agent/searchOrchestrator.js';
const PORT = parseInt(process.env.PORT ?? '3018', 10);
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
const MEMORY_SVC_URL = process.env.MEMORY_SVC_URL ?? '';
const chatModelSchema = z.object({
providerId: z.string(),
@@ -23,6 +28,30 @@ const bodySchema = z.object({
maxSteps: z.number().min(1).max(25).optional().default(15),
});
// Answer "personas" accepted by the search endpoint (presumably consumed by
// the orchestrator's answer prompt — confirm in searchOrchestrator).
const answerModeEnum = z.enum([
  'standard', 'focus', 'academic', 'writing', 'travel', 'finance',
  'health', 'education', 'medicine', 'realEstate', 'psychology', 'sports',
  'children', 'goods', 'shopping', 'games', 'taxes', 'legislation',
]);
// Request body for POST /api/v1/agents/search.
const searchBodySchema = z.object({
  message: z.object({
    messageId: z.string().min(1),
    chatId: z.string().min(1),
    content: z.string().min(1),
  }),
  optimizationMode: z.enum(['speed', 'balanced', 'quality']),
  sources: z.array(z.string()).optional().default([]),
  // history entries are [role, content] pairs; role 'human' maps to user
  // in the route handler below.
  history: z.array(z.tuple([z.string(), z.string()])).optional().default([]),
  files: z.array(z.string()).optional().default([]),
  chatModel: chatModelSchema,
  systemInstructions: z.string().nullable().optional().default(''),
  locale: z.string().optional(),
  answerMode: answerModeEnum.optional().default('standard'),
  responsePrefs: z.object({ format: z.string().optional(), length: z.string().optional(), tone: z.string().optional() }).optional(),
  learningMode: z.boolean().optional().default(false),
});
const app = Fastify({ logger: true });
await app.register(cors, { origin: true });
@@ -62,6 +91,105 @@ app.post('/api/v1/agents/execute', async (req, reply) => {
}
});
/**
 * POST /api/v1/agents/search — Perplexity-style search over NDJSON.
 * Optionally prefetches user memory (balanced/quality modes only), then runs
 * the search orchestrator against a fresh session and forwards the session's
 * block/updateBlock/researchComplete events as one JSON document per line.
 */
app.post<{ Body: unknown }>('/api/v1/agents/search', async (req, reply) => {
  const parsed = searchBodySchema.safeParse(req.body);
  if (!parsed.success) {
    return reply.status(400).send({ message: 'Invalid request body', error: parsed.error.issues });
  }
  const body = parsed.data;
  if (!LLM_SVC_URL) {
    return reply.status(503).send({ message: 'LLM_SVC_URL not configured. llm-svc required.' });
  }
  if (body.message.content === '') {
    return reply.status(400).send({ message: 'Please provide a message to process' });
  }
  let memoryContext: string | undefined;
  const authHeader = req.headers.authorization;
  // Memory is best-effort and only for the slower modes; failures are logged, not fatal.
  const useMemory = MEMORY_SVC_URL && authHeader && (body.optimizationMode === 'balanced' || body.optimizationMode === 'quality');
  if (useMemory) {
    try {
      const memRes = await fetch(`${MEMORY_SVC_URL.replace(/\/$/, '')}/api/v1/memory`, {
        headers: { Authorization: authHeader! }, // non-null: guarded by useMemory
        signal: AbortSignal.timeout(3000),
      });
      if (memRes.ok) {
        const memData = (await memRes.json()) as { items?: { key: string; value: string }[] };
        const items = memData.items ?? [];
        if (items.length > 0) {
          memoryContext = items.map((r) => `- ${r.key}: ${r.value}`).join('\n');
        }
      }
    } catch (err) {
      req.log.warn({ err }, 'Memory fetch failed');
    }
  }
  try {
    const llm = createLlmClient({ providerId: body.chatModel.providerId, key: body.chatModel.key });
    // Wire format uses ['human'|'...', content] tuples; map to role objects.
    const history = body.history.map((msg) =>
      msg[0] === 'human' ? { role: 'user' as const, content: msg[1] } : { role: 'assistant' as const, content: msg[1] });
    const session = SessionManager.createSession();
    const encoder = new TextEncoder();
    const stream = new ReadableStream({
      start(controller) {
        // Bridge session events onto the NDJSON response.
        const disconnect = session.subscribe((event: string, data: unknown) => {
          const d = data as { type?: string; block?: unknown; blockId?: string; patch?: unknown; data?: unknown };
          if (event === 'data') {
            if (d.type === 'block') controller.enqueue(encoder.encode(JSON.stringify({ type: 'block', block: d.block }) + '\n'));
            else if (d.type === 'updateBlock') controller.enqueue(encoder.encode(JSON.stringify({ type: 'updateBlock', blockId: d.blockId, patch: d.patch }) + '\n'));
            else if (d.type === 'researchComplete') controller.enqueue(encoder.encode(JSON.stringify({ type: 'researchComplete' }) + '\n'));
          } else if (event === 'end') {
            controller.enqueue(encoder.encode(JSON.stringify({ type: 'messageEnd' }) + '\n'));
            controller.close();
            session.removeAllListeners();
          } else if (event === 'error') {
            controller.enqueue(encoder.encode(JSON.stringify({ type: 'error', data: d.data }) + '\n'));
            controller.close();
            session.removeAllListeners();
          }
        });
        // Fire-and-forget: orchestrator failures surface as session 'error' events.
        runSearchOrchestrator(session, {
          chatHistory: history,
          followUp: body.message.content,
          config: {
            llm,
            mode: body.optimizationMode,
            sources: (body.sources as ('web' | 'discussions' | 'academic')[]) ?? [],
            fileIds: body.files,
            systemInstructions: body.systemInstructions || 'None',
            locale: body.locale ?? 'en',
            memoryContext,
            answerMode: body.answerMode,
            responsePrefs: body.responsePrefs,
            learningMode: body.learningMode,
          },
        }).catch((err: Error) => {
          req.log.error(err);
          session.emit('error', { data: err?.message ?? 'Error during search.' });
        });
        // NOTE(review): Node's IncomingMessage emits 'close'/'aborted', not
        // 'abort' — confirm this cleanup handler ever fires on client disconnect.
        req.raw.on?.('abort', () => {
          disconnect();
          try {
            controller.close();
          } catch {}
        });
      },
    });
    return reply
      .header('Content-Type', 'application/x-ndjson')
      .header('Cache-Control', 'no-cache')
      .send(stream);
  } catch (err) {
    req.log.error(err);
    return reply.status(500).send({ message: 'An error occurred while processing search request' });
  }
});
try {
await app.listen({ port: PORT, host: '0.0.0.0' });
console.log(`master-agents-svc listening on :${PORT}`);

View File

@@ -0,0 +1,28 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
// Output contract: a single short natural-language plan paragraph.
const schema = z.object({
  plan: z.string().describe('A concise natural-language plan in one short paragraph. Open with a short intent phrase and lay out the steps you will take.'),
});
// Long-form description shown to the model; insists this tool is called first.
const actionDescription = `
Use this tool FIRST on every turn to state your plan in natural language before any other action. Keep it short, action-focused, and tailored to the current query.
Make sure to not include reference to any tools or actions you might take, just the plan itself.
YOU CAN NEVER CALL ANY OTHER TOOL BEFORE CALLING THIS ONE FIRST, IF YOU DO, THAT CALL WOULD BE IGNORED.
`;
/**
 * Pseudo-action that makes the agent emit a reasoning preamble ("plan")
 * before any real tool call. Disabled in 'speed' mode. `execute` simply
 * echoes the plan back as a reasoning output — no side effects.
 */
const planAction: ResearchAction<typeof schema> = {
  name: '__reasoning_preamble',
  schema,
  getToolDescription: () =>
    'Use this FIRST on every turn to state your plan in natural language before any other action. Keep it short, action-focused, and tailored to the current query.',
  getDescription: () => actionDescription,
  enabled: (config) => config.mode !== 'speed',
  execute: async (input) => ({
    type: 'reasoning',
    reasoning: input.plan,
  }),
};
export default planAction;

View File

@@ -0,0 +1,77 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
import type { Chunk, SearchResultsResearchBlock } from '../types.js';
import { searchSearxng } from '../searxng.js';
const schema = z.object({
  queries: z.array(z.string()).describe('List of academic search queries'),
});
/**
 * Research action: runs up to 3 queries against scholarly SearXNG engines
 * (arxiv, google scholar, pubmed), streaming progress into the session's
 * research block. Enabled only when the 'academic' source is selected and
 * the classifier requested an academic (non-skipped) search.
 */
const academicSearchAction: ResearchAction<typeof schema> = {
  name: 'academic_search',
  schema,
  getToolDescription: () =>
    'Use this tool to perform academic searches for scholarly articles, papers, and research studies. Provide up to 3 queries at a time.',
  getDescription: () =>
    'Use this tool to perform academic searches for scholarly articles and research studies. Provide concise search queries. You can provide up to 3 queries at a time.',
  enabled: (config) =>
    config.sources.includes('academic') &&
    config.classification.classification.skipSearch === false &&
    config.classification.classification.academicSearch === true,
  execute: async (input, additionalConfig) => {
    // Hard cap fan-out at 3 queries.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    // Announce the queries as a 'searching' sub-step before any fetching.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
      ]);
    }
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    const results: Chunk[] = [];
    const search = async (q: string) => {
      const res = await searchSearxng(q, { engines: ['arxiv', 'google scholar', 'pubmed'] });
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: { title: r.title, url: r.url },
      }));
      results.push(...resultChunks);
      // The first query to finish creates the shared 'search_results' sub-step;
      // later finishers append to it (interleaving is safe on the single-threaded
      // event loop — each continuation runs to completion).
      if (!searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
        ]);
      } else if (searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === searchResultsBlockId);
        const subStep = researchBlock.data.subSteps[subStepIndex] as SearchResultsResearchBlock | undefined;
        if (subStep) {
          subStep.reading.push(...resultChunks);
          additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
            { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
          ]);
        }
      }
    };
    await Promise.all(input.queries.map(search));
    return { type: 'search_results', results };
  },
};
export default academicSearchAction;

View File

@@ -0,0 +1,16 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
// No arguments: calling the tool is itself the completion signal.
const emptySchema = z.object({});
/**
 * Sentinel action the agent calls to end the research loop. Always enabled;
 * `execute` only returns { type: 'done' } (presumably interpreted by the
 * orchestrator as "stop researching and write the answer" — confirm there).
 */
const doneAction: ResearchAction<typeof emptySchema> = {
  name: 'done',
  schema: emptySchema,
  getToolDescription: () =>
    'Only call this after __reasoning_preamble AND after any other needed tool calls when you truly have enough to answer. Do not call if information is still missing.',
  getDescription: () =>
    'Use this action ONLY when you have completed all necessary research and are ready to provide a final answer. YOU MUST CALL THIS ACTION TO SIGNAL COMPLETION; DO NOT OUTPUT FINAL ANSWERS DIRECTLY TO THE USER.',
  enabled: () => true,
  execute: async () => ({ type: 'done' }),
};
export default doneAction;

View File

@@ -0,0 +1,57 @@
import type { ResearchAction, ActionConfig, AdditionalConfig, ToolCall } from './types.js';
import __reasoning_preamble from './__reasoning_preamble.js';
import done from './done.js';
import webSearch from './web_search.js';
import academicSearch from './academic_search.js';
import socialSearch from './social_search.js';
import scrapeUrl from './scrape_url.js';
// Module-level registry of every research action, keyed by action name.
const actions = new Map<string, ResearchAction>();
function register(action: ResearchAction) {
  actions.set(action.name, action);
}
register(__reasoning_preamble);
register(done);
register(webSearch);
register(academicSearch);
register(socialSearch);
register(scrapeUrl);
/** Looks up a single action by its registered name. */
export function getAction(name: string): ResearchAction | undefined {
  return actions.get(name);
}
/** Actions whose `enabled` predicate accepts the current request config. */
export function getAvailableActions(config: ActionConfig): ResearchAction[] {
  return Array.from(actions.values()).filter((a) => a.enabled(config));
}
/** Tool definitions (name/description/schema) for the LLM tool-calling API. */
export function getAvailableActionTools(config: ActionConfig): { name: string; description: string; schema: unknown }[] {
  return getAvailableActions(config).map((a) => ({
    name: a.name,
    description: a.getToolDescription({ mode: config.mode }),
    schema: a.schema,
  }));
}
/** Prompt-ready <tool> blocks describing each available action. */
export function getAvailableActionsDescriptions(config: ActionConfig): string {
  return getAvailableActions(config)
    .map((a) => `<tool name="${a.name}">\n${a.getDescription({ mode: config.mode })}\n</tool>`)
    .join('\n\n');
}
/** Executes all tool calls in parallel; rejects if any call names an unregistered action. */
export async function executeAll(
  toolCalls: ToolCall[],
  additionalConfig: AdditionalConfig,
): Promise<import('./types.js').ActionOutput[]> {
  const results = await Promise.all(
    toolCalls.map(async (tc) => {
      const action = getAction(tc.name);
      if (!action) throw new Error(`Action ${tc.name} not found`);
      return action.execute(tc.arguments as never, additionalConfig);
    }),
  );
  return results;
}

View File

@@ -0,0 +1,71 @@
import z from 'zod';
import TurndownService from 'turndown';
import type { ResearchAction } from './types.js';
import type { Chunk, ReadingResearchBlock } from '../types.js';
// Shared converter instance, built once per module load.
const turndownService = new TurndownService();
const schema = z.object({
  urls: z.array(z.string()).describe('A list of URLs to scrape content from.'),
});
/**
 * Research action: fetches up to 3 user-supplied URLs, converts the response
 * body to markdown via Turndown, and streams "reading" progress into the
 * session's research block. Per-URL failures are reported as error chunks so
 * one bad URL does not abort the batch.
 */
const scrapeURLAction: ResearchAction<typeof schema> = {
  name: 'scrape_url',
  schema,
  getToolDescription: () =>
    'Use this tool to scrape and extract content from the provided URLs. You can provide up to 3 URLs at a time. NEVER CALL THIS TOOL EXPLICITLY YOURSELF UNLESS INSTRUCTED TO DO SO BY THE USER.',
  getDescription: () =>
    'Use this tool to scrape content from specific web pages. Only call when the user has specifically requested information from certain URLs. Never call yourself to get extra information without user instruction.',
  enabled: () => true,
  execute: async (params, additionalConfig) => {
    // Hard cap to keep latency and payload size bounded.
    params.urls = params.urls.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    // One shared 'reading' sub-step collects every URL; created lazily by the
    // first URL to respond. `const` — the id is never reassigned (was `let`).
    const readingBlockId = crypto.randomUUID();
    let readingEmitted = false;
    const results: Chunk[] = [];
    await Promise.all(
      params.urls.map(async (url) => {
        try {
          const res = await fetch(url, { signal: AbortSignal.timeout(10000) });
          const text = await res.text();
          // Cheap title extraction; assumes HTML input — non-HTML responses
          // simply fall back to the URL-based title.
          const title = text.match(/<title>(.*?)<\/title>/i)?.[1] || `Content from ${url}`;
          if (!readingEmitted && researchBlock && researchBlock.type === 'research') {
            readingEmitted = true;
            researchBlock.data.subSteps.push({
              id: readingBlockId,
              type: 'reading',
              reading: [{ content: '', metadata: { url, title } }],
            });
            additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
              { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
            ]);
          } else if (readingEmitted && researchBlock && researchBlock.type === 'research') {
            const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === readingBlockId);
            const subStep = researchBlock.data.subSteps[subStepIndex] as ReadingResearchBlock | undefined;
            if (subStep) {
              subStep.reading.push({ content: '', metadata: { url, title } });
              additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
                { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
              ]);
            }
          }
          const markdown = turndownService.turndown(text);
          results.push({ content: markdown, metadata: { url, title } });
        } catch (error) {
          // Keep the batch alive: surface the failure as a result chunk.
          results.push({
            content: `Failed to fetch content from ${url}: ${error}`,
            metadata: { url, title: `Error fetching ${url}` },
          });
        }
      }),
    );
    return { type: 'search_results', results };
  },
};
export default scrapeURLAction;

View File

@@ -0,0 +1,77 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
import type { Chunk, SearchResultsResearchBlock } from '../types.js';
import { searchSearxng } from '../searxng.js';
const schema = z.object({
  queries: z.array(z.string()).describe('List of social search queries'),
});
/**
 * Research action: runs up to 3 queries against the Reddit SearXNG engine,
 * streaming progress into the session's research block. Enabled only when
 * the 'discussions' source is selected and the classifier requested a
 * discussion (non-skipped) search. Mirrors academic_search's structure.
 */
const socialSearchAction: ResearchAction<typeof schema> = {
  name: 'social_search',
  schema,
  getToolDescription: () =>
    'Use this tool to perform social media searches for relevant posts, discussions, and trends. Provide up to 3 queries at a time.',
  getDescription: () =>
    'Use this tool to perform social media searches for posts, discussions, and trends. Provide concise search queries. You can provide up to 3 queries at a time.',
  enabled: (config) =>
    config.sources.includes('discussions') &&
    config.classification.classification.skipSearch === false &&
    config.classification.classification.discussionSearch === true,
  execute: async (input, additionalConfig) => {
    // Hard cap fan-out at 3 queries.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    // Announce the queries as a 'searching' sub-step before any fetching.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
      ]);
    }
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    const results: Chunk[] = [];
    const search = async (q: string) => {
      const res = await searchSearxng(q, { engines: ['reddit'] });
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: { title: r.title, url: r.url },
      }));
      results.push(...resultChunks);
      // First finished query creates the shared 'search_results' sub-step;
      // later finishers append to it (safe on the single-threaded event loop).
      if (!searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
        ]);
      } else if (searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === searchResultsBlockId);
        const subStep = researchBlock.data.subSteps[subStepIndex] as SearchResultsResearchBlock | undefined;
        if (subStep) {
          subStep.reading.push(...resultChunks);
          additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
            { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
          ]);
        }
      }
    };
    await Promise.all(input.queries.map(search));
    return { type: 'search_results', results };
  },
};
export default socialSearchAction;

View File

@@ -0,0 +1,52 @@
import z from 'zod';
import type SessionManager from '../session.js';
import type { Chunk } from '../types.js';
/**
 * Output of the query classifier: boolean routing flags plus a
 * context-free reformulation of the user's last message.
 */
export type ClassifierOutput = {
  classification: {
    skipSearch: boolean; // answerable from general knowledge, no search needed
    personalSearch: boolean; // requires user-uploaded documents
    academicSearch: boolean; // requires scholarly sources
    discussionSearch: boolean; // requires forum/social sources
    showWeatherWidget: boolean; // a weather widget can (partially) answer
    showStockWidget: boolean; // a stock widget can (partially) answer
    showCalculationWidget: boolean; // a calculation widget can (partially) answer
  };
  standaloneFollowUp: string; // self-contained rephrasing of the latest query
};
/** Search source categories a user can enable. */
export type SearchSources = 'web' | 'discussions' | 'academic';
/** Research effort level; selects the iteration budget and prompt variant. */
export type SearchMode = 'speed' | 'balanced' | 'quality';
/** Static configuration used by `enabled()` to decide which actions run. */
export type ActionConfig = {
  classification: ClassifierOutput;
  fileIds: string[];
  mode: SearchMode;
  sources: SearchSources[];
  hasEmbedding?: boolean; // presumably true when an embedding model is configured — TODO confirm
};
/** Per-run context handed to every action's execute(). */
export type AdditionalConfig = {
  session: SessionManager; // live session used to emit/patch UI blocks
  researchBlockId: string; // id of the research block actions append sub-steps to
  fileIds: string[];
};
export type SearchActionOutput = { type: 'search_results'; results: Chunk[] };
export type DoneActionOutput = { type: 'done' };
export type ReasoningActionOutput = { type: 'reasoning'; reasoning: string };
/** Union of everything an action's execute() may return. */
export type ActionOutput = SearchActionOutput | DoneActionOutput | ReasoningActionOutput;
/** One LLM tool invocation as surfaced by the streaming client. */
export type ToolCall = { id: string; name: string; arguments: Record<string, unknown> };
/**
 * Contract every research action implements: a zod schema for its
 * arguments, an enablement predicate, prompt descriptions, and the
 * executor that performs the work and reports progress via the session.
 */
export interface ResearchAction<TSchema extends z.ZodObject<Record<string, z.ZodTypeAny>> = z.ZodObject<Record<string, z.ZodTypeAny>>> {
  name: string;
  schema: TSchema;
  getToolDescription: (config: { mode: SearchMode }) => string;
  getDescription: (config: { mode: SearchMode }) => string;
  enabled: (config: ActionConfig) => boolean;
  execute: (
    params: z.infer<TSchema>,
    additionalConfig: AdditionalConfig,
  ) => Promise<ActionOutput>;
}

View File

@@ -0,0 +1,80 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
import type { Chunk, SearchResultsResearchBlock } from '../types.js';
import { searchSearxng } from '../searxng.js';
/** Arguments accepted by the web_search tool (max 3 queries are used). */
const schema = z.object({
  queries: z.array(z.string()).describe('An array of search queries to perform web searches for.'),
});
/**
 * web_search — general SearXNG web search. Runs up to three queries in
 * parallel and streams their results into the research block's sub-steps.
 *
 * NOTE(review): the parallel `search` closures all mutate the shared
 * `researchBlock.data.subSteps` array in place; the emit/append branches
 * below depend on that exact ordering.
 */
const webSearchAction: ResearchAction<typeof schema> = {
  name: 'web_search',
  schema,
  getToolDescription: () =>
    'Use this tool to perform web searches based on the provided queries. You can provide up to 3 queries at a time.',
  getDescription: () =>
    'Use this tool to perform web searches. Your queries should be targeted and specific, SEO-friendly keywords. You can search for 3 queries in one go.',
  enabled: (config) =>
    config.sources.includes('web') && config.classification.classification.skipSearch === false,
  execute: async (input, additionalConfig) => {
    // Hard cap: never fan out more than 3 concurrent searches.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    if (researchBlock && researchBlock.type === 'research') {
      // Surface the queries in the UI before any network round-trip.
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
      ]);
    }
    // All queries share one search_results sub-step: the first query to
    // return creates it, later ones append to its `reading` list.
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    const results: Chunk[] = [];
    const search = async (q: string) => {
      let res: { results: { content?: string; title: string; url: string }[] };
      try {
        res = await searchSearxng(q);
      } catch {
        // A failed query is skipped silently so the remaining queries
        // in the Promise.all batch still contribute results.
        return;
      }
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title, // fall back to the title when the engine gives no snippet
        metadata: { title: r.title, url: r.url },
      }));
      results.push(...resultChunks);
      if (!searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
        ]);
      } else if (searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        // Sub-step already exists — append to it and re-emit the patch.
        const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === searchResultsBlockId);
        const subStep = researchBlock.data.subSteps[subStepIndex] as SearchResultsResearchBlock | undefined;
        if (subStep) {
          subStep.reading.push(...resultChunks);
          additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
            { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
          ]);
        }
      }
    };
    await Promise.all(input.queries.map(search));
    return { type: 'search_results', results };
  },
};
export default webSearchAction;

View File

@@ -0,0 +1,39 @@
import z from 'zod';
import type { LlmClient } from '../llm-client.js';
import { getClassifierPrompt } from '../prompts/classifier.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
// Structured output schema for the classifier LLM call.
// Mirrors ClassifierOutput in actions/types.ts — keep the two in sync.
const schema = z.object({
  classification: z.object({
    skipSearch: z.boolean(),
    personalSearch: z.boolean(),
    academicSearch: z.boolean(),
    discussionSearch: z.boolean(),
    showWeatherWidget: z.boolean(),
    showStockWidget: z.boolean(),
    showCalculationWidget: z.boolean(),
  }),
  standaloneFollowUp: z.string(),
});
/**
 * Input to classify().
 * NOTE(review): `enabledSources` is accepted but not used inside
 * classify() as written — confirm whether the prompt should mention it.
 */
export type ClassifierInput = {
  chatHistory: { role: string; content: string }[];
  query: string;
  llm: LlmClient;
  locale?: string;
  enabledSources: ('web' | 'discussions' | 'academic')[];
};
/**
 * Runs the classifier prompt over the conversation history and latest
 * query, returning routing flags plus a standalone reformulation of
 * the query (structured output via llm-svc's generateObject).
 */
export async function classify(input: ClassifierInput): Promise<z.infer<typeof schema>> {
  type Output = z.infer<typeof schema>;
  const { llm, chatHistory, query, locale } = input;
  const historyText = formatChatHistoryAsString(chatHistory);
  const userContent =
    `<conversation_history>\n${historyText}\n</conversation_history>\n` +
    `<user_query>\n${query}\n</user_query>`;
  return llm.generateObject<Output>({
    messages: [
      { role: 'system', content: getClassifierPrompt(locale) },
      { role: 'user', content: userContent },
    ],
    schema,
  });
}

View File

@@ -0,0 +1,195 @@
import z from 'zod';
import type { LlmClient } from '../llm-client.js';
import type SessionManager from '../session.js';
import type { Chunk, ReasoningResearchBlock } from '../types.js';
import type { ClassifierOutput } from '../actions/types.js';
import { getResearcherPrompt } from '../prompts/researcher.js';
import { getAvailableActionTools, getAvailableActionsDescriptions, executeAll } from '../actions/registry.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
/** Static configuration for one research run. */
export type ResearcherConfig = {
  mode: 'speed' | 'balanced' | 'quality'; // controls the iteration budget (2 / 6 / 25)
  sources: ('web' | 'discussions' | 'academic')[];
  fileIds: string[];
  locale?: string;
};
/** Conversation context plus classifier output for one run. */
export type ResearcherInput = {
  chatHistory: { role: string; content: string }[];
  followUp: string;
  classification: ClassifierOutput;
  config: ResearcherConfig;
};
/** Everything the researcher found, deduplicated by URL. */
export type ResearcherOutput = {
  searchFindings: Chunk[];
};
/** One LLM tool call (same shape as ToolCall in actions/types.ts). */
export type ToolCall = { id: string; name: string; arguments: Record<string, unknown> };
/**
 * Iterative tool-use loop ("researcher"): asks the LLM which tools to
 * call, executes them, feeds results back as tool messages, and repeats
 * up to a mode-dependent budget. Streams progress (reasoning /
 * searching / reading sub-steps) into a `research` block and finally
 * emits a URL-deduplicated `source` block.
 *
 * @returns the deduplicated search findings for the writer stage.
 */
export async function research(
  session: SessionManager,
  llm: LlmClient,
  input: ResearcherInput,
): Promise<ResearcherOutput> {
  // Effort budget: speed=2, balanced=6, quality=25 iterations.
  const maxIteration = input.config.mode === 'speed' ? 2 : input.config.mode === 'balanced' ? 6 : 25;
  const actionConfig = {
    classification: input.classification,
    fileIds: input.config.fileIds,
    mode: input.config.mode,
    sources: input.config.sources,
    hasEmbedding: false, // embeddings disabled here — TODO confirm this is intentional
  };
  const availableTools = getAvailableActionTools(actionConfig);
  const availableActionsDescription = getAvailableActionsDescriptions(actionConfig);
  const researchBlockId = crypto.randomUUID();
  session.emitBlock({
    id: researchBlockId,
    type: 'research',
    data: { subSteps: [] },
  });
  // Agent-side message history (separate from the user conversation);
  // only the last 10 turns are inlined to bound prompt size.
  const agentMessageHistory: { role: string; content: string; tool_calls?: ToolCall[] }[] = [
    {
      role: 'user',
      content: `<conversation>\n${formatChatHistoryAsString(input.chatHistory.slice(-10))}\nUser: ${input.followUp} (Standalone: ${input.classification.standaloneFollowUp})\n</conversation>`,
    },
  ];
  const actionOutput: { type: string; results?: Chunk[] }[] = [];
  for (let i = 0; i < maxIteration; i++) {
    const researcherPrompt = getResearcherPrompt(
      availableActionsDescription,
      input.config.mode,
      i,
      maxIteration,
      '', // no file description passed — TODO confirm uploads are handled elsewhere
      input.config.locale,
    );
    const toolsForLlm = availableTools.map((t) => ({
      name: t.name,
      description: t.description,
      schema: t.schema as z.ZodObject<Record<string, z.ZodTypeAny>>,
    }));
    const actionStream = llm.streamText({
      messages: [{ role: 'system', content: researcherPrompt }, ...agentMessageHistory],
      tools: toolsForLlm,
    });
    // Captured once per iteration; the pushes below mutate it in place.
    const block = session.getBlock(researchBlockId);
    let reasoningEmitted = false;
    let reasoningId = crypto.randomUUID();
    const finalToolCalls: ToolCall[] = [];
    for await (const partialRes of actionStream) {
      if (partialRes.toolCallChunk?.length) {
        for (const tc of partialRes.toolCallChunk) {
          // The synthetic __reasoning_preamble tool streams the model's
          // plan: the first chunk creates a reasoning sub-step, later
          // chunks overwrite its text as `plan` grows.
          if (
            tc.name === '__reasoning_preamble' &&
            tc.arguments?.plan &&
            !reasoningEmitted &&
            block &&
            block.type === 'research'
          ) {
            reasoningEmitted = true;
            (block.data.subSteps as ResearchBlockSubStep[]).push({
              id: reasoningId,
              type: 'reasoning',
              reasoning: String(tc.arguments.plan),
            });
            session.updateBlock(researchBlockId, [
              { op: 'replace', path: '/data/subSteps', value: block.data.subSteps },
            ]);
          } else if (
            tc.name === '__reasoning_preamble' &&
            tc.arguments?.plan &&
            reasoningEmitted &&
            block &&
            block.type === 'research'
          ) {
            const subStepIndex = block.data.subSteps.findIndex((s) => s.id === reasoningId);
            if (subStepIndex !== -1) {
              const subStep = block.data.subSteps[subStepIndex] as ReasoningResearchBlock;
              subStep.reasoning = String(tc.arguments.plan);
              session.updateBlock(researchBlockId, [
                { op: 'replace', path: '/data/subSteps', value: block.data.subSteps },
              ]);
            }
          }
          // Merge streamed tool-call fragments by id: later fragments
          // extend the accumulated arguments of the same call.
          const existingIndex = finalToolCalls.findIndex((ftc) => ftc.id === tc.id);
          if (existingIndex !== -1) {
            finalToolCalls[existingIndex].arguments = { ...finalToolCalls[existingIndex].arguments, ...tc.arguments };
          } else {
            finalToolCalls.push({ id: tc.id, name: tc.name, arguments: tc.arguments ?? {} });
          }
        }
      }
    }
    // No tool calls → the model is done; a trailing `done` call means the same.
    if (finalToolCalls.length === 0) break;
    if (finalToolCalls[finalToolCalls.length - 1].name === 'done') break;
    agentMessageHistory.push({
      role: 'assistant',
      content: '',
      tool_calls: finalToolCalls,
    });
    const results = await executeAll(finalToolCalls, {
      session,
      researchBlockId,
      fileIds: input.config.fileIds,
    });
    actionOutput.push(...results);
    // NOTE(review): assumes executeAll returns one result per tool call,
    // index-aligned with finalToolCalls — confirm against the registry.
    for (let j = 0; j < finalToolCalls.length; j++) {
      agentMessageHistory.push({
        role: 'tool',
        content: JSON.stringify(results[j]),
        id: finalToolCalls[j].id,
        name: finalToolCalls[j].name,
      } as { role: string; content: string; id: string; name: string });
    }
  }
  const searchResults = actionOutput
    .filter((a) => a.type === 'search_results' && a.results)
    .flatMap((a) => a.results as Chunk[]);
  // Deduplicate by URL: the first chunk for a URL is kept (and later
  // duplicates merge their content into it — same object reference),
  // chunks without a URL always pass through.
  const seenUrls = new Map<string, number>();
  const filteredSearchResults = searchResults
    .map((result, index) => {
      const url = result.metadata?.url as string | undefined;
      if (url && !seenUrls.has(url)) {
        seenUrls.set(url, index);
        return result;
      }
      if (url && seenUrls.has(url)) {
        const existingIndex = seenUrls.get(url)!;
        const existing = searchResults[existingIndex];
        existing.content += `\n\n${result.content}`;
        return undefined;
      }
      return result;
    })
    .filter((r): r is Chunk => r !== undefined);
  session.emitBlock({
    id: crypto.randomUUID(),
    type: 'source',
    data: filteredSearchResults,
  });
  return { searchFindings: filteredSearchResults };
}
// Loose local view of a research sub-step (reasoning/searching/reading).
type ResearchBlockSubStep = { id: string; type: string; reasoning?: string; reading?: Chunk[]; searching?: string[] };

View File

@@ -0,0 +1,156 @@
import type { LlmClient } from '../llm-client.js';
import SessionManager from '../session.js';
import type { TextBlock } from '../types.js';
import type { ClassifierOutput } from '../actions/types.js';
import { getClassifierPrompt } from '../prompts/classifier.js';
import { getWriterPrompt } from '../prompts/writer.js';
import { classify } from './classifier.js';
import { research } from './researcher.js';
import { executeAllWidgets } from '../widgets/index.js';
/** Full configuration for one orchestrated answer. */
export type SearchOrchestratorConfig = {
  llm: LlmClient;
  mode: 'speed' | 'balanced' | 'quality';
  sources: ('web' | 'discussions' | 'academic')[];
  fileIds: string[];
  systemInstructions: string; // user-provided instructions forwarded to the writer prompt
  locale?: string;
  memoryContext?: string; // long-term memory snippet injected into the writer prompt
  answerMode?: import('../prompts/writer.js').AnswerMode;
  responsePrefs?: { format?: string; length?: string; tone?: string };
  learningMode?: boolean;
};
/** Conversation state for one orchestrator run. */
export type SearchOrchestratorInput = {
  chatHistory: { role: string; content: string }[];
  followUp: string;
  config: SearchOrchestratorConfig;
};
/**
 * Top-level pipeline for one user turn:
 * classify → (widgets ∥ research) → write answer stream → fallback.
 * All user-visible output is emitted through the session as blocks;
 * the function resolves after the final `end` event.
 */
export async function runSearchOrchestrator(
  session: SessionManager,
  input: SearchOrchestratorInput,
): Promise<void> {
  const { chatHistory, followUp, config } = input;
  const classification = await classify({
    chatHistory,
    query: followUp,
    llm: config.llm,
    locale: config.locale,
    enabledSources: config.sources,
  });
  // Widgets run in parallel with research; each finished widget is
  // emitted immediately as its own block.
  const widgetPromise = executeAllWidgets({
    chatHistory,
    followUp,
    classification,
    llm: config.llm,
  }).then((outputs) => {
    for (const o of outputs) {
      session.emitBlock({
        id: crypto.randomUUID(),
        type: 'widget',
        data: { widgetType: o.type, params: o.data ?? {} },
      });
    }
    return outputs;
  });
  let searchPromise: Promise<{ searchFindings: import('../types.js').Chunk[] }> | null = null;
  if (!classification.classification.skipSearch) {
    searchPromise = research(session, config.llm, {
      chatHistory,
      followUp,
      classification,
      config: {
        mode: config.mode,
        sources: config.sources,
        fileIds: config.fileIds,
        locale: config.locale,
      },
    });
  }
  const [widgetOutputs, searchResults] = await Promise.all([widgetPromise, searchPromise ?? Promise.resolve({ searchFindings: [] })]);
  session.emit('data', { type: 'researchComplete' });
  // Truncation limits keep the writer prompt within budget.
  const MAX_RESULTS_FOR_WRITER = 15;
  const MAX_CONTENT_PER_RESULT = 180;
  const findingsForWriter = (searchResults?.searchFindings ?? []).slice(0, MAX_RESULTS_FOR_WRITER);
  const finalContext =
    findingsForWriter
      .map((f, index) => {
        const content = f.content.length > MAX_CONTENT_PER_RESULT ? f.content.slice(0, MAX_CONTENT_PER_RESULT) + '…' : f.content;
        // Double quotes in titles would break the attribute — swap for single quotes.
        return `<result index=${index + 1} title="${String(f.metadata?.title ?? '').replace(/"/g, "'")}">${content}</result>`;
      })
      .join('\n') || '';
  const widgetContext = widgetOutputs
    .map((o) => `<result>${o.llmContext}</result>`)
    .join('\n-------------\n');
  // Search results are citable; widget output is context-only (already shown).
  const finalContextWithWidgets =
    `<search_results note="These are the search results and assistant can cite these">\n${finalContext}\n</search_results>\n` +
    `<widgets_result noteForAssistant="Its output is already showed to the user, assistant can use this information to answer the query but do not CITE this as a source">\n${widgetContext}\n</widgets_result>`;
  const writerPrompt = getWriterPrompt(
    finalContextWithWidgets,
    config.systemInstructions,
    config.mode,
    config.locale,
    config.memoryContext,
    config.answerMode,
    config.responsePrefs,
    config.learningMode,
  );
  const answerStream = config.llm.streamText({
    messages: [
      { role: 'system', content: writerPrompt },
      ...chatHistory,
      { role: 'user', content: followUp },
    ],
    options: { maxTokens: 4096 },
  });
  // Stream the answer: the first non-empty chunk creates the text block,
  // every later chunk patches it in place.
  let responseBlockId = '';
  let hasContent = false;
  for await (const chunk of answerStream) {
    if (!chunk.contentChunk && !responseBlockId) continue;
    if (!responseBlockId) {
      const block: TextBlock = {
        id: crypto.randomUUID(),
        type: 'text',
        data: chunk.contentChunk ?? '',
      };
      session.emitBlock(block);
      responseBlockId = block.id;
      if (chunk.contentChunk) hasContent = true;
    } else {
      const block = session.getBlock(responseBlockId) as TextBlock | null;
      if (block) {
        block.data += chunk.contentChunk ?? '';
        if (chunk.contentChunk) hasContent = true;
        session.updateBlock(block.id, [{ op: 'replace', path: '/data', value: block.data }]);
      }
    }
  }
  // Fallback: the LLM produced nothing but search did — render a plain
  // source digest (Russian UI text) so the user still gets an answer.
  if (!hasContent && findingsForWriter.length > 0) {
    const lines = findingsForWriter.slice(0, 10).map((f, i) => {
      const title = (f.metadata?.title as string) ?? 'Без названия';
      const excerpt = f.content.length > 120 ? f.content.slice(0, 120) + '…' : f.content;
      return `${i + 1}. **${title}** — ${excerpt}`;
    });
    session.emitBlock({
      id: crypto.randomUUID(),
      type: 'text',
      data: `## По найденным источникам\n\n${lines.join('\n\n')}\n\n*Ответ LLM недоступен. Проверьте модель в Settings.*`,
    });
  }
  session.emit('end', {});
}

View File

@@ -0,0 +1,37 @@
/**
 * EmbeddingClient — HTTP client to llm-svc for embeddings.
 */
// Captured once at module load; empty string means "not configured".
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
export interface EmbeddingClient {
  embedText(texts: string[]): Promise<number[][]>;
  embedChunks(chunks: { content: string; metadata: Record<string, unknown> }[]): Promise<number[][]>;
}
/** Returns the llm-svc base URL without a trailing slash; throws when unset. */
function getBaseUrl(): string {
  if (!LLM_SVC_URL) throw new Error('LLM_SVC_URL is required for EmbeddingClient');
  return LLM_SVC_URL.replace(/\/$/, '');
}
/**
 * Creates a client bound to one embedding model.
 * Throws immediately (not on first call) when LLM_SVC_URL is missing.
 */
export function createEmbeddingClient(model: { providerId: string; key: string }): EmbeddingClient {
  const base = getBaseUrl();
  // Shared implementation in a closure so embedChunks does not depend on
  // `this` (the original object-literal version broke when its methods
  // were destructured or passed as callbacks).
  const embed = async (texts: string[]): Promise<number[][]> => {
    if (texts.length === 0) return [];
    const res = await fetch(`${base}/api/v1/embeddings`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, texts }),
      signal: AbortSignal.timeout(60000),
    });
    if (!res.ok) throw new Error(`llm-svc embeddings failed: ${res.status} ${await res.text()}`);
    const data = (await res.json()) as { embeddings: number[][] };
    return data.embeddings;
  };
  return {
    embedText: (texts) => embed(texts),
    embedChunks: (chunks) => embed(chunks.map((c) => c.content)),
  };
}

View File

@@ -0,0 +1,100 @@
/**
 * LlmClient — HTTP client to llm-svc for text generation.
 */
import z from 'zod';
/** A chat message; id/name/tool_calls are used only for tool-protocol messages. */
export type Message = { role: string; content: string; id?: string; name?: string; tool_calls?: { id: string; name: string; arguments: Record<string, unknown> }[] };
/** One tool invocation requested by the model. */
export type ToolCall = { id: string; name: string; arguments: Record<string, unknown> };
// Captured once at module load; empty string means "not configured".
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
/** Request payload shared by generateText and streamText. */
export type GenerateTextInput = {
  messages: Message[];
  tools?: { name: string; description: string; schema: z.ZodObject<Record<string, z.ZodTypeAny>> }[];
  options?: { maxTokens?: number; temperature?: number };
};
export type GenerateTextOutput = { content: string; toolCalls: ToolCall[] };
/** One NDJSON frame of the streaming endpoint. */
export type StreamTextOutput = { contentChunk: string; toolCallChunk: ToolCall[]; done?: boolean };
/** HTTP facade over llm-svc's generation endpoints. */
export interface LlmClient {
  generateText(input: GenerateTextInput): Promise<GenerateTextOutput>;
  streamText(input: GenerateTextInput): AsyncGenerator<StreamTextOutput>;
  generateObject<T>(input: { schema: z.ZodTypeAny; messages: Message[]; options?: object }): Promise<T>;
}
/** Returns the llm-svc base URL without a trailing slash; throws when unset. */
function getBaseUrl(): string {
  if (!LLM_SVC_URL) throw new Error('LLM_SVC_URL required');
  return LLM_SVC_URL.replace(/\/$/, '');
}
/** Converts a zod tool schema to JSON Schema for the wire format (zod v4 API). */
function serializeTool(t: NonNullable<GenerateTextInput['tools']>[0]) {
  return { name: t.name, description: t.description, schema: z.toJSONSchema(t.schema) as Record<string, unknown> };
}
/**
 * Creates an LlmClient bound to one model.
 * Throws immediately (not on first call) when LLM_SVC_URL is missing.
 */
export function createLlmClient(model: { providerId: string; key: string }): LlmClient {
  const base = getBaseUrl();
  /**
   * Parses one NDJSON line from the stream. Returns null for
   * incomplete/garbled JSON (silently skipped, as before); a
   * server-reported `error` field is rethrown as an Error.
   */
  const parseLine = (line: string): StreamTextOutput | null => {
    try {
      const parsed = JSON.parse(line) as StreamTextOutput | { error?: string };
      if ('error' in parsed && parsed.error) throw new Error(parsed.error);
      return parsed as StreamTextOutput;
    } catch (e) {
      if (!(e instanceof SyntaxError)) throw e;
      return null;
    }
  };
  return {
    async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
      const res = await fetch(`${base}/api/v1/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, messages: input.messages, tools: input.tools?.map(serializeTool), options: input.options }),
        signal: AbortSignal.timeout(120000),
      });
      if (!res.ok) throw new Error(`llm-svc generate failed: ${res.status} ${await res.text()}`);
      return res.json() as Promise<GenerateTextOutput>;
    },
    // Streams NDJSON frames from llm-svc, yielding one StreamTextOutput per line.
    async *streamText(input: GenerateTextInput): AsyncGenerator<StreamTextOutput> {
      const res = await fetch(`${base}/api/v1/generate/stream`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, messages: input.messages, tools: input.tools?.map(serializeTool), options: input.options }),
        signal: AbortSignal.timeout(120000),
      });
      if (!res.ok) throw new Error(`llm-svc stream failed: ${res.status} ${await res.text()}`);
      const reader = res.body?.getReader();
      if (!reader) throw new Error('No response body');
      const decoder = new TextDecoder();
      let buffer = '';
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() ?? ''; // keep the trailing partial line for the next read
        for (const line of lines) {
          if (!line.trim()) continue;
          const parsed = parseLine(line);
          if (parsed) yield parsed;
        }
      }
      // Flush any bytes still buffered inside the decoder — the original
      // skipped this final decode() call, dropping a trailing multi-byte
      // UTF-8 sequence split across the last network chunk.
      buffer += decoder.decode();
      if (buffer.trim()) {
        const parsed = parseLine(buffer);
        if (parsed) yield parsed;
      }
    },
    async generateObject<T>(input: { schema: z.ZodTypeAny; messages: Message[]; options?: object }): Promise<T> {
      const res = await fetch(`${base}/api/v1/generate/object`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, messages: input.messages, schema: z.toJSONSchema(input.schema), options: input.options }),
        signal: AbortSignal.timeout(60000),
      });
      if (!res.ok) throw new Error(`llm-svc generateObject failed: ${res.status} ${await res.text()}`);
      const data = (await res.json()) as { object: T };
      return data.object;
    },
  };
}

View File

@@ -1,4 +1,4 @@
import { getLocaleInstruction } from '../locale.js';
import { getLocaleInstruction } from './locale.js';
const baseClassifierPrompt = `
<role>
@@ -15,36 +15,30 @@ NOTE: BY GENERAL KNOWLEDGE WE MEAN INFORMATION THAT IS OBVIOUS, WIDELY KNOWN, OR
- Set it to false if the query requires up-to-date information, specific details, or context that cannot be inferred from general knowledge.
- ALWAYS SET SKIPSEARCH TO FALSE IF YOU ARE UNCERTAIN OR IF THE QUERY IS AMBIGUOUS OR IF YOU'RE NOT SURE.
2. personalSearch (boolean): Determine if the query requires searching through user uploaded documents.
- Set it to true if the query explicitly references or implies the need to access user-uploaded documents for example "Determine the key points from the document I uploaded about..." or "Who is the author?", "Summarize the content of the document"
- Set it to true if the query explicitly references or implies the need to access user-uploaded documents.
- Set it to false if the query does not reference user-uploaded documents or if the information can be obtained through general web search.
- ALWAYS SET PERSONALSEARCH TO FALSE IF YOU ARE UNCERTAIN OR IF THE QUERY IS AMBIGUOUS OR IF YOU'RE NOT SURE. AND SET SKIPSEARCH TO FALSE AS WELL.
3. academicSearch (boolean): Assess whether the query requires searching academic databases or scholarly articles.
- Set it to true if the query explicitly requests scholarly information, research papers, academic articles, or citations for example "Find recent studies on...", "What does the latest research say about...", or "Provide citations for..."
- Set it to true if the query explicitly requests scholarly information, research papers, academic articles, or citations.
- Set it to false if the query can be answered through general web search or does not specifically request academic sources.
4. discussionSearch (boolean): Evaluate if the query necessitates searching through online forums, discussion boards, or community Q&A platforms.
- Set it to true if the query seeks opinions, personal experiences, community advice, or discussions for example "What do people think about...", "Are there any discussions on...", or "What are the common issues faced by..."
- Set it to true if the query seeks opinions, personal experiences, community advice, or discussions.
- Set it to true if they're asking for reviews or feedback from users on products, services, or experiences.
- Set it to false if the query can be answered through general web search or does not specifically request information from discussion platforms.
5. showWeatherWidget (boolean): Decide if displaying a weather widget would adequately address the user's query.
- Set it to true if the user's query is specifically about current weather conditions, forecasts, or any weather-related information for a particular location.
- Set it to true for queries like "What's the weather like in [Location]?" or "Will it rain tomorrow in [Location]?" or "Show me the weather" (Here they mean weather of their current location).
- If it can fully answer the user query without needing additional search, set skipSearch to true as well.
6. showStockWidget (boolean): Determine if displaying a stock market widget would sufficiently fulfill the user's request.
- Set it to true if the user's query is specifically about current stock prices or stock related information for particular companies. Never use it for a market analysis or news about stock market.
- Set it to true for queries like "What's the stock price of [Company]?" or "How is the [Stock] performing today?" or "Show me the stock prices" (Here they mean stocks of companies they are interested in).
- Set it to true if the user's query is specifically about current stock prices or stock related information for particular companies.
- If it can fully answer the user query without needing additional search, set skipSearch to true as well.
7. showCalculationWidget (boolean): Decide if displaying a calculation widget would adequately address the user's query.
- Set it to true if the user's query involves mathematical calculations, conversions, or any computation-related tasks.
- Set it to true for queries like "What is 25% of 80?" or "Convert 100 USD to EUR" or "Calculate the square root of 256" or "What is 2 * 3 + 5?" or other mathematical expressions.
- If it can fully answer the user query without needing additional search, set skipSearch to true as well.
</labels>
<standalone_followup>
For the standalone follow up, you have to generate a self contained, context independant reformulation of the user's query.
You basically have to rephrase the user's query in a way that it can be understood without any prior context from the conversation history.
Say for example the converastion is about cars and the user says "How do they work" then the standalone follow up should be "How do cars work?"
Do not contain excess information or everything that has been discussed before, just reformulate the user's last query in a self contained manner.
Rephrase the user's query in a way that it can be understood without any prior context from the conversation history.
The standalone follow-up should be concise and to the point.
</standalone_followup>

View File

@@ -0,0 +1,19 @@
/** ISO 639-1 language code → English language name (used in prompts). */
const LOCALE_TO_LANGUAGE: Record<string, string> = {
  ru: 'Russian', en: 'English', de: 'German', fr: 'French', es: 'Spanish',
  it: 'Italian', pt: 'Portuguese', uk: 'Ukrainian', pl: 'Polish', zh: 'Chinese',
  ja: 'Japanese', ko: 'Korean', ar: 'Arabic', tr: 'Turkish', be: 'Belarusian',
  kk: 'Kazakh', sv: 'Swedish', nb: 'Norwegian', da: 'Danish', fi: 'Finnish',
  cs: 'Czech', sk: 'Slovak', hu: 'Hungarian', ro: 'Romanian', bg: 'Bulgarian',
  hr: 'Croatian', sr: 'Serbian', el: 'Greek', hi: 'Hindi', th: 'Thai',
  vi: 'Vietnamese', id: 'Indonesian', ms: 'Malay', he: 'Hebrew', fa: 'Persian',
};
/**
 * Builds a prompt fragment instructing the model to answer in the
 * language implied by `locale` (e.g. "ru", "en-US", "ru_RU").
 * Returns '' when no locale is provided; unknown codes fall back to
 * the raw code as the language name.
 */
export function getLocaleInstruction(locale?: string): string {
  if (!locale) return '';
  // Accept both BCP 47 ("en-US") and POSIX ("en_US") separators, and
  // normalize case so "RU" and "ru" resolve to the same map entry
  // (the original only split on '-' and was case-sensitive).
  const lang = locale.split(/[-_]/)[0].toLowerCase();
  const languageName = LOCALE_TO_LANGUAGE[lang] ?? lang;
  return `
<response_language>
User's locale is ${locale}. Always format your response in ${languageName}, regardless of the language of the query or search results. Even when the discussed content is in another language, respond in ${languageName}.
</response_language>`;
}

View File

@@ -0,0 +1,149 @@
import { getLocaleInstruction } from './locale.js';
// Research effort level — presumably consumed by getResearcherPrompt's
// dispatch (not visible in this chunk); TODO confirm it is still used.
type Mode = 'speed' | 'balanced' | 'quality';
/**
 * System prompt for 'speed' mode: tool-only protocol, no mandatory
 * reasoning preamble, finish with `done`.
 *
 * @param actionDesc   rendered descriptions of the enabled tools
 * @param i            zero-based current iteration (rendered 1-based)
 * @param maxIteration total iteration budget
 * @param fileDesc     user-upload description; empty string hides the section
 */
function getSpeedPrompt(actionDesc: string, i: number, maxIteration: number, fileDesc: string): string {
  // Grounds the model in the current date (e.g. "February 23, 2026").
  const today = new Date().toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric' });
  return `
Assistant is an action orchestrator. Your job is to fulfill user requests by selecting and executing the available tools—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations so act efficiently.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Fulfill the user's request as quickly as possible using the available tools.
Call tools to gather information or perform tasks as needed.
</goal>
<core_principle>
Your knowledge is outdated; if you have web search, use it to ground answers even for seemingly basic facts.
</core_principle>
<available_tools>
${actionDesc}
</available_tools>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Choose the appropriate tools based on the action descriptions provided above.
- Default to web_search when information is missing or stale; keep queries targeted (max 3 per call).
- Call done when you have gathered enough to answer or performed the required actions.
- Do not invent tools. Do not return JSON.
</response_protocol>
${fileDesc ? `<user_uploaded_files>\n${fileDesc}\n</user_uploaded_files>` : ''}
`;
}
/**
 * System prompt for 'balanced' mode: alternates a mandatory
 * __reasoning_preamble call before every tool call, with a cap of
 * 6 tool calls per turn.
 *
 * @param actionDesc   rendered descriptions of the enabled tools
 * @param i            zero-based current iteration (rendered 1-based)
 * @param maxIteration total iteration budget
 * @param fileDesc     user-upload description; empty string hides the section
 */
function getBalancedPrompt(actionDesc: string, i: number, maxIteration: number, fileDesc: string): string {
  // Grounds the model in the current date (e.g. "February 23, 2026").
  const today = new Date().toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric' });
  return `
Assistant is an action orchestrator. Your job is to fulfill user requests by reasoning briefly and executing the available tools—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations so act efficiently.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Fulfill the user's request with concise reasoning plus focused actions.
You MUST call the __reasoning_preamble tool before every tool call in this assistant turn. Alternate: __reasoning_preamble → tool → __reasoning_preamble → tool ... and finish with __reasoning_preamble → done. Open each __reasoning_preamble with a brief intent phrase (e.g., "Okay, the user wants to...", "Searching for...", "Looking into...") and lay out your reasoning for the next step. Keep it natural language, no tool names.
</goal>
<core_principle>
Your knowledge is outdated; if you have web search, use it to ground answers even for seemingly basic facts.
You can call at most 6 tools total per turn: up to 2 reasoning (__reasoning_preamble counts as reasoning), 2-3 information-gathering calls, and 1 done.
Aim for at least two information-gathering calls when the answer is not already obvious.
</core_principle>
<available_tools>
YOU MUST CALL __reasoning_preamble BEFORE EVERY TOOL CALL IN THIS ASSISTANT TURN. IF YOU DO NOT CALL IT, THE TOOL CALL WILL BE IGNORED.
${actionDesc}
</available_tools>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Start with __reasoning_preamble and call __reasoning_preamble before every tool call (including done).
- Choose tools based on the action descriptions provided above.
- Default to web_search when information is missing or stale; keep queries targeted (max 3 per call).
- Use at most 6 tool calls total. Do not invent tools. Do not return JSON.
</response_protocol>
${fileDesc ? `<user_uploaded_files>\n${fileDesc}\n</user_uploaded_files>` : ''}
`;
}
/**
 * Builds the system prompt for "quality" (deep research) mode: an exhaustive,
 * multi-angle reason-act loop with up to 10 tool calls per turn.
 *
 * @param actionDesc - Rendered descriptions of the tools available this turn.
 * @param i - Zero-based index of the current research iteration.
 * @param maxIteration - Total iterations allotted for this turn.
 * @param fileDesc - Description of user-uploaded files ('' when none).
 * @returns The full prompt text. The template's leading/trailing newlines and
 *          flush-left layout are intentional — they are part of the emitted
 *          prompt, so do not re-indent the literal.
 */
function getQualityPrompt(actionDesc: string, i: number, maxIteration: number, fileDesc: string): string {
// Human-readable date (e.g. "February 23, 2026") so the model knows "today".
const today = new Date().toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric' });
return `
Assistant is a deep-research orchestrator. Your job is to fulfill user requests with the most thorough, comprehensive research possible—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request with depth and rigor.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations. Use every iteration wisely to gather comprehensive information.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Conduct the deepest, most thorough research possible. Leave no stone unturned.
Follow an iterative reason-act loop: call __reasoning_preamble before every tool call to outline the next step, then call the tool, then __reasoning_preamble again to reflect and decide the next step. Repeat until you have exhaustive coverage.
Open each __reasoning_preamble with a brief intent phrase and describe what you'll do next. Keep it natural language, no tool names.
Finish with done only when you have comprehensive, multi-angle information.
</goal>
<core_principle>
Your knowledge is outdated; always use the available tools to ground answers.
This is DEEP RESEARCH mode—be exhaustive. Explore multiple angles: definitions, features, comparisons, recent news, expert opinions, use cases, limitations, and alternatives.
You can call up to 10 tools total per turn. Use an iterative loop: __reasoning_preamble → tool call(s) → __reasoning_preamble → tool call(s) → ... → __reasoning_preamble → done.
</core_principle>
<available_tools>
YOU MUST CALL __reasoning_preamble BEFORE EVERY TOOL CALL IN THIS ASSISTANT TURN. IF YOU DO NOT CALL IT, THE TOOL CALL WILL BE IGNORED.
${actionDesc}
</available_tools>
<research_strategy>
For any topic, consider searching: Core definition/overview, Features/capabilities, Comparisons, Recent news/updates, Reviews/opinions, Use cases, Limitations/critiques.
</research_strategy>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Follow an iterative loop: __reasoning_preamble → tool call → __reasoning_preamble → tool call → ... → __reasoning_preamble → done.
- Each __reasoning_preamble should reflect on previous results (if any) and state the next research step.
- Aim for 4-7 information-gathering calls covering different angles.
- Call done only after comprehensive, multi-angle research is complete.
- Do not invent tools. Do not return JSON.
</response_protocol>
${fileDesc ? `<user_uploaded_files>\n${fileDesc}\n</user_uploaded_files>` : ''}
`;
}
/**
 * Returns the researcher system prompt for the given mode, with the locale
 * instruction appended. Unrecognized modes fall back to the speed prompt,
 * matching the default branch of the original switch.
 *
 * @param actionDesc - Rendered tool descriptions for this turn.
 * @param mode - Research depth mode selecting which prompt builder to use.
 * @param i - Zero-based index of the current iteration.
 * @param maxIteration - Total iterations allotted.
 * @param fileDesc - Description of user-uploaded files ('' when none).
 * @param locale - Optional locale for the reply-language instruction.
 */
export function getResearcherPrompt(
  actionDesc: string,
  mode: Mode,
  i: number,
  maxIteration: number,
  fileDesc: string,
  locale?: string,
): string {
  // Dispatch table instead of a switch; every builder shares one signature.
  const builders: Partial<Record<Mode, typeof getSpeedPrompt>> = {
    speed: getSpeedPrompt,
    balanced: getBalancedPrompt,
    quality: getQualityPrompt,
  };
  const build = builders[mode] ?? getSpeedPrompt;
  return build(actionDesc, i, maxIteration, fileDesc) + getLocaleInstruction(locale);
}

View File

@@ -0,0 +1,92 @@
import { getLocaleInstruction } from './locale.js';
/** Answer vertical selected for the response (drives extra prompt rules). */
export type AnswerMode =
| 'standard' | 'focus' | 'academic' | 'writing' | 'travel' | 'finance'
| 'health' | 'education' | 'medicine' | 'realEstate' | 'psychology' | 'sports'
| 'children' | 'goods' | 'shopping' | 'games' | 'taxes' | 'legislation';
/** User-selected response preferences; values are free-form UI strings. */
export type ResponsePrefs = { format?: string; length?: string; tone?: string };
// Per-vertical prompt additions; verticals without an entry contribute ''.
// NOTE(review): only 7 of the 18 AnswerMode values have blocks — presumably
// the rest intentionally use the base prompt only; confirm before extending.
const VERTICAL_BLOCKS: Partial<Record<AnswerMode, string>> = {
travel: `### Answer Mode: Travel\nPrioritize: destinations, itineraries, hotels, transport, practical tips. Format: clear sections (Where to stay, What to see, Getting there).\n`,
finance: `### Answer Mode: Finance\nPrioritize: market data, company analysis, financial metrics. Cite sources for numbers.\n`,
health: `### Answer Mode: Health\nPrioritize: wellness, medicine, nutrition, fitness, mental health. Cite medical sources.\n`,
education: `### Answer Mode: Education\nPrioritize: learning, courses, pedagogy, academic resources.\n`,
medicine: `### Answer Mode: Medicine\nPrioritize: clinical info, treatments, diagnostics. Cite medical sources.\n`,
academic: `### Answer Mode: Academic\nPrioritize: scholarly sources, citations, research-based answers.\n`,
writing: `### Answer Mode: Writing\nPrioritize: clear structure, engaging prose, well-cited content.\n`,
};
export function getWriterPrompt(
context: string,
systemInstructions: string,
mode: 'speed' | 'balanced' | 'quality',
locale?: string,
memoryContext?: string,
answerMode?: AnswerMode,
responsePrefs?: ResponsePrefs,
learningMode?: boolean,
): string {
const memoryBlock = memoryContext?.trim()
? `\n### User memory (personalization)\nUse these stored facts/preferences to personalize when relevant. Do NOT cite as source.\n${memoryContext}\n`
: '';
const verticalBlock = answerMode ? (VERTICAL_BLOCKS[answerMode] ?? '') : '';
const prefs: string[] = [];
if (responsePrefs?.format) {
const f = responsePrefs.format;
if (f === 'bullets') prefs.push('Format: use bullet points where appropriate.');
else if (f === 'outline') prefs.push('Format: use clear headings and outline structure.');
else prefs.push('Format: use paragraphs and flowing prose.');
}
if (responsePrefs?.length) {
const l = responsePrefs.length;
if (l === 'short') prefs.push('Length: keep response concise and brief.');
else if (l === 'long') prefs.push('Length: provide comprehensive, detailed coverage.');
else prefs.push('Length: medium depth, balanced.');
}
if (responsePrefs?.tone) {
const t = responsePrefs.tone;
if (t === 'professional') prefs.push('Tone: formal, professional.');
else if (t === 'casual') prefs.push('Tone: friendly, conversational.');
else if (t === 'concise') prefs.push('Tone: direct, to the point.');
else prefs.push('Tone: neutral.');
}
const prefsBlock = prefs.length ? `\n### Response preferences\n${prefs.join(' ')}\n` : '';
const learningBlock = learningMode
? `\n### Step-by-step Learning mode\nExplain your reasoning step-by-step. Break down complex concepts. Show the logical flow. Use numbered steps or "First... Then... Finally" structure.\n`
: '';
return `
You are GooSeek, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, professional tone.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact.
- **Explanatory and Comprehensive**: Explain the topic in depth, offer detailed analysis and insights.
### Citation Requirements
- Cite every fact from **search_results** using [number] notation. Citations [1], [2], etc. refer ONLY to sources in search_results.
- **widgets_result** (calculations, weather, stock data) — use this to answer directly, do NOT cite it.
- Integrate citations naturally at the end of sentences.
### Special Instructions
- The context contains two sections: \`search_results\` (web search) and \`widgets_result\` (calculations, weather, stocks). If widgets_result has the answer, USE IT.
- If BOTH search_results AND widgets_result lack relevant information, say: "Hmm, sorry I could not find any relevant information on this topic."
${mode === 'quality' ? "- QUALITY MODE: Generate very deep, detailed responses. At least 2000 words, cover everything like a research report." : ''}
${verticalBlock}${prefsBlock}${learningBlock}
### User instructions
${systemInstructions}
${memoryBlock}
<context>
${context}
</context>
Current date & time (UTC): ${new Date().toISOString()}.
${getLocaleInstruction(locale)}
`;
}

View File

@@ -0,0 +1,67 @@
// Internal search-svc base URL; when set, it takes precedence over direct
// SearXNG access and no instance fallback is attempted.
const SEARCH_SVC_URL = process.env.SEARCH_SVC_URL?.trim() ?? '';
// Primary (usually self-hosted) SearXNG instance; optional.
const SEARXNG_URL = process.env.SEARXNG_URL?.trim() ?? '';
// Public SearXNG instances tried in order when the primary is unset or
// fails. Comma-separated env override; defaults are community instances.
const FALLBACK = (process.env.SEARXNG_FALLBACK_URL ?? 'https://searx.tiekoetter.com,https://search.sapti.me')
.split(',')
.map((u) => u.trim())
.filter(Boolean);
/** One SearXNG result row (subset of the instance's JSON API response). */
export interface SearxngSearchResult {
title: string;
url: string;
// Text snippet; absent for some engines.
content?: string;
}
/** Optional filters forwarded to SearXNG as query parameters. */
interface SearxngSearchOptions {
  engines?: string[];
  categories?: string[];
}

/**
 * Builds a SearXNG `/search` URL for the given query.
 *
 * Always requests JSON output. Engine/category filters are appended only
 * when non-empty, so `engines: []` / `categories: []` no longer emit a
 * meaningless empty query parameter. A scheme-less base URL gets an
 * `http://` prefix (cluster-internal hosts are typically plain HTTP).
 *
 * @param baseUrl - Instance base URL, with or without scheme/trailing slash.
 * @param query - Search query (encoded by URLSearchParams).
 * @param opts - Optional engine/category filters.
 * @returns Absolute URL of the instance's search endpoint.
 */
function buildSearchUrl(baseUrl: string, query: string, opts?: SearxngSearchOptions): string {
  const params = new URLSearchParams();
  params.append('format', 'json');
  params.append('q', query);
  // Length guard skips empty arrays (previously `[]` appended `engines=`).
  if (opts?.engines?.length) params.append('engines', opts.engines.join(','));
  // `categories` is typed string[], but a plain string is tolerated at
  // runtime (mirroring the original Array.isArray defense); `.length`
  // guards both shapes.
  if (opts?.categories?.length) {
    params.append('categories', Array.isArray(opts.categories) ? opts.categories.join(',') : opts.categories);
  }
  const base = baseUrl.trim().replace(/\/$/, '');
  const prefix = /^https?:\/\//i.test(base) ? '' : 'http://';
  return `${prefix}${base}/search?${params.toString()}`;
}
/**
 * Runs a web search, preferring the internal search-svc when configured and
 * otherwise querying SearXNG instances directly (primary first, then public
 * fallbacks) until one succeeds.
 *
 * @param query - Search query.
 * @param opts - Optional engine/category filters.
 * @returns Result rows plus optional query suggestions.
 * @throws The last fetch/HTTP/parse error when every candidate fails, or a
 *         configuration error when no instance is configured at all.
 */
export async function searchSearxng(
  query: string,
  opts?: SearxngSearchOptions,
): Promise<{ results: SearxngSearchResult[]; suggestions?: string[] }> {
  // Preferred path: dedicated search service. No fallback here — fail loudly
  // so a broken search-svc deployment is visible.
  if (SEARCH_SVC_URL) {
    const params = new URLSearchParams();
    params.set('q', query);
    if (opts?.engines) params.set('engines', opts.engines.join(','));
    const url = `${SEARCH_SVC_URL.replace(/\/$/, '')}/api/v1/search?${params.toString()}`;
    const res = await fetch(url, { signal: AbortSignal.timeout(15000) });
    if (!res.ok) throw new Error(`Search HTTP ${res.status}`);
    return res.json() as Promise<{ results: SearxngSearchResult[]; suggestions?: string[] }>;
  }
  // Direct path: primary instance first, then public fallbacks (deduplicated).
  const candidates: string[] = [];
  if (SEARXNG_URL?.trim()) {
    let u = SEARXNG_URL.trim().replace(/\/$/, '');
    if (!/^https?:\/\//i.test(u)) u = `http://${u}`;
    candidates.push(u);
  }
  FALLBACK.forEach((u) => {
    const t = u.trim().replace(/\/$/, '');
    if (t && !candidates.includes(t)) candidates.push(t);
  });
  let lastError: Error | null = null;
  for (const baseUrl of candidates) {
    try {
      const url = buildSearchUrl(baseUrl, query, opts);
      const res = await fetch(url, { signal: AbortSignal.timeout(15000) });
      // Fail over on HTTP errors instead of JSON-parsing an error page,
      // which previously surfaced as a confusing "Unexpected token" error.
      if (!res.ok) throw new Error(`SearXNG HTTP ${res.status} from ${baseUrl}`);
      const data = (await res.json()) as { results?: SearxngSearchResult[]; suggestions?: string[] };
      return { results: data.results ?? [], suggestions: data.suggestions };
    } catch (err) {
      // Remember the failure and try the next candidate.
      lastError = err instanceof Error ? err : new Error(String(err));
    }
  }
  throw lastError ?? new Error('SearXNG not configured');
}

Some files were not shown because too many files have changed in this diff Show More