Deploy: migrate k3s → Docker; search logic → master-agents-svc

- deploy/k3s удалён, deploy/docker добавлен (Caddyfile, docker-compose, searxng)
- chat-svc: agents/models/prompts удалены, использует llm-svc (LLMClient, EmbeddingClient)
- master-agents-svc: SearchOrchestrator, classifier, researcher, actions, widgets
- web-svc: ChatModelSelector, Optimization, Sources удалены; InputBarPlus; UnregisterSW
- geo-device-svc, localization-svc: Dockerfiles
- docs: 02-k3s-services-spec.md, RUNBOOK/TELEMETRY/WORKING удалены

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
home
2026-02-23 22:14:00 +03:00
parent cd6b7857ba
commit 328d968f3f
180 changed files with 3022 additions and 9798 deletions

View File

@@ -0,0 +1,15 @@
# Build stage: install full deps and compile TypeScript to dist/.
FROM node:22-alpine AS builder
WORKDIR /app
COPY package*.json ./
# npm ci installs exactly what package-lock.json pins — reproducible builds,
# unlike `npm install`, which may resolve newer versions and mutate the lock.
RUN npm ci
COPY tsconfig.json ./
COPY src ./src
RUN npm run build

# Runtime stage: production dependencies only, plus the compiled output.
FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev
COPY --from=builder /app/dist ./dist
EXPOSE 3018
CMD ["node", "dist/index.js"]

View File

@@ -17,10 +17,14 @@
"ollama": "^0.6.3",
"openai": "^6.9.0",
"partial-json": "^0.1.7",
"rfc6902": "^5.1.2",
"turndown": "^7.2.2",
"yahoo-finance2": "^3.13.0",
"zod": "^4.1.12"
},
"devDependencies": {
"@types/node": "^24.8.1",
"@types/turndown": "^5.0.6",
"tsx": "^4.19.2",
"typescript": "^5.9.3"
}

View File

@@ -1,6 +1,6 @@
/**
* master-agents-svc — Master Agent с динамическими под-агентами и инструментами
* API: POST /api/v1/agents/execute
* master-agents-svc — Master Agent + Search Orchestrator (Perplexity-style)
* API: POST /api/v1/agents/execute, POST /api/v1/agents/search (NDJSON stream)
*/
import Fastify from 'fastify';
@@ -8,8 +8,13 @@ import cors from '@fastify/cors';
import { z } from 'zod';
import { loadChatModel } from './lib/models/registry.js';
import { runMasterAgent } from './lib/agent/master.js';
import { createLlmClient } from './lib/llm-client.js';
import SessionManager from './lib/session.js';
import { runSearchOrchestrator } from './lib/agent/searchOrchestrator.js';
const PORT = parseInt(process.env.PORT ?? '3018', 10);
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
const MEMORY_SVC_URL = process.env.MEMORY_SVC_URL ?? '';
const chatModelSchema = z.object({
providerId: z.string(),
@@ -23,6 +28,30 @@ const bodySchema = z.object({
maxSteps: z.number().min(1).max(25).optional().default(15),
});
// Thematic answer modes the writer prompt can specialize for (UI focus presets).
const answerModeEnum = z.enum([
  'standard', 'focus', 'academic', 'writing', 'travel', 'finance',
  'health', 'education', 'medicine', 'realEstate', 'psychology', 'sports',
  'children', 'goods', 'shopping', 'games', 'taxes', 'legislation',
]);

// Request body for POST /api/v1/agents/search (NDJSON streaming endpoint).
const searchBodySchema = z.object({
  // The user turn being processed; content must be non-empty (min(1)).
  message: z.object({
    messageId: z.string().min(1),
    chatId: z.string().min(1),
    content: z.string().min(1),
  }),
  // Research depth: drives iteration budget and whether memory is fetched.
  optimizationMode: z.enum(['speed', 'balanced', 'quality']),
  // Enabled search sources; handler narrows to 'web' | 'discussions' | 'academic'.
  sources: z.array(z.string()).optional().default([]),
  // Prior turns as [speaker, text] tuples; 'human' maps to role 'user', anything else to 'assistant'.
  history: z.array(z.tuple([z.string(), z.string()])).optional().default([]),
  files: z.array(z.string()).optional().default([]),
  chatModel: chatModelSchema,
  systemInstructions: z.string().nullable().optional().default(''),
  locale: z.string().optional(),
  answerMode: answerModeEnum.optional().default('standard'),
  // Optional response formatting preferences, passed through to the writer prompt.
  responsePrefs: z.object({ format: z.string().optional(), length: z.string().optional(), tone: z.string().optional() }).optional(),
  learningMode: z.boolean().optional().default(false),
});
const app = Fastify({ logger: true });
await app.register(cors, { origin: true });
@@ -62,6 +91,105 @@ app.post('/api/v1/agents/execute', async (req, reply) => {
}
});
app.post<{ Body: unknown }>('/api/v1/agents/search', async (req, reply) => {
  // Perplexity-style search endpoint: validates the request, optionally enriches
  // it with user memory, then runs the search orchestrator and streams its session
  // events to the client as NDJSON lines
  // ({type: 'block' | 'updateBlock' | 'researchComplete' | 'messageEnd' | 'error'}).
  const parsed = searchBodySchema.safeParse(req.body);
  if (!parsed.success) {
    return reply.status(400).send({ message: 'Invalid request body', error: parsed.error.issues });
  }
  const body = parsed.data;
  if (!LLM_SVC_URL) {
    return reply.status(503).send({ message: 'LLM_SVC_URL not configured. llm-svc required.' });
  }
  // (No explicit empty-content check needed: searchBodySchema enforces content.min(1).)

  // Long-term memory is best-effort: only for authenticated users on the deeper
  // modes, with a short timeout; failures are logged and ignored.
  let memoryContext: string | undefined;
  const authHeader = req.headers.authorization;
  const useMemory = MEMORY_SVC_URL && authHeader && (body.optimizationMode === 'balanced' || body.optimizationMode === 'quality');
  if (useMemory) {
    try {
      const memRes = await fetch(`${MEMORY_SVC_URL.replace(/\/$/, '')}/api/v1/memory`, {
        headers: { Authorization: authHeader! },
        signal: AbortSignal.timeout(3000),
      });
      if (memRes.ok) {
        const memData = (await memRes.json()) as { items?: { key: string; value: string }[] };
        const items = memData.items ?? [];
        if (items.length > 0) {
          memoryContext = items.map((r) => `- ${r.key}: ${r.value}`).join('\n');
        }
      }
    } catch (err) {
      req.log.warn({ err }, 'Memory fetch failed');
    }
  }
  try {
    const llm = createLlmClient({ providerId: body.chatModel.providerId, key: body.chatModel.key });
    const history = body.history.map((msg) =>
      msg[0] === 'human' ? { role: 'user' as const, content: msg[1] } : { role: 'assistant' as const, content: msg[1] });
    const session = SessionManager.createSession();
    const encoder = new TextEncoder();
    const stream = new ReadableStream({
      start(controller) {
        // Bridge session events onto the NDJSON stream until 'end' or 'error'.
        const disconnect = session.subscribe((event: string, data: unknown) => {
          const d = data as { type?: string; block?: unknown; blockId?: string; patch?: unknown; data?: unknown };
          if (event === 'data') {
            if (d.type === 'block') controller.enqueue(encoder.encode(JSON.stringify({ type: 'block', block: d.block }) + '\n'));
            else if (d.type === 'updateBlock') controller.enqueue(encoder.encode(JSON.stringify({ type: 'updateBlock', blockId: d.blockId, patch: d.patch }) + '\n'));
            else if (d.type === 'researchComplete') controller.enqueue(encoder.encode(JSON.stringify({ type: 'researchComplete' }) + '\n'));
          } else if (event === 'end') {
            controller.enqueue(encoder.encode(JSON.stringify({ type: 'messageEnd' }) + '\n'));
            controller.close();
            session.removeAllListeners();
          } else if (event === 'error') {
            controller.enqueue(encoder.encode(JSON.stringify({ type: 'error', data: d.data }) + '\n'));
            controller.close();
            session.removeAllListeners();
          }
        });
        // Fire-and-forget: the orchestrator emits into the session; surface any
        // rejection to the client as an 'error' event instead of an unhandled rejection.
        runSearchOrchestrator(session, {
          chatHistory: history,
          followUp: body.message.content,
          config: {
            llm,
            mode: body.optimizationMode,
            sources: (body.sources as ('web' | 'discussions' | 'academic')[]) ?? [],
            fileIds: body.files,
            systemInstructions: body.systemInstructions || 'None',
            locale: body.locale ?? 'en',
            memoryContext,
            answerMode: body.answerMode,
            responsePrefs: body.responsePrefs,
            learningMode: body.learningMode,
          },
        }).catch((err: Error) => {
          req.log.error(err);
          session.emit('error', { data: err?.message ?? 'Error during search.' });
        });
        // BUGFIX: Node's http.IncomingMessage never emits 'abort' — it emits
        // 'close' (and, historically, 'aborted'). Listening on 'abort' meant a
        // client disconnect never unsubscribed from the session, leaking the
        // listener and keeping the orchestrator streaming into the void.
        // 'close' also fires after a normal finish; disconnect() and the guarded
        // controller.close() are safe to call after the stream already ended.
        req.raw.on('close', () => {
          disconnect();
          try {
            controller.close();
          } catch {}
        });
      },
    });
    return reply
      .header('Content-Type', 'application/x-ndjson')
      .header('Cache-Control', 'no-cache')
      .send(stream);
  } catch (err) {
    req.log.error(err);
    return reply.status(500).send({ message: 'An error occurred while processing search request' });
  }
});
try {
await app.listen({ port: PORT, host: '0.0.0.0' });
console.log(`master-agents-svc listening on :${PORT}`);

View File

@@ -0,0 +1,28 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
const schema = z.object({
  plan: z.string().describe('A concise natural-language plan in one short paragraph. Open with a short intent phrase and lay out the steps you will take.'),
});

// Long-form description shown in the researcher prompt; stresses that this tool
// must be the first call of every turn.
const actionDescription = `
Use this tool FIRST on every turn to state your plan in natural language before any other action. Keep it short, action-focused, and tailored to the current query.
Make sure to not include reference to any tools or actions you might take, just the plan itself.
YOU CAN NEVER CALL ANY OTHER TOOL BEFORE CALLING THIS ONE FIRST, IF YOU DO, THAT CALL WOULD BE IGNORED.
`;

/**
 * Pseudo-tool that forces the model to state a short plan before acting.
 * Its output becomes a 'reasoning' research sub-step; the researcher also
 * streams partial plans live (see researcher.ts handling of this name).
 * Disabled in 'speed' mode to save a round of tokens.
 */
const planAction: ResearchAction<typeof schema> = {
  name: '__reasoning_preamble',
  schema,
  getToolDescription: () =>
    'Use this FIRST on every turn to state your plan in natural language before any other action. Keep it short, action-focused, and tailored to the current query.',
  getDescription: () => actionDescription,
  enabled: (config) => config.mode !== 'speed',
  execute: async (input) => ({
    type: 'reasoning',
    reasoning: input.plan,
  }),
};
export default planAction;

View File

@@ -0,0 +1,77 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
import type { Chunk, SearchResultsResearchBlock } from '../types.js';
import { searchSearxng } from '../searxng.js';
const schema = z.object({
  queries: z.array(z.string()).describe('List of academic search queries'),
});

/**
 * Academic search action: queries SearXNG's scholarly engines (arXiv,
 * Google Scholar, PubMed) and streams results into the research block.
 * Enabled only when the 'academic' source is selected and the classifier
 * requested a non-skipped academic search.
 */
const academicSearchAction: ResearchAction<typeof schema> = {
  name: 'academic_search',
  schema,
  getToolDescription: () =>
    'Use this tool to perform academic searches for scholarly articles, papers, and research studies. Provide up to 3 queries at a time.',
  getDescription: () =>
    'Use this tool to perform academic searches for scholarly articles and research studies. Provide concise search queries. You can provide up to 3 queries at a time.',
  enabled: (config) =>
    config.sources.includes('academic') &&
    config.classification.classification.skipSearch === false &&
    config.classification.classification.academicSearch === true,
  execute: async (input, additionalConfig) => {
    // Hard cap of 3 queries regardless of what the model asked for.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    // Announce the queries in the UI as a 'searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
      ]);
    }
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    const results: Chunk[] = [];
    const search = async (q: string) => {
      // BUGFIX: mirror web_search and swallow per-query SearXNG failures.
      // Previously one rejected query aborted the whole Promise.all below and
      // the entire action threw, discarding results from the other queries.
      let res: Awaited<ReturnType<typeof searchSearxng>>;
      try {
        res = await searchSearxng(q, { engines: ['arxiv', 'google scholar', 'pubmed'] });
      } catch {
        return;
      }
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: { title: r.title, url: r.url },
      }));
      results.push(...resultChunks);
      // First query to respond creates the shared 'search_results' sub-step;
      // later responders append to it (single event loop — no data race).
      if (!searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
        ]);
      } else if (searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === searchResultsBlockId);
        const subStep = researchBlock.data.subSteps[subStepIndex] as SearchResultsResearchBlock | undefined;
        if (subStep) {
          subStep.reading.push(...resultChunks);
          additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
            { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
          ]);
        }
      }
    };
    // All queries run concurrently.
    await Promise.all(input.queries.map(search));
    return { type: 'search_results', results };
  },
};
export default academicSearchAction;

View File

@@ -0,0 +1,16 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
// No parameters: calling 'done' is itself the signal.
const emptySchema = z.object({});

/**
 * Terminal action: the researcher calls this to signal it has gathered enough
 * information; the orchestrator's writer phase then produces the final answer.
 * Always enabled, in every mode.
 */
const doneAction: ResearchAction<typeof emptySchema> = {
  name: 'done',
  schema: emptySchema,
  getToolDescription: () =>
    'Only call this after __reasoning_preamble AND after any other needed tool calls when you truly have enough to answer. Do not call if information is still missing.',
  getDescription: () =>
    'Use this action ONLY when you have completed all necessary research and are ready to provide a final answer. YOU MUST CALL THIS ACTION TO SIGNAL COMPLETION; DO NOT OUTPUT FINAL ANSWERS DIRECTLY TO THE USER.',
  enabled: () => true,
  execute: async () => ({ type: 'done' }),
};
export default doneAction;

View File

@@ -0,0 +1,57 @@
import type { ResearchAction, ActionConfig, ActionOutput, AdditionalConfig, ToolCall } from './types.js';
import __reasoning_preamble from './__reasoning_preamble.js';
import done from './done.js';
import webSearch from './web_search.js';
import academicSearch from './academic_search.js';
import socialSearch from './social_search.js';
import scrapeUrl from './scrape_url.js';
// Module-level registry of all research actions, keyed by action name.
const actions = new Map<string, ResearchAction>();

function register(action: ResearchAction) {
  actions.set(action.name, action);
}

// Registration order here is also the iteration order of the Map below.
register(__reasoning_preamble);
register(done);
register(webSearch);
register(academicSearch);
register(socialSearch);
register(scrapeUrl);

/** Look up a registered action by name; undefined if unknown. */
export function getAction(name: string): ResearchAction | undefined {
  return actions.get(name);
}

/** Actions whose enabled() predicate passes for the given run configuration. */
export function getAvailableActions(config: ActionConfig): ResearchAction[] {
  return Array.from(actions.values()).filter((a) => a.enabled(config));
}

/** Tool definitions (name/description/schema) for the LLM tool-calling API. */
export function getAvailableActionTools(config: ActionConfig): { name: string; description: string; schema: unknown }[] {
  return getAvailableActions(config).map((a) => ({
    name: a.name,
    description: a.getToolDescription({ mode: config.mode }),
    schema: a.schema,
  }));
}

/** Long-form action descriptions rendered as <tool> blocks for the system prompt. */
export function getAvailableActionsDescriptions(config: ActionConfig): string {
  return getAvailableActions(config)
    .map((a) => `<tool name="${a.name}">\n${a.getDescription({ mode: config.mode })}\n</tool>`)
    .join('\n\n');
}
/** Параллельное выполнение tool calls */
export async function executeAll(
toolCalls: ToolCall[],
additionalConfig: AdditionalConfig,
): Promise<import('./types.js').ActionOutput[]> {
const results = await Promise.all(
toolCalls.map(async (tc) => {
const action = getAction(tc.name);
if (!action) throw new Error(`Action ${tc.name} not found`);
return action.execute(tc.arguments as never, additionalConfig);
}),
);
return results;
}

View File

@@ -0,0 +1,71 @@
import z from 'zod';
import TurndownService from 'turndown';
import type { ResearchAction } from './types.js';
import type { Chunk, ReadingResearchBlock } from '../types.js';
// Shared converter instance; Turndown is stateless between turndown() calls.
const turndownService = new TurndownService();

const schema = z.object({
  urls: z.array(z.string()).describe('A list of URLs to scrape content from.'),
});

/**
 * Scrapes up to 3 URLs, converts each page's HTML to Markdown via Turndown,
 * and reports progress as a 'reading' sub-step on the research block.
 * Per-URL fetch failures become error chunks instead of rejecting the action.
 */
const scrapeURLAction: ResearchAction<typeof schema> = {
  name: 'scrape_url',
  schema,
  getToolDescription: () =>
    'Use this tool to scrape and extract content from the provided URLs. You can provide up to 3 URLs at a time. NEVER CALL THIS TOOL EXPLICITLY YOURSELF UNLESS INSTRUCTED TO DO SO BY THE USER.',
  getDescription: () =>
    'Use this tool to scrape content from specific web pages. Only call when the user has specifically requested information from certain URLs. Never call yourself to get extra information without user instruction.',
  enabled: () => true,
  execute: async (params, additionalConfig) => {
    // Hard cap of 3 URLs regardless of what the model asked for.
    params.urls = params.urls.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    const readingBlockId = crypto.randomUUID(); // never reassigned — const, not let
    let readingEmitted = false;
    const results: Chunk[] = [];
    await Promise.all(
      params.urls.map(async (url) => {
        try {
          const res = await fetch(url, { signal: AbortSignal.timeout(10000) });
          const text = await res.text();
          // BUGFIX: the original /<title>(.*?)<\/title>/ missed titles that span
          // newlines ('.' does not match \n) or carry attributes on the tag;
          // [\s\S]*? spans newlines and [^>]* allows attributes.
          const title = text.match(/<title[^>]*>([\s\S]*?)<\/title>/i)?.[1] || `Content from ${url}`;
          // First URL to respond creates the shared 'reading' sub-step; later
          // responders append to it (single event loop — no data race).
          if (!readingEmitted && researchBlock && researchBlock.type === 'research') {
            readingEmitted = true;
            researchBlock.data.subSteps.push({
              id: readingBlockId,
              type: 'reading',
              reading: [{ content: '', metadata: { url, title } }],
            });
            additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
              { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
            ]);
          } else if (readingEmitted && researchBlock && researchBlock.type === 'research') {
            const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === readingBlockId);
            const subStep = researchBlock.data.subSteps[subStepIndex] as ReadingResearchBlock | undefined;
            if (subStep) {
              subStep.reading.push({ content: '', metadata: { url, title } });
              additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
                { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
              ]);
            }
          }
          // NOTE(review): non-OK responses (404/500 pages) are still converted and
          // returned as content — presumably intentional best-effort; confirm.
          const markdown = turndownService.turndown(text);
          results.push({ content: markdown, metadata: { url, title } });
        } catch (error) {
          // Timeouts / network errors become an explanatory chunk for the model.
          results.push({
            content: `Failed to fetch content from ${url}: ${error}`,
            metadata: { url, title: `Error fetching ${url}` },
          });
        }
      }),
    );
    return { type: 'search_results', results };
  },
};
export default scrapeURLAction;

View File

@@ -0,0 +1,77 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
import type { Chunk, SearchResultsResearchBlock } from '../types.js';
import { searchSearxng } from '../searxng.js';
const schema = z.object({
  queries: z.array(z.string()).describe('List of social search queries'),
});

/**
 * Social/discussions search action: queries SearXNG's reddit engine and
 * streams results into the research block. Enabled only when the
 * 'discussions' source is selected and the classifier requested it.
 */
const socialSearchAction: ResearchAction<typeof schema> = {
  name: 'social_search',
  schema,
  getToolDescription: () =>
    'Use this tool to perform social media searches for relevant posts, discussions, and trends. Provide up to 3 queries at a time.',
  getDescription: () =>
    'Use this tool to perform social media searches for posts, discussions, and trends. Provide concise search queries. You can provide up to 3 queries at a time.',
  enabled: (config) =>
    config.sources.includes('discussions') &&
    config.classification.classification.skipSearch === false &&
    config.classification.classification.discussionSearch === true,
  execute: async (input, additionalConfig) => {
    // Hard cap of 3 queries regardless of what the model asked for.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    // Announce the queries in the UI as a 'searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
      ]);
    }
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    const results: Chunk[] = [];
    const search = async (q: string) => {
      // BUGFIX: mirror web_search and swallow per-query SearXNG failures.
      // Previously one rejected query aborted the whole Promise.all below and
      // the entire action threw, discarding results from the other queries.
      let res: Awaited<ReturnType<typeof searchSearxng>>;
      try {
        res = await searchSearxng(q, { engines: ['reddit'] });
      } catch {
        return;
      }
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: { title: r.title, url: r.url },
      }));
      results.push(...resultChunks);
      // First query to respond creates the shared 'search_results' sub-step;
      // later responders append to it (single event loop — no data race).
      if (!searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
        ]);
      } else if (searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === searchResultsBlockId);
        const subStep = researchBlock.data.subSteps[subStepIndex] as SearchResultsResearchBlock | undefined;
        if (subStep) {
          subStep.reading.push(...resultChunks);
          additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
            { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
          ]);
        }
      }
    };
    // All queries run concurrently.
    await Promise.all(input.queries.map(search));
    return { type: 'search_results', results };
  },
};
export default socialSearchAction;

View File

@@ -0,0 +1,52 @@
import z from 'zod';
import type SessionManager from '../session.js';
import type { Chunk } from '../types.js';
// Structured output of the query classifier (see ../agent/classifier.ts):
// boolean routing flags plus a self-contained rewrite of the follow-up.
export type ClassifierOutput = {
  classification: {
    skipSearch: boolean;
    personalSearch: boolean;
    academicSearch: boolean;
    discussionSearch: boolean;
    showWeatherWidget: boolean;
    showStockWidget: boolean;
    showCalculationWidget: boolean;
  };
  // The user's follow-up rewritten to stand alone without conversation context.
  standaloneFollowUp: string;
};

// Which result sources the user enabled for this search.
export type SearchSources = 'web' | 'discussions' | 'academic';

// Research depth preset; controls iteration budgets and which actions enable.
export type SearchMode = 'speed' | 'balanced' | 'quality';

// Per-run configuration handed to each action's enabled() predicate.
export type ActionConfig = {
  classification: ClassifierOutput;
  fileIds: string[];
  mode: SearchMode;
  sources: SearchSources[];
  hasEmbedding?: boolean;
};

// Runtime context handed to each action's execute(): the event session and
// the research block it should append sub-steps to.
export type AdditionalConfig = {
  session: SessionManager;
  researchBlockId: string;
  fileIds: string[];
};

// Discriminated union of everything an action can return (tag: 'type').
export type SearchActionOutput = { type: 'search_results'; results: Chunk[] };
export type DoneActionOutput = { type: 'done' };
export type ReasoningActionOutput = { type: 'reasoning'; reasoning: string };
export type ActionOutput = SearchActionOutput | DoneActionOutput | ReasoningActionOutput;

// One LLM tool call as accumulated by the researcher's streaming loop.
export type ToolCall = { id: string; name: string; arguments: Record<string, unknown> };

/**
 * Contract every research action implements. TSchema is the zod schema of the
 * tool's arguments; execute() receives the parsed arguments plus runtime context.
 */
export interface ResearchAction<TSchema extends z.ZodObject<Record<string, z.ZodTypeAny>> = z.ZodObject<Record<string, z.ZodTypeAny>>> {
  name: string;
  schema: TSchema;
  // Short description for the LLM tool-calling API.
  getToolDescription: (config: { mode: SearchMode }) => string;
  // Long description rendered into the researcher system prompt.
  getDescription: (config: { mode: SearchMode }) => string;
  enabled: (config: ActionConfig) => boolean;
  execute: (
    params: z.infer<TSchema>,
    additionalConfig: AdditionalConfig,
  ) => Promise<ActionOutput>;
}

View File

@@ -0,0 +1,80 @@
import z from 'zod';
import type { ResearchAction } from './types.js';
import type { Chunk, SearchResultsResearchBlock } from '../types.js';
import { searchSearxng } from '../searxng.js';
const schema = z.object({
  queries: z.array(z.string()).describe('An array of search queries to perform web searches for.'),
});

/**
 * General web search action: queries SearXNG's default engines and streams
 * results into the research block. Enabled whenever the 'web' source is
 * selected and the classifier did not skip search.
 */
const webSearchAction: ResearchAction<typeof schema> = {
  name: 'web_search',
  schema,
  getToolDescription: () =>
    'Use this tool to perform web searches based on the provided queries. You can provide up to 3 queries at a time.',
  getDescription: () =>
    'Use this tool to perform web searches. Your queries should be targeted and specific, SEO-friendly keywords. You can search for 3 queries in one go.',
  enabled: (config) =>
    config.sources.includes('web') && config.classification.classification.skipSearch === false,
  execute: async (input, additionalConfig) => {
    // Hard cap of 3 queries regardless of what the model asked for.
    input.queries = input.queries.slice(0, 3);
    const researchBlock = additionalConfig.session.getBlock(additionalConfig.researchBlockId);
    // Announce the queries in the UI as a 'searching' sub-step.
    if (researchBlock && researchBlock.type === 'research') {
      researchBlock.data.subSteps.push({
        id: crypto.randomUUID(),
        type: 'searching',
        searching: input.queries,
      });
      additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
        { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
      ]);
    }
    const searchResultsBlockId = crypto.randomUUID();
    let searchResultsEmitted = false;
    const results: Chunk[] = [];
    const search = async (q: string) => {
      let res: { results: { content?: string; title: string; url: string }[] };
      // A failed query is dropped silently so the other queries still contribute.
      try {
        res = await searchSearxng(q);
      } catch {
        return;
      }
      const resultChunks: Chunk[] = res.results.map((r) => ({
        content: r.content || r.title,
        metadata: { title: r.title, url: r.url },
      }));
      results.push(...resultChunks);
      // First query to respond creates the shared 'search_results' sub-step;
      // later responders append to it (single event loop — no data race).
      if (!searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        searchResultsEmitted = true;
        researchBlock.data.subSteps.push({
          id: searchResultsBlockId,
          type: 'search_results',
          reading: resultChunks,
        });
        additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
          { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
        ]);
      } else if (searchResultsEmitted && researchBlock && researchBlock.type === 'research') {
        const subStepIndex = researchBlock.data.subSteps.findIndex((s) => s.id === searchResultsBlockId);
        const subStep = researchBlock.data.subSteps[subStepIndex] as SearchResultsResearchBlock | undefined;
        if (subStep) {
          subStep.reading.push(...resultChunks);
          additionalConfig.session.updateBlock(additionalConfig.researchBlockId, [
            { op: 'replace', path: '/data/subSteps', value: researchBlock.data.subSteps },
          ]);
        }
      }
    };
    // All queries run concurrently.
    await Promise.all(input.queries.map(search));
    return { type: 'search_results', results };
  },
};
export default webSearchAction;

View File

@@ -0,0 +1,39 @@
import z from 'zod';
import type { LlmClient } from '../llm-client.js';
import { getClassifierPrompt } from '../prompts/classifier.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
// Structured-output schema for the classifier LLM call; shape mirrors
// ClassifierOutput in ../actions/types.ts.
const schema = z.object({
  classification: z.object({
    skipSearch: z.boolean(),
    personalSearch: z.boolean(),
    academicSearch: z.boolean(),
    discussionSearch: z.boolean(),
    showWeatherWidget: z.boolean(),
    showStockWidget: z.boolean(),
    showCalculationWidget: z.boolean(),
  }),
  // The follow-up rewritten to stand alone without conversation context.
  standaloneFollowUp: z.string(),
});

export type ClassifierInput = {
  chatHistory: { role: string; content: string }[];
  query: string;
  llm: LlmClient;
  locale?: string;
  // NOTE(review): currently unused by classify() below — the prompt does not
  // receive the enabled sources; confirm whether this is intentional.
  enabledSources: ('web' | 'discussions' | 'academic')[];
};
/**
 * Classify the user's query: decide which searches/widgets apply and produce a
 * standalone rewrite of the follow-up, via a structured LLM call.
 */
export async function classify(input: ClassifierInput): Promise<z.infer<typeof schema>> {
  // Wrap the conversation and query in tags the classifier prompt expects.
  const userContent =
    `<conversation_history>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation_history>\n` +
    `<user_query>\n${input.query}\n</user_query>`;
  return input.llm.generateObject<z.infer<typeof schema>>({
    messages: [
      { role: 'system', content: getClassifierPrompt(input.locale) },
      { role: 'user', content: userContent },
    ],
    schema,
  });
}

View File

@@ -0,0 +1,195 @@
import z from 'zod';
import type { LlmClient } from '../llm-client.js';
import type SessionManager from '../session.js';
import type { Chunk, ReasoningResearchBlock } from '../types.js';
import type { ClassifierOutput } from '../actions/types.js';
import { getResearcherPrompt } from '../prompts/researcher.js';
import { getAvailableActionTools, getAvailableActionsDescriptions, executeAll } from '../actions/registry.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
// Subset of the orchestrator config that the research loop needs.
export type ResearcherConfig = {
  mode: 'speed' | 'balanced' | 'quality';
  sources: ('web' | 'discussions' | 'academic')[];
  fileIds: string[];
  locale?: string;
};

export type ResearcherInput = {
  chatHistory: { role: string; content: string }[];
  // The raw user follow-up (the classifier's standalone rewrite is appended alongside it).
  followUp: string;
  classification: ClassifierOutput;
  config: ResearcherConfig;
};

export type ResearcherOutput = {
  // Deduplicated search chunks to feed the writer phase.
  searchFindings: Chunk[];
};

// Local mirror of the registry's ToolCall shape.
export type ToolCall = { id: string; name: string; arguments: Record<string, unknown> };
/**
 * Agentic research loop: repeatedly asks the LLM for tool calls, executes the
 * chosen actions, and feeds results back until 'done' is called or the
 * mode-dependent iteration budget is exhausted. Streams progress (reasoning,
 * searching, results) into a 'research' block on the session, then emits a
 * deduplicated 'source' block and returns the findings for the writer.
 */
export async function research(
  session: SessionManager,
  llm: LlmClient,
  input: ResearcherInput,
): Promise<ResearcherOutput> {
  // Iteration budget per mode: speed=2, balanced=6, quality=25.
  const maxIteration = input.config.mode === 'speed' ? 2 : input.config.mode === 'balanced' ? 6 : 25;
  const actionConfig = {
    classification: input.classification,
    fileIds: input.config.fileIds,
    mode: input.config.mode,
    sources: input.config.sources,
    hasEmbedding: false,
  };
  const availableTools = getAvailableActionTools(actionConfig);
  const availableActionsDescription = getAvailableActionsDescriptions(actionConfig);
  // One research block hosts all sub-steps for the entire loop.
  const researchBlockId = crypto.randomUUID();
  session.emitBlock({
    id: researchBlockId,
    type: 'research',
    data: { subSteps: [] },
  });
  // Agent-visible transcript: last 10 turns plus the follow-up and its standalone rewrite.
  const agentMessageHistory: { role: string; content: string; tool_calls?: ToolCall[] }[] = [
    {
      role: 'user',
      content: `<conversation>\n${formatChatHistoryAsString(input.chatHistory.slice(-10))}\nUser: ${input.followUp} (Standalone: ${input.classification.standaloneFollowUp})\n</conversation>`,
    },
  ];
  const actionOutput: { type: string; results?: Chunk[] }[] = [];
  for (let i = 0; i < maxIteration; i++) {
    // Prompt carries the iteration counter so the model knows its remaining budget.
    const researcherPrompt = getResearcherPrompt(
      availableActionsDescription,
      input.config.mode,
      i,
      maxIteration,
      '',
      input.config.locale,
    );
    const toolsForLlm = availableTools.map((t) => ({
      name: t.name,
      description: t.description,
      schema: t.schema as z.ZodObject<Record<string, z.ZodTypeAny>>,
    }));
    const actionStream = llm.streamText({
      messages: [{ role: 'system', content: researcherPrompt }, ...agentMessageHistory],
      tools: toolsForLlm,
    });
    const block = session.getBlock(researchBlockId);
    let reasoningEmitted = false;
    let reasoningId = crypto.randomUUID();
    const finalToolCalls: ToolCall[] = [];
    // Consume the stream: live-render the __reasoning_preamble plan as it grows,
    // and accumulate/merge partial tool-call chunks by id.
    for await (const partialRes of actionStream) {
      if (partialRes.toolCallChunk?.length) {
        for (const tc of partialRes.toolCallChunk) {
          if (
            tc.name === '__reasoning_preamble' &&
            tc.arguments?.plan &&
            !reasoningEmitted &&
            block &&
            block.type === 'research'
          ) {
            // First plan chunk: create the 'reasoning' sub-step.
            reasoningEmitted = true;
            (block.data.subSteps as ResearchBlockSubStep[]).push({
              id: reasoningId,
              type: 'reasoning',
              reasoning: String(tc.arguments.plan),
            });
            session.updateBlock(researchBlockId, [
              { op: 'replace', path: '/data/subSteps', value: block.data.subSteps },
            ]);
          } else if (
            tc.name === '__reasoning_preamble' &&
            tc.arguments?.plan &&
            reasoningEmitted &&
            block &&
            block.type === 'research'
          ) {
            // Subsequent plan chunks: overwrite the sub-step with the fuller text.
            const subStepIndex = block.data.subSteps.findIndex((s) => s.id === reasoningId);
            if (subStepIndex !== -1) {
              const subStep = block.data.subSteps[subStepIndex] as ReasoningResearchBlock;
              subStep.reasoning = String(tc.arguments.plan);
              session.updateBlock(researchBlockId, [
                { op: 'replace', path: '/data/subSteps', value: block.data.subSteps },
              ]);
            }
          }
          // Merge streamed argument fragments into one call per tool-call id.
          const existingIndex = finalToolCalls.findIndex((ftc) => ftc.id === tc.id);
          if (existingIndex !== -1) {
            finalToolCalls[existingIndex].arguments = { ...finalToolCalls[existingIndex].arguments, ...tc.arguments };
          } else {
            finalToolCalls.push({ id: tc.id, name: tc.name, arguments: tc.arguments ?? {} });
          }
        }
      }
    }
    // No tool calls means the model is finished (or produced plain text) — stop.
    if (finalToolCalls.length === 0) break;
    // NOTE(review): if 'done' is the last call in a batch, any sibling calls in
    // the same batch are never executed — confirm this is the intended semantics.
    if (finalToolCalls[finalToolCalls.length - 1].name === 'done') break;
    agentMessageHistory.push({
      role: 'assistant',
      content: '',
      tool_calls: finalToolCalls,
    });
    const results = await executeAll(finalToolCalls, {
      session,
      researchBlockId,
      fileIds: input.config.fileIds,
    });
    actionOutput.push(...results);
    // Feed each tool result back to the model as a 'tool' message keyed by call id.
    for (let j = 0; j < finalToolCalls.length; j++) {
      agentMessageHistory.push({
        role: 'tool',
        content: JSON.stringify(results[j]),
        id: finalToolCalls[j].id,
        name: finalToolCalls[j].name,
      } as { role: string; content: string; id: string; name: string });
    }
  }
  const searchResults = actionOutput
    .filter((a) => a.type === 'search_results' && a.results)
    .flatMap((a) => a.results as Chunk[]);
  // Deduplicate by URL: keep the first chunk per URL and merge later duplicates'
  // content into it; chunks without a URL are kept as-is.
  const seenUrls = new Map<string, number>();
  const filteredSearchResults = searchResults
    .map((result, index) => {
      const url = result.metadata?.url as string | undefined;
      if (url && !seenUrls.has(url)) {
        seenUrls.set(url, index);
        return result;
      }
      if (url && seenUrls.has(url)) {
        const existingIndex = seenUrls.get(url)!;
        const existing = searchResults[existingIndex];
        existing.content += `\n\n${result.content}`;
        return undefined;
      }
      return result;
    })
    .filter((r): r is Chunk => r !== undefined);
  // Publish the citable sources to the client before returning to the orchestrator.
  session.emitBlock({
    id: crypto.randomUUID(),
    type: 'source',
    data: filteredSearchResults,
  });
  return { searchFindings: filteredSearchResults };
}

// Loose union of all research sub-step shapes used by the streaming code above.
type ResearchBlockSubStep = { id: string; type: string; reasoning?: string; reading?: Chunk[]; searching?: string[] };

View File

@@ -0,0 +1,156 @@
import type { LlmClient } from '../llm-client.js';
import SessionManager from '../session.js';
import type { TextBlock } from '../types.js';
import type { ClassifierOutput } from '../actions/types.js';
import { getClassifierPrompt } from '../prompts/classifier.js';
import { getWriterPrompt } from '../prompts/writer.js';
import { classify } from './classifier.js';
import { research } from './researcher.js';
import { executeAllWidgets } from '../widgets/index.js';
// Full configuration for one search run, assembled by the HTTP handler.
export type SearchOrchestratorConfig = {
  llm: LlmClient;
  // Research depth preset (speed/balanced/quality).
  mode: 'speed' | 'balanced' | 'quality';
  sources: ('web' | 'discussions' | 'academic')[];
  fileIds: string[];
  systemInstructions: string;
  locale?: string;
  // Pre-fetched user memory lines, if any, injected into the writer prompt.
  memoryContext?: string;
  answerMode?: import('../prompts/writer.js').AnswerMode;
  responsePrefs?: { format?: string; length?: string; tone?: string };
  learningMode?: boolean;
};

export type SearchOrchestratorInput = {
  chatHistory: { role: string; content: string }[];
  // The user's latest message.
  followUp: string;
  config: SearchOrchestratorConfig;
};
export async function runSearchOrchestrator(
session: SessionManager,
input: SearchOrchestratorInput,
): Promise<void> {
const { chatHistory, followUp, config } = input;
const classification = await classify({
chatHistory,
query: followUp,
llm: config.llm,
locale: config.locale,
enabledSources: config.sources,
});
const widgetPromise = executeAllWidgets({
chatHistory,
followUp,
classification,
llm: config.llm,
}).then((outputs) => {
for (const o of outputs) {
session.emitBlock({
id: crypto.randomUUID(),
type: 'widget',
data: { widgetType: o.type, params: o.data ?? {} },
});
}
return outputs;
});
let searchPromise: Promise<{ searchFindings: import('../types.js').Chunk[] }> | null = null;
if (!classification.classification.skipSearch) {
searchPromise = research(session, config.llm, {
chatHistory,
followUp,
classification,
config: {
mode: config.mode,
sources: config.sources,
fileIds: config.fileIds,
locale: config.locale,
},
});
}
const [widgetOutputs, searchResults] = await Promise.all([widgetPromise, searchPromise ?? Promise.resolve({ searchFindings: [] })]);
session.emit('data', { type: 'researchComplete' });
const MAX_RESULTS_FOR_WRITER = 15;
const MAX_CONTENT_PER_RESULT = 180;
const findingsForWriter = (searchResults?.searchFindings ?? []).slice(0, MAX_RESULTS_FOR_WRITER);
const finalContext =
findingsForWriter
.map((f, index) => {
const content = f.content.length > MAX_CONTENT_PER_RESULT ? f.content.slice(0, MAX_CONTENT_PER_RESULT) + '…' : f.content;
return `<result index=${index + 1} title="${String(f.metadata?.title ?? '').replace(/"/g, "'")}">${content}</result>`;
})
.join('\n') || '';
const widgetContext = widgetOutputs
.map((o) => `<result>${o.llmContext}</result>`)
.join('\n-------------\n');
const finalContextWithWidgets =
`<search_results note="These are the search results and assistant can cite these">\n${finalContext}\n</search_results>\n` +
`<widgets_result noteForAssistant="Its output is already showed to the user, assistant can use this information to answer the query but do not CITE this as a source">\n${widgetContext}\n</widgets_result>`;
const writerPrompt = getWriterPrompt(
finalContextWithWidgets,
config.systemInstructions,
config.mode,
config.locale,
config.memoryContext,
config.answerMode,
config.responsePrefs,
config.learningMode,
);
const answerStream = config.llm.streamText({
messages: [
{ role: 'system', content: writerPrompt },
...chatHistory,
{ role: 'user', content: followUp },
],
options: { maxTokens: 4096 },
});
let responseBlockId = '';
let hasContent = false;
for await (const chunk of answerStream) {
if (!chunk.contentChunk && !responseBlockId) continue;
if (!responseBlockId) {
const block: TextBlock = {
id: crypto.randomUUID(),
type: 'text',
data: chunk.contentChunk ?? '',
};
session.emitBlock(block);
responseBlockId = block.id;
if (chunk.contentChunk) hasContent = true;
} else {
const block = session.getBlock(responseBlockId) as TextBlock | null;
if (block) {
block.data += chunk.contentChunk ?? '';
if (chunk.contentChunk) hasContent = true;
session.updateBlock(block.id, [{ op: 'replace', path: '/data', value: block.data }]);
}
}
}
if (!hasContent && findingsForWriter.length > 0) {
const lines = findingsForWriter.slice(0, 10).map((f, i) => {
const title = (f.metadata?.title as string) ?? 'Без названия';
const excerpt = f.content.length > 120 ? f.content.slice(0, 120) + '…' : f.content;
return `${i + 1}. **${title}** — ${excerpt}`;
});
session.emitBlock({
id: crypto.randomUUID(),
type: 'text',
data: `## По найденным источникам\n\n${lines.join('\n\n')}\n\n*Ответ LLM недоступен. Проверьте модель в Settings.*`,
});
}
session.emit('end', {});
}

View File

@@ -0,0 +1,37 @@
/**
* EmbeddingClient — HTTP-клиент к llm-svc для эмбеддингов
*/
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
export interface EmbeddingClient {
embedText(texts: string[]): Promise<number[][]>;
embedChunks(chunks: { content: string; metadata: Record<string, unknown> }[]): Promise<number[][]>;
}
function getBaseUrl(): string {
if (!LLM_SVC_URL) throw new Error('LLM_SVC_URL is required for EmbeddingClient');
return LLM_SVC_URL.replace(/\/$/, '');
}
export function createEmbeddingClient(model: { providerId: string; key: string }): EmbeddingClient {
const base = getBaseUrl();
return {
async embedText(texts: string[]): Promise<number[][]> {
if (texts.length === 0) return [];
const res = await fetch(`${base}/api/v1/embeddings`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model, texts }),
signal: AbortSignal.timeout(60000),
});
if (!res.ok) throw new Error(`llm-svc embeddings failed: ${res.status} ${await res.text()}`);
const data = (await res.json()) as { embeddings: number[][] };
return data.embeddings;
},
async embedChunks(chunks: { content: string; metadata: Record<string, unknown> }[]): Promise<number[][]> {
const texts = chunks.map((c) => c.content);
return this.embedText(texts);
},
};
}

View File

@@ -0,0 +1,100 @@
/**
* LlmClient — HTTP-клиент к llm-svc для генерации
*/
import z from 'zod';
export type Message = { role: string; content: string; id?: string; name?: string; tool_calls?: { id: string; name: string; arguments: Record<string, unknown> }[] };
export type ToolCall = { id: string; name: string; arguments: Record<string, unknown> };
const LLM_SVC_URL = process.env.LLM_SVC_URL ?? '';
export type GenerateTextInput = {
messages: Message[];
tools?: { name: string; description: string; schema: z.ZodObject<Record<string, z.ZodTypeAny>> }[];
options?: { maxTokens?: number; temperature?: number };
};
export type GenerateTextOutput = { content: string; toolCalls: ToolCall[] };
export type StreamTextOutput = { contentChunk: string; toolCallChunk: ToolCall[]; done?: boolean };
export interface LlmClient {
generateText(input: GenerateTextInput): Promise<GenerateTextOutput>;
streamText(input: GenerateTextInput): AsyncGenerator<StreamTextOutput>;
generateObject<T>(input: { schema: z.ZodTypeAny; messages: Message[]; options?: object }): Promise<T>;
}
function getBaseUrl(): string {
if (!LLM_SVC_URL) throw new Error('LLM_SVC_URL required');
return LLM_SVC_URL.replace(/\/$/, '');
}
function serializeTool(t: NonNullable<GenerateTextInput['tools']>[0]) {
return { name: t.name, description: t.description, schema: z.toJSONSchema(t.schema) as Record<string, unknown> };
}
export function createLlmClient(model: { providerId: string; key: string }): LlmClient {
const base = getBaseUrl();
return {
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
const res = await fetch(`${base}/api/v1/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model, messages: input.messages, tools: input.tools?.map(serializeTool), options: input.options }),
signal: AbortSignal.timeout(120000),
});
if (!res.ok) throw new Error(`llm-svc generate failed: ${res.status} ${await res.text()}`);
return res.json() as Promise<GenerateTextOutput>;
},
async *streamText(input: GenerateTextInput): AsyncGenerator<StreamTextOutput> {
const res = await fetch(`${base}/api/v1/generate/stream`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model, messages: input.messages, tools: input.tools?.map(serializeTool), options: input.options }),
signal: AbortSignal.timeout(120000),
});
if (!res.ok) throw new Error(`llm-svc stream failed: ${res.status} ${await res.text()}`);
const reader = res.body?.getReader();
if (!reader) throw new Error('No response body');
const decoder = new TextDecoder();
let buffer = '';
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() ?? '';
for (const line of lines) {
if (!line.trim()) continue;
try {
const parsed = JSON.parse(line) as StreamTextOutput | { error?: string };
if ('error' in parsed && parsed.error) throw new Error(parsed.error);
yield parsed as StreamTextOutput;
} catch (e) {
if (!(e instanceof SyntaxError)) throw e;
}
}
}
if (buffer.trim()) {
try {
const parsed = JSON.parse(buffer) as StreamTextOutput | { error?: string };
if ('error' in parsed && parsed.error) throw new Error(parsed.error);
yield parsed as StreamTextOutput;
} catch (e) {
if (!(e instanceof SyntaxError)) throw e;
}
}
},
async generateObject<T>(input: { schema: z.ZodTypeAny; messages: Message[]; options?: object }): Promise<T> {
const res = await fetch(`${base}/api/v1/generate/object`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model, messages: input.messages, schema: z.toJSONSchema(input.schema), options: input.options }),
signal: AbortSignal.timeout(60000),
});
if (!res.ok) throw new Error(`llm-svc generateObject failed: ${res.status} ${await res.text()}`);
const data = (await res.json()) as { object: T };
return data.object;
},
};
}

View File

@@ -0,0 +1,64 @@
import { getLocaleInstruction } from './locale.js';
const baseClassifierPrompt = `
<role>
Assistant is an advanced AI system designed to analyze the user query and the conversation history to determine the most appropriate classification for the search operation.
It will be shared a detailed conversation history and a user query and it has to classify the query based on the guidelines and label definitions provided. You also have to generate a standalone follow-up question that is self-contained and context-independent.
</role>
<labels>
NOTE: BY GENERAL KNOWLEDGE WE MEAN INFORMATION THAT IS OBVIOUS, WIDELY KNOWN, OR CAN BE INFERRED WITHOUT EXTERNAL SOURCES FOR EXAMPLE MATHEMATICAL FACTS, BASIC SCIENTIFIC KNOWLEDGE, COMMON HISTORICAL EVENTS, ETC.
1. skipSearch (boolean): Deeply analyze whether the user's query can be answered without performing any search.
- Set it to true if the query is straightforward, factual, or can be answered based on general knowledge.
- Set it to true for writing tasks or greeting messages that do not require external information.
- Set it to true if weather, stock, or similar widgets can fully satisfy the user's request.
- Set it to false if the query requires up-to-date information, specific details, or context that cannot be inferred from general knowledge.
- ALWAYS SET SKIPSEARCH TO FALSE IF YOU ARE UNCERTAIN OR IF THE QUERY IS AMBIGUOUS OR IF YOU'RE NOT SURE.
2. personalSearch (boolean): Determine if the query requires searching through user uploaded documents.
- Set it to true if the query explicitly references or implies the need to access user-uploaded documents.
- Set it to false if the query does not reference user-uploaded documents or if the information can be obtained through general web search.
- ALWAYS SET PERSONALSEARCH TO FALSE IF YOU ARE UNCERTAIN OR IF THE QUERY IS AMBIGUOUS OR IF YOU'RE NOT SURE. AND SET SKIPSEARCH TO FALSE AS WELL.
3. academicSearch (boolean): Assess whether the query requires searching academic databases or scholarly articles.
- Set it to true if the query explicitly requests scholarly information, research papers, academic articles, or citations.
- Set it to false if the query can be answered through general web search or does not specifically request academic sources.
4. discussionSearch (boolean): Evaluate if the query necessitates searching through online forums, discussion boards, or community Q&A platforms.
- Set it to true if the query seeks opinions, personal experiences, community advice, or discussions.
- Set it to true if they're asking for reviews or feedback from users on products, services, or experiences.
- Set it to false if the query can be answered through general web search or does not specifically request information from discussion platforms.
5. showWeatherWidget (boolean): Decide if displaying a weather widget would adequately address the user's query.
- Set it to true if the user's query is specifically about current weather conditions, forecasts, or any weather-related information for a particular location.
- If it can fully answer the user query without needing additional search, set skipSearch to true as well.
6. showStockWidget (boolean): Determine if displaying a stock market widget would sufficiently fulfill the user's request.
- Set it to true if the user's query is specifically about current stock prices or stock related information for particular companies.
- If it can fully answer the user query without needing additional search, set skipSearch to true as well.
7. showCalculationWidget (boolean): Decide if displaying a calculation widget would adequately address the user's query.
- Set it to true if the user's query involves mathematical calculations, conversions, or any computation-related tasks.
- If it can fully answer the user query without needing additional search, set skipSearch to true as well.
</labels>
<standalone_followup>
For the standalone follow up, you have to generate a self contained, context independant reformulation of the user's query.
Rephrase the user's query in a way that it can be understood without any prior context from the conversation history.
The standalone follow-up should be concise and to the point.
</standalone_followup>
<output_format>
You must respond in the following JSON format without any extra text, explanations or filler sentences:
{
"classification": {
"skipSearch": boolean,
"personalSearch": boolean,
"academicSearch": boolean,
"discussionSearch": boolean,
"showWeatherWidget": boolean,
"showStockWidget": boolean,
"showCalculationWidget": boolean,
},
"standaloneFollowUp": string
}
</output_format>
`;
export function getClassifierPrompt(locale?: string): string {
return baseClassifierPrompt + getLocaleInstruction(locale);
}

View File

@@ -0,0 +1,19 @@
const LOCALE_TO_LANGUAGE: Record<string, string> = {
ru: 'Russian', en: 'English', de: 'German', fr: 'French', es: 'Spanish',
it: 'Italian', pt: 'Portuguese', uk: 'Ukrainian', pl: 'Polish', zh: 'Chinese',
ja: 'Japanese', ko: 'Korean', ar: 'Arabic', tr: 'Turkish', be: 'Belarusian',
kk: 'Kazakh', sv: 'Swedish', nb: 'Norwegian', da: 'Danish', fi: 'Finnish',
cs: 'Czech', sk: 'Slovak', hu: 'Hungarian', ro: 'Romanian', bg: 'Bulgarian',
hr: 'Croatian', sr: 'Serbian', el: 'Greek', hi: 'Hindi', th: 'Thai',
vi: 'Vietnamese', id: 'Indonesian', ms: 'Malay', he: 'Hebrew', fa: 'Persian',
};
export function getLocaleInstruction(locale?: string): string {
if (!locale) return '';
const lang = locale.split('-')[0];
const languageName = LOCALE_TO_LANGUAGE[lang] ?? lang;
return `
<response_language>
User's locale is ${locale}. Always format your response in ${languageName}, regardless of the language of the query or search results. Even when the discussed content is in another language, respond in ${languageName}.
</response_language>`;
}

View File

@@ -0,0 +1,149 @@
import { getLocaleInstruction } from './locale.js';
type Mode = 'speed' | 'balanced' | 'quality';
function getSpeedPrompt(actionDesc: string, i: number, maxIteration: number, fileDesc: string): string {
const today = new Date().toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric' });
return `
Assistant is an action orchestrator. Your job is to fulfill user requests by selecting and executing the available tools—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations so act efficiently.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Fulfill the user's request as quickly as possible using the available tools.
Call tools to gather information or perform tasks as needed.
</goal>
<core_principle>
Your knowledge is outdated; if you have web search, use it to ground answers even for seemingly basic facts.
</core_principle>
<available_tools>
${actionDesc}
</available_tools>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Choose the appropriate tools based on the action descriptions provided above.
- Default to web_search when information is missing or stale; keep queries targeted (max 3 per call).
- Call done when you have gathered enough to answer or performed the required actions.
- Do not invent tools. Do not return JSON.
</response_protocol>
${fileDesc ? `<user_uploaded_files>\n${fileDesc}\n</user_uploaded_files>` : ''}
`;
}
function getBalancedPrompt(actionDesc: string, i: number, maxIteration: number, fileDesc: string): string {
const today = new Date().toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric' });
return `
Assistant is an action orchestrator. Your job is to fulfill user requests by reasoning briefly and executing the available tools—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations so act efficiently.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Fulfill the user's request with concise reasoning plus focused actions.
You MUST call the __reasoning_preamble tool before every tool call in this assistant turn. Alternate: __reasoning_preamble → tool → __reasoning_preamble → tool ... and finish with __reasoning_preamble → done. Open each __reasoning_preamble with a brief intent phrase (e.g., "Okay, the user wants to...", "Searching for...", "Looking into...") and lay out your reasoning for the next step. Keep it natural language, no tool names.
</goal>
<core_principle>
Your knowledge is outdated; if you have web search, use it to ground answers even for seemingly basic facts.
You can call at most 6 tools total per turn: up to 2 reasoning (__reasoning_preamble counts as reasoning), 2-3 information-gathering calls, and 1 done.
Aim for at least two information-gathering calls when the answer is not already obvious.
</core_principle>
<available_tools>
YOU MUST CALL __reasoning_preamble BEFORE EVERY TOOL CALL IN THIS ASSISTANT TURN. IF YOU DO NOT CALL IT, THE TOOL CALL WILL BE IGNORED.
${actionDesc}
</available_tools>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Start with __reasoning_preamble and call __reasoning_preamble before every tool call (including done).
- Choose tools based on the action descriptions provided above.
- Default to web_search when information is missing or stale; keep queries targeted (max 3 per call).
- Use at most 6 tool calls total. Do not invent tools. Do not return JSON.
</response_protocol>
${fileDesc ? `<user_uploaded_files>\n${fileDesc}\n</user_uploaded_files>` : ''}
`;
}
function getQualityPrompt(actionDesc: string, i: number, maxIteration: number, fileDesc: string): string {
const today = new Date().toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric' });
return `
Assistant is a deep-research orchestrator. Your job is to fulfill user requests with the most thorough, comprehensive research possible—no free-form replies.
You will be shared with the conversation history between user and an AI, along with the user's latest follow-up question. Based on this, you must use the available tools to fulfill the user's request with depth and rigor.
Today's date: ${today}
You are currently on iteration ${i + 1} of your research process and have ${maxIteration} total iterations. Use every iteration wisely to gather comprehensive information.
When you are finished, you must call the \`done\` tool. Never output text directly.
<goal>
Conduct the deepest, most thorough research possible. Leave no stone unturned.
Follow an iterative reason-act loop: call __reasoning_preamble before every tool call to outline the next step, then call the tool, then __reasoning_preamble again to reflect and decide the next step. Repeat until you have exhaustive coverage.
Open each __reasoning_preamble with a brief intent phrase and describe what you'll do next. Keep it natural language, no tool names.
Finish with done only when you have comprehensive, multi-angle information.
</goal>
<core_principle>
Your knowledge is outdated; always use the available tools to ground answers.
This is DEEP RESEARCH mode—be exhaustive. Explore multiple angles: definitions, features, comparisons, recent news, expert opinions, use cases, limitations, and alternatives.
You can call up to 10 tools total per turn. Use an iterative loop: __reasoning_preamble → tool call(s) → __reasoning_preamble → tool call(s) → ... → __reasoning_preamble → done.
</core_principle>
<available_tools>
YOU MUST CALL __reasoning_preamble BEFORE EVERY TOOL CALL IN THIS ASSISTANT TURN. IF YOU DO NOT CALL IT, THE TOOL CALL WILL BE IGNORED.
${actionDesc}
</available_tools>
<research_strategy>
For any topic, consider searching: Core definition/overview, Features/capabilities, Comparisons, Recent news/updates, Reviews/opinions, Use cases, Limitations/critiques.
</research_strategy>
<response_protocol>
- NEVER output normal text to the user. ONLY call tools.
- Follow an iterative loop: __reasoning_preamble → tool call → __reasoning_preamble → tool call → ... → __reasoning_preamble → done.
- Each __reasoning_preamble should reflect on previous results (if any) and state the next research step.
- Aim for 4-7 information-gathering calls covering different angles.
- Call done only after comprehensive, multi-angle research is complete.
- Do not invent tools. Do not return JSON.
</response_protocol>
${fileDesc ? `<user_uploaded_files>\n${fileDesc}\n</user_uploaded_files>` : ''}
`;
}
export function getResearcherPrompt(
actionDesc: string,
mode: Mode,
i: number,
maxIteration: number,
fileDesc: string,
locale?: string,
): string {
let prompt: string;
switch (mode) {
case 'speed':
prompt = getSpeedPrompt(actionDesc, i, maxIteration, fileDesc);
break;
case 'balanced':
prompt = getBalancedPrompt(actionDesc, i, maxIteration, fileDesc);
break;
case 'quality':
prompt = getQualityPrompt(actionDesc, i, maxIteration, fileDesc);
break;
default:
prompt = getSpeedPrompt(actionDesc, i, maxIteration, fileDesc);
}
return prompt + getLocaleInstruction(locale);
}

View File

@@ -0,0 +1,92 @@
import { getLocaleInstruction } from './locale.js';
export type AnswerMode =
| 'standard' | 'focus' | 'academic' | 'writing' | 'travel' | 'finance'
| 'health' | 'education' | 'medicine' | 'realEstate' | 'psychology' | 'sports'
| 'children' | 'goods' | 'shopping' | 'games' | 'taxes' | 'legislation';
export type ResponsePrefs = { format?: string; length?: string; tone?: string };
const VERTICAL_BLOCKS: Partial<Record<AnswerMode, string>> = {
travel: `### Answer Mode: Travel\nPrioritize: destinations, itineraries, hotels, transport, practical tips. Format: clear sections (Where to stay, What to see, Getting there).\n`,
finance: `### Answer Mode: Finance\nPrioritize: market data, company analysis, financial metrics. Cite sources for numbers.\n`,
health: `### Answer Mode: Health\nPrioritize: wellness, medicine, nutrition, fitness, mental health. Cite medical sources.\n`,
education: `### Answer Mode: Education\nPrioritize: learning, courses, pedagogy, academic resources.\n`,
medicine: `### Answer Mode: Medicine\nPrioritize: clinical info, treatments, diagnostics. Cite medical sources.\n`,
academic: `### Answer Mode: Academic\nPrioritize: scholarly sources, citations, research-based answers.\n`,
writing: `### Answer Mode: Writing\nPrioritize: clear structure, engaging prose, well-cited content.\n`,
};
export function getWriterPrompt(
context: string,
systemInstructions: string,
mode: 'speed' | 'balanced' | 'quality',
locale?: string,
memoryContext?: string,
answerMode?: AnswerMode,
responsePrefs?: ResponsePrefs,
learningMode?: boolean,
): string {
const memoryBlock = memoryContext?.trim()
? `\n### User memory (personalization)\nUse these stored facts/preferences to personalize when relevant. Do NOT cite as source.\n${memoryContext}\n`
: '';
const verticalBlock = answerMode ? (VERTICAL_BLOCKS[answerMode] ?? '') : '';
const prefs: string[] = [];
if (responsePrefs?.format) {
const f = responsePrefs.format;
if (f === 'bullets') prefs.push('Format: use bullet points where appropriate.');
else if (f === 'outline') prefs.push('Format: use clear headings and outline structure.');
else prefs.push('Format: use paragraphs and flowing prose.');
}
if (responsePrefs?.length) {
const l = responsePrefs.length;
if (l === 'short') prefs.push('Length: keep response concise and brief.');
else if (l === 'long') prefs.push('Length: provide comprehensive, detailed coverage.');
else prefs.push('Length: medium depth, balanced.');
}
if (responsePrefs?.tone) {
const t = responsePrefs.tone;
if (t === 'professional') prefs.push('Tone: formal, professional.');
else if (t === 'casual') prefs.push('Tone: friendly, conversational.');
else if (t === 'concise') prefs.push('Tone: direct, to the point.');
else prefs.push('Tone: neutral.');
}
const prefsBlock = prefs.length ? `\n### Response preferences\n${prefs.join(' ')}\n` : '';
const learningBlock = learningMode
? `\n### Step-by-step Learning mode\nExplain your reasoning step-by-step. Break down complex concepts. Show the logical flow. Use numbered steps or "First... Then... Finally" structure.\n`
: '';
return `
You are GooSeek, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, professional tone.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact.
- **Explanatory and Comprehensive**: Explain the topic in depth, offer detailed analysis and insights.
### Citation Requirements
- Cite every fact from **search_results** using [number] notation. Citations [1], [2], etc. refer ONLY to sources in search_results.
- **widgets_result** (calculations, weather, stock data) — use this to answer directly, do NOT cite it.
- Integrate citations naturally at the end of sentences.
### Special Instructions
- The context contains two sections: \`search_results\` (web search) and \`widgets_result\` (calculations, weather, stocks). If widgets_result has the answer, USE IT.
- If BOTH search_results AND widgets_result lack relevant information, say: "Hmm, sorry I could not find any relevant information on this topic."
${mode === 'quality' ? "- QUALITY MODE: Generate very deep, detailed responses. At least 2000 words, cover everything like a research report." : ''}
${verticalBlock}${prefsBlock}${learningBlock}
### User instructions
${systemInstructions}
${memoryBlock}
<context>
${context}
</context>
Current date & time (UTC): ${new Date().toISOString()}.
${getLocaleInstruction(locale)}
`;
}

View File

@@ -0,0 +1,67 @@
const SEARCH_SVC_URL = process.env.SEARCH_SVC_URL?.trim() ?? '';
const SEARXNG_URL = process.env.SEARXNG_URL?.trim() ?? '';
const FALLBACK = (process.env.SEARXNG_FALLBACK_URL ?? 'https://searx.tiekoetter.com,https://search.sapti.me')
.split(',')
.map((u) => u.trim())
.filter(Boolean);
export interface SearxngSearchResult {
title: string;
url: string;
content?: string;
}
interface SearxngSearchOptions {
engines?: string[];
categories?: string[];
}
function buildSearchUrl(baseUrl: string, query: string, opts?: SearxngSearchOptions): string {
const params = new URLSearchParams();
params.append('format', 'json');
params.append('q', query);
if (opts?.engines) params.append('engines', opts.engines.join(','));
if (opts?.categories) params.append('categories', Array.isArray(opts.categories) ? opts.categories.join(',') : opts.categories);
const base = baseUrl.trim().replace(/\/$/, '');
const prefix = /^https?:\/\//i.test(base) ? '' : 'http://';
return `${prefix}${base}/search?${params.toString()}`;
}
export async function searchSearxng(
query: string,
opts?: SearxngSearchOptions,
): Promise<{ results: SearxngSearchResult[]; suggestions?: string[] }> {
if (SEARCH_SVC_URL) {
const params = new URLSearchParams();
params.set('q', query);
if (opts?.engines) params.set('engines', opts.engines.join(','));
const url = `${SEARCH_SVC_URL.replace(/\/$/, '')}/api/v1/search?${params.toString()}`;
const res = await fetch(url, { signal: AbortSignal.timeout(15000) });
if (!res.ok) throw new Error(`Search HTTP ${res.status}`);
return res.json() as Promise<{ results: SearxngSearchResult[]; suggestions?: string[] }>;
}
const candidates: string[] = [];
if (SEARXNG_URL?.trim()) {
let u = SEARXNG_URL.trim().replace(/\/$/, '');
if (!/^https?:\/\//i.test(u)) u = `http://${u}`;
candidates.push(u);
}
FALLBACK.forEach((u) => {
const t = u.trim().replace(/\/$/, '');
if (t && !candidates.includes(t)) candidates.push(t);
});
let lastError: Error | null = null;
for (const baseUrl of candidates) {
try {
const url = buildSearchUrl(baseUrl, query, opts);
const res = await fetch(url, { signal: AbortSignal.timeout(15000) });
const data = (await res.json()) as { results?: SearxngSearchResult[]; suggestions?: string[] };
return { results: data.results ?? [], suggestions: data.suggestions };
} catch (err) {
lastError = err instanceof Error ? err : new Error(String(err));
}
}
throw lastError ?? new Error('SearXNG not configured');
}

View File

@@ -0,0 +1,68 @@
import { EventEmitter } from 'node:events';
import { applyPatch, type Operation } from 'rfc6902';
import type { Block } from './types.js';
const sessions = new Map<string, SessionManager>();
class SessionManager {
readonly id: string;
private blocks = new Map<string, Block>();
private events: { event: string; data: unknown }[] = [];
private emitter = new EventEmitter();
constructor(id?: string) {
this.id = id ?? crypto.randomUUID();
}
static createSession(): SessionManager {
const s = new SessionManager();
sessions.set(s.id, s);
return s;
}
emit(event: string, data: unknown) {
this.emitter.emit(event, data);
this.events.push({ event, data });
}
emitBlock(block: Block) {
this.blocks.set(block.id, block);
this.emit('data', { type: 'block', block });
}
getBlock(blockId: string): Block | undefined {
return this.blocks.get(blockId);
}
updateBlock(blockId: string, patch: Operation[]) {
const block = this.blocks.get(blockId);
if (block) {
applyPatch(block, patch);
this.blocks.set(blockId, block);
this.emit('data', { type: 'updateBlock', blockId, patch });
}
}
getAllBlocks(): Block[] {
return Array.from(this.blocks.values());
}
subscribe(listener: (event: string, data: unknown) => void): () => void {
const handler = (event: string) => (data: unknown) => listener(event, data);
this.emitter.on('data', handler('data'));
this.emitter.on('end', handler('end'));
this.emitter.on('error', handler('error'));
for (const { event, data } of this.events) listener(event, data);
return () => {
this.emitter.off('data', handler('data'));
this.emitter.off('end', handler('end'));
this.emitter.off('error', handler('error'));
};
}
removeAllListeners() {
this.emitter.removeAllListeners();
}
}
export default SessionManager;

View File

@@ -1,11 +1,47 @@
// ---- Chat message types ----------------------------------------------------

/** A turn authored by the end user. */
export type UserMessage = { role: 'user'; content: string };
/** A model turn; may carry tool calls the agent should execute. */
export type AssistantMessage = {
  role: 'assistant';
  content: string;
  tool_calls?: { id: string; name: string; arguments: Record<string, unknown> }[];
};
export type SystemMessage = { role: 'system'; content: string };
/** Result of a tool invocation, correlated to the originating call by `id`. */
export type ToolMessage = { role: 'tool'; id: string; name: string; content: string };
// NOTE(review): the diff carried two revisions of `Message` (an inline union)
// and `ToolMessage` (an Extract<> alias) alongside these named aliases — that
// is a duplicate-identifier compile error, so only the named-alias revision
// (the one `ChatTurnMessage` builds on) is kept.
export type Message = UserMessage | AssistantMessage | SystemMessage | ToolMessage;
/** Messages that form the visible conversation (no system/tool turns). */
export type ChatTurnMessage = UserMessage | AssistantMessage;

/** A retrieved document fragment plus provenance metadata (url, title, ...). */
export type Chunk = { content: string; metadata: Record<string, unknown> };

// ---- Answer blocks streamed to the client ----------------------------------
export type TextBlock = { id: string; type: 'text'; data: string };
export type SourceBlock = { id: string; type: 'source'; data: Chunk[] };
export type WidgetBlock = { id: string; type: 'widget'; data: { widgetType: string; params: Record<string, unknown> } };

// ---- Research (search-progress) sub-steps ----------------------------------
export type ReasoningResearchBlock = { id: string; type: 'reasoning'; reasoning: string };
export type SearchingResearchBlock = { id: string; type: 'searching'; searching: string[] };
export type SearchResultsResearchBlock = { id: string; type: 'search_results'; reading: Chunk[] };
export type ReadingResearchBlock = { id: string; type: 'reading'; reading: Chunk[] };
export type UploadSearchingResearchBlock = { id: string; type: 'upload_searching'; queries: string[] };
export type UploadSearchResultsResearchBlock = { id: string; type: 'upload_search_results'; results: Chunk[] };
export type ResearchBlockSubStep =
  | ReasoningResearchBlock
  | SearchingResearchBlock
  | SearchResultsResearchBlock
  | ReadingResearchBlock
  | UploadSearchingResearchBlock
  | UploadSearchResultsResearchBlock;
export type ResearchBlock = { id: string; type: 'research'; data: { subSteps: ResearchBlockSubStep[] } };
/** Every block shape the orchestrator can emit. */
export type Block = TextBlock | SourceBlock | WidgetBlock | ResearchBlock;

/** Search effort presets. */
export type SearchMode = 'speed' | 'balanced' | 'quality';
export type SearchSources = 'web' | 'discussions' | 'academic';

/** Output of the query classifier: routing flags + standalone follow-up. */
export type ClassifierOutput = {
  classification: {
    skipSearch: boolean;
    personalSearch: boolean;
    academicSearch: boolean;
    discussionSearch: boolean;
    showWeatherWidget: boolean;
    showStockWidget: boolean;
    showCalculationWidget: boolean;
  };
  standaloneFollowUp: string;
};

View File

@@ -0,0 +1,7 @@
/**
 * Render a chat history as a plain-text transcript, one "<Speaker>: <text>"
 * line per message. Assistant turns are labelled "AI"; every other role
 * (user, system, ...) is labelled "User".
 */
function formatChatHistoryAsString(history: { role: string; content: string }[]): string {
  const lines: string[] = [];
  for (const message of history) {
    const speaker = message.role === 'assistant' ? 'AI' : 'User';
    lines.push(`${speaker}: ${message.content}`);
  }
  return lines.join('\n');
}
export default formatChatHistoryAsString;

View File

@@ -0,0 +1,49 @@
import z from 'zod';
import { evaluate } from 'mathjs';
import type { Widget } from './types.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
// Structured-output contract for the expression-extraction LLM call below.
const schema = z.object({
  expression: z.string().describe('Mathematical expression to calculate.'),
  notPresent: z.boolean().describe('Whether there is any need for the calculation widget.'),
});
// System prompt: the model must answer with JSON matching `schema`, and the
// expression must be evaluable by MathJS.
const systemPrompt = `
You are a calculation expression extractor. Determine if there is a mathematical expression to calculate.
If there is, extract it. If not, set notPresent to true.
Respond in JSON: { "expression": string, "notPresent": boolean }
The expression must be valid for MathJS (mathjs.org).
`;
/**
 * Widget that extracts a MathJS-compatible expression from the conversation
 * and evaluates it locally. Yields nothing when the classifier did not ask
 * for a calculation or the extractor reports no expression is present; an
 * evaluation failure is returned as an error payload rather than thrown.
 */
const calculationWidget: Widget = {
  type: 'calculationWidget',
  shouldExecute: (c) => c.classification.showCalculationWidget,
  execute: async (input) => {
    const userPrompt = `<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<user_follow_up>\n${input.followUp}\n</user_follow_up>`;
    const extracted = await input.llm.generateObject<z.infer<typeof schema>>({
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: userPrompt },
      ],
      schema,
    });
    if (extracted.notPresent) return;
    let result: unknown;
    try {
      result = evaluate(extracted.expression);
    } catch (err) {
      return {
        type: 'calculation_result',
        llmContext: 'Calculation failed.',
        data: { expression: extracted.expression, error: String(err) },
      };
    }
    return {
      type: 'calculation_result',
      llmContext: `The result of "${extracted.expression}" is: ${result}`,
      data: { expression: extracted.expression, result },
    };
  },
};
export default calculationWidget;

View File

@@ -0,0 +1,26 @@
import type { Widget, WidgetInput, WidgetOutput } from './types.js';
// Module-level widget registry, keyed by widget type (last registration wins).
const widgets = new Map<string, Widget>();
/** Add (or replace) a widget in the registry. */
export function registerWidget(widget: Widget) {
  widgets.set(widget.type, widget);
}
/**
 * Run every registered widget whose `shouldExecute` predicate matches the
 * classifier output. Widgets execute concurrently; a failing widget is
 * captured as an error output instead of rejecting the whole batch.
 *
 * Note: result order follows completion order, not registration order.
 *
 * Fix: the previous catch only recorded failures when `err instanceof Error`,
 * silently dropping non-Error throws (strings, plain objects). All throws are
 * now normalized and reported.
 */
export async function executeAllWidgets(input: WidgetInput): Promise<WidgetOutput[]> {
  const results: WidgetOutput[] = [];
  await Promise.all(
    Array.from(widgets.values()).map(async (widget) => {
      try {
        if (widget.shouldExecute(input.classification)) {
          const output = await widget.execute(input);
          if (output) results.push(output);
        }
      } catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        results.push({ type: widget.type, llmContext: `Error: ${message}`, data: { error: message } });
      }
    }),
  );
  return results;
}

View File

@@ -0,0 +1,10 @@
import weatherWidget from './weatherWidget.js';
import stockWidget from './stockWidget.js';
import calculationWidget from './calculationWidget.js';
import { registerWidget, executeAllWidgets } from './executor.js';
// Register every built-in widget at module load; the executor consults this
// registry when deciding which widgets to run for a query.
registerWidget(weatherWidget);
registerWidget(stockWidget);
registerWidget(calculationWidget);
export { executeAllWidgets };

View File

@@ -0,0 +1,103 @@
import z from 'zod';
import YahooFinance from 'yahoo-finance2';
import type { Widget } from './types.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
// Structured-output contract for the ticker-extraction LLM call below.
const schema = z.object({
  name: z.string().describe('Stock name or ticker (e.g. Nvidia, AAPL)'),
  // Up to three additional names when the user compares stocks; defaults to [].
  comparisonNames: z.array(z.string()).max(3).optional().default([]),
  notPresent: z.boolean().describe('Whether there is no need for the stock widget.'),
});
// System prompt: the model must answer with JSON matching `schema`.
const systemPrompt = `
You are a stock ticker/name extractor. Determine if the user is asking about stock information and extract the stock name(s).
- If asking about a stock, extract the primary name or ticker.
- If comparing stocks, extract up to 3 comparison names in comparisonNames.
- If not stock-related, set notPresent to true.
Respond in JSON: { "name": string, "comparisonNames": string[], "notPresent": boolean }
`;
/**
 * Widget that resolves a company name/ticker mentioned in the conversation
 * and fetches a live quote plus a 1-day intraday chart from Yahoo Finance.
 * Returns undefined when the extractor decides the turn is not stock-related;
 * any fetch failure degrades to an error payload rather than throwing.
 */
const stockWidget: Widget = {
  type: 'stockWidget',
  shouldExecute: (c) => c.classification.showStockWidget,
  execute: async (input) => {
    // Ask the LLM to pull the primary name/ticker (and up to 3 comparison
    // names) out of the conversation.
    const output = await input.llm.generateObject<z.infer<typeof schema>>({
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: `<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<user_follow_up>\n${input.followUp}\n</user_follow_up>` },
      ],
      schema,
    });
    if (output.notPresent) return;
    try {
      const yf = new YahooFinance({ suppressNotices: ['yahooSurvey'] });
      // Resolve the free-form name to a concrete exchange symbol via search,
      // then fetch the quote for the first match.
      const findings = await yf.search(output.name);
      if (!findings.quotes?.length) throw new Error(`No quote for: ${output.name}`);
      const ticker = findings.quotes[0].symbol as string;
      const quote = await yf.quote(ticker);
      if (!quote) throw new Error(`No data for: ${ticker}`);
      // Intraday series: last ~2 days of 5-minute candles. A chart failure
      // degrades to null instead of failing the whole widget.
      const chart1D = await yf.chart(ticker, {
        period1: new Date(Date.now() - 2 * 24 * 60 * 60 * 1000),
        period2: new Date(),
        interval: '5m',
      }).catch(() => null);
      // Only the 1D range is populated server-side; the remaining ranges are
      // null placeholders — presumably filled in lazily by the client. TODO confirm.
      const chartData: Record<string, { timestamps: number[]; prices: number[] } | null> = {
        '1D': chart1D?.quotes ? { timestamps: chart1D.quotes.map((q) => q.date.getTime()), prices: chart1D.quotes.map((q) => q.close ?? 0) } : null,
        '5D': null, '1M': null, '3M': null, '6M': null, '1Y': null, MAX: null,
      };
      // Flatten the Yahoo quote into the shape the stock widget renders.
      const stockData = {
        symbol: quote.symbol,
        shortName: quote.shortName || quote.longName || ticker,
        longName: quote.longName,
        exchange: quote.fullExchangeName || quote.exchange,
        currency: quote.currency,
        marketState: quote.marketState,
        regularMarketPrice: quote.regularMarketPrice,
        regularMarketChange: quote.regularMarketChange,
        regularMarketChangePercent: quote.regularMarketChangePercent,
        regularMarketPreviousClose: quote.regularMarketPreviousClose,
        regularMarketOpen: quote.regularMarketOpen,
        regularMarketDayHigh: quote.regularMarketDayHigh,
        regularMarketDayLow: quote.regularMarketDayLow,
        regularMarketVolume: quote.regularMarketVolume,
        averageDailyVolume3Month: quote.averageDailyVolume3Month,
        marketCap: quote.marketCap,
        fiftyTwoWeekLow: quote.fiftyTwoWeekLow,
        fiftyTwoWeekHigh: quote.fiftyTwoWeekHigh,
        trailingPE: quote.trailingPE,
        forwardPE: quote.forwardPE,
        dividendYield: quote.dividendYield,
        earningsPerShare: quote.epsTrailingTwelveMonths,
        // NOTE(review): `website` is not an obvious field of a Yahoo quote
        // response — verify it is actually populated (may come from
        // quoteSummary instead).
        website: quote.website,
        postMarketPrice: quote.postMarketPrice,
        postMarketChange: quote.postMarketChange,
        postMarketChangePercent: quote.postMarketChangePercent,
        preMarketPrice: quote.preMarketPrice,
        preMarketChange: quote.preMarketChange,
        preMarketChangePercent: quote.preMarketChangePercent,
        chartData,
        // NOTE(review): `output.comparisonNames` is extracted but never used —
        // comparison data is always null here. Confirm whether that is intended.
        comparisonData: null,
      };
      return {
        type: 'stock',
        llmContext: `Current price of ${stockData.shortName} (${stockData.symbol}) is ${stockData.regularMarketPrice} ${stockData.currency}.`,
        data: stockData,
      };
    } catch (err) {
      // Degrade gracefully: the orchestrator can still answer without the widget.
      return {
        type: 'stock',
        llmContext: 'Failed to fetch stock data.',
        data: { error: String(err), ticker: output.name },
      };
    }
  },
};
export default stockWidget;

View File

@@ -0,0 +1,20 @@
import type { ClassifierOutput } from '../actions/types.js';
/** Everything a widget needs to decide whether to run and to do its work. */
export type WidgetInput = {
  // Prior chat turns; `role` is a plain string ('user' / 'assistant' / ...).
  chatHistory: { role: string; content: string }[];
  // Standalone reformulation of the user's latest message.
  followUp: string;
  // Routing flags produced by the classifier.
  classification: ClassifierOutput;
  // Minimal structured-output LLM facade; `schema` is a zod schema.
  llm: { generateObject: <T>(input: { messages: { role: string; content: string }[]; schema: import('zod').ZodTypeAny }) => Promise<T> };
};
/** What a widget hands back: a payload for the UI plus context for the LLM. */
export type WidgetOutput = {
  type: string;
  // Short natural-language summary injected into the answering prompt.
  llmContext: string;
  data: Record<string, unknown> | null;
};
/** A pluggable widget: a gate predicate plus an async executor. */
export type Widget = {
  type: string;
  shouldExecute: (classification: ClassifierOutput) => boolean;
  // May resolve to void when the widget decides it has nothing to contribute.
  execute: (input: WidgetInput) => Promise<WidgetOutput | void>;
};

View File

@@ -0,0 +1,99 @@
import z from 'zod';
import type { Widget } from './types.js';
import formatChatHistoryAsString from '../utils/formatHistory.js';
// Structured-output contract for the location-extraction LLM call below.
// Either `location` is a non-empty name, or `lat`/`lon` carry coordinates.
const schema = z.object({
  location: z.string().describe('Human-readable location name. Leave empty if providing coordinates.'),
  lat: z.number().describe('Latitude. Only use when location is empty.'),
  lon: z.number().describe('Longitude. Only use when location is empty.'),
  notPresent: z.boolean().describe('Whether there is no need for the weather widget.'),
});
// System prompt: the model must answer with JSON matching `schema`.
const systemPrompt = `
You are a location extractor for weather queries. Determine if the user is asking about weather and extract the location.
- If asking about weather, extract location name OR coordinates (never both).
- If using coordinates, set location to empty string.
- If not weather-related or cannot determine location, set notPresent to true.
Respond in JSON: { "location": string, "lat": number, "lon": number, "notPresent": boolean }
`;
/**
 * Widget that geocodes a location mentioned in the conversation (or uses
 * explicit coordinates) and fetches a 7-day forecast from Open-Meteo.
 *
 * Behaviour:
 *   - named location -> geocode via Nominatim, then fetch the forecast;
 *   - coordinates    -> fetch forecast and reverse-geocode in parallel;
 *   - extractor says "not weather", or a fetch fails -> error payload / void.
 */
const weatherWidget: Widget = {
  type: 'weatherWidget',
  shouldExecute: (c) => c.classification.showWeatherWidget,
  execute: async (input) => {
    const extracted = await input.llm.generateObject<z.infer<typeof schema>>({
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: `<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<user_follow_up>\n${input.followUp}\n</user_follow_up>` },
      ],
      schema,
    });
    if (extracted.notPresent) return;

    // Both branches request the identical forecast; build the URL in one place.
    const headers = { 'User-Agent': 'GooSeek' };
    const forecastUrl = (lat: string | number | undefined, lon: string | number | undefined) =>
      `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lon}&current=temperature_2m,relative_humidity_2m,weather_code&daily=weather_code,temperature_2m_max,temperature_2m_min&timezone=auto&forecast_days=7`;

    try {
      if (extracted.location) {
        // Named location: resolve it to coordinates first (forward geocode).
        const geoRes = await fetch(
          `https://nominatim.openstreetmap.org/search?q=${encodeURIComponent(extracted.location)}&format=json&limit=1`,
          { headers },
        );
        const matches = (await geoRes.json()) as { lat?: string; lon?: string }[];
        const match = matches[0];
        if (!match) throw new Error(`Location not found: ${extracted.location}`);
        const forecastRes = await fetch(forecastUrl(match.lat, match.lon), { headers });
        const forecast = (await forecastRes.json()) as { current?: Record<string, unknown>; daily?: Record<string, unknown>; timezone?: string };
        return {
          type: 'weather',
          llmContext: `Weather in ${extracted.location}: ${JSON.stringify(forecast.current)}`,
          data: {
            location: extracted.location,
            latitude: Number(match.lat),
            longitude: Number(match.lon),
            current: forecast.current,
            daily: forecast.daily,
            timezone: forecast.timezone,
          },
        };
      }
      if (extracted.lat !== undefined && extracted.lon !== undefined) {
        // Coordinates: the forecast and the reverse geocode are independent,
        // so issue both requests concurrently.
        const [forecastRes, geoRes] = await Promise.all([
          fetch(forecastUrl(extracted.lat, extracted.lon), { headers }),
          fetch(
            `https://nominatim.openstreetmap.org/reverse?lat=${extracted.lat}&lon=${extracted.lon}&format=json`,
            { headers },
          ),
        ]);
        const forecast = (await forecastRes.json()) as { current?: Record<string, unknown>; daily?: Record<string, unknown>; timezone?: string };
        const place = (await geoRes.json()) as { display_name?: string };
        return {
          type: 'weather',
          llmContext: `Weather in ${place.display_name}: ${JSON.stringify(forecast.current)}`,
          data: {
            location: place.display_name,
            latitude: extracted.lat,
            longitude: extracted.lon,
            current: forecast.current,
            daily: forecast.daily,
            timezone: forecast.timezone,
          },
        };
      }
    } catch (err) {
      return {
        type: 'weather',
        llmContext: 'Failed to fetch weather data.',
        data: { error: String(err) },
      };
    }
    return;
  },
};
export default weatherWidget;

View File

@@ -0,0 +1,5 @@
// Minimal ambient typing for the 'turndown' package (HTML -> Markdown
// converter); only the single method this service uses is declared.
declare module 'turndown' {
  export default class TurndownService {
    turndown(html: string): string;
  }
}