feat: default locale Russian, geo determines language for other countries

- localization-svc: defaultLocale ru, resolveLocale only by geo
- web-svc: DEFAULT_LOCALE ru, layout lang=ru, embeddedTranslations fallback ru
- countryToLocale: default ru when no country or unknown country

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
home
2026-02-23 15:10:38 +03:00
parent 8fc82a3b90
commit cd6b7857ba
606 changed files with 26148 additions and 14297 deletions

View File

@@ -0,0 +1,25 @@
{
"name": "suggestions-svc",
"version": "1.0.0",
"private": true,
"type": "module",
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"lint": "eslint src --ext .ts"
},
"dependencies": {
"@fastify/cors": "^9.0.1",
"@toolsycc/json-repair": "^0.1.22",
"fastify": "^4.28.1",
"ollama": "^0.6.3",
"openai": "^6.9.0",
"zod": "^4.1.12"
},
"devDependencies": {
"@types/node": "^24.8.1",
"tsx": "^4.19.2",
"typescript": "^5.9.3"
}
}

View File

@@ -0,0 +1,57 @@
/**
* suggestions-svc — AI follow-up suggestions
* API: POST /api/v1/suggestions
*/
import Fastify from 'fastify';
import cors from '@fastify/cors';
import { z } from 'zod';
import { loadChatModel } from './lib/models/registry.js';
import { generateSuggestions } from './lib/agents/suggestions.js';
// Port the service listens on; override with the PORT env var.
const PORT = parseInt(process.env.PORT ?? '3017', 10);

// Selector for which configured provider/model handles generation.
const chatModelSchema = z.object({ providerId: z.string(), key: z.string() });

// POST /api/v1/suggestions request body.
const bodySchema = z.object({
  chatHistory: z.array(z.tuple([z.string(), z.string()])),
  chatModel: chatModelSchema,
  locale: z.string().optional(),
});

const app = Fastify({ logger: true });
await app.register(cors, { origin: true });

// Liveness probe for orchestrators / load balancers.
app.get('/health', async () => ({ status: 'ok' }));

app.post('/api/v1/suggestions', async (req, reply) => {
  try {
    const validation = bodySchema.safeParse((req as { body?: unknown }).body);
    if (!validation.success) {
      return reply.status(400).send({ error: 'Invalid body', details: validation.error.flatten() });
    }
    const { chatHistory, chatModel, locale } = validation.data;
    const model = await loadChatModel(chatModel.providerId, chatModel.key);
    // History arrives as [role, content] tuples; 'human' maps to the user
    // role, every other tag is treated as the assistant.
    const messages = chatHistory.map(([role, content]) => ({
      role: (role === 'human' ? 'user' : 'assistant') as 'user' | 'assistant',
      content,
    }));
    const suggestions = await generateSuggestions({ chatHistory: messages, locale }, model);
    return reply.send({ suggestions });
  } catch (err) {
    req.log.error(err);
    const msg = err instanceof Error ? err.message : 'Suggestions generation failed';
    return reply.status(500).send({ error: msg });
  }
});

try {
  await app.listen({ port: PORT, host: '0.0.0.0' });
  console.log(`suggestions-svc listening on :${PORT}`);
} catch (err) {
  console.error(err);
  process.exit(1);
}

View File

@@ -0,0 +1,26 @@
import z from 'zod';
import formatChatHistoryAsString from '../utils/formatHistory.js';
import { getSuggestionGeneratorPrompt } from '../prompts/suggestions.js';
import type { BaseLLM } from '../models/base/llm.js';
import type { ChatTurnMessage } from '../types.js';
// Structured-output contract for the suggestions call. Hoisted to module
// scope so the zod schema is built once, not on every request.
const suggestionsSchema = z.object({
  suggestions: z.array(z.string()).describe('List of suggested questions or prompts'),
});

/** Input for generateSuggestions: prior chat turns plus an optional locale hint. */
type Input = { chatHistory: ChatTurnMessage[]; locale?: string };

/**
 * Ask the LLM for follow-up suggestions grounded in the chat history.
 *
 * @param input - chat turns and optional locale forwarded to the prompt builder
 * @param llm - model backend used for the structured generation call
 * @returns the list of suggested follow-up questions/prompts
 */
export async function generateSuggestions(input: Input, llm: BaseLLM): Promise<string[]> {
  const res = await llm.generateObject<{ suggestions: string[] }>({
    schema: suggestionsSchema,
    messages: [
      { role: 'system', content: getSuggestionGeneratorPrompt(input.locale) },
      {
        role: 'user',
        content: `<chat_history>\n${formatChatHistoryAsString(input.chatHistory)}\n</chat_history>`,
      },
    ],
  });
  return res.suggestions;
}

View File

@@ -0,0 +1,37 @@
import path from 'node:path';
import fs from 'node:fs';
export type ConfigModelProvider = {
  id: string;
  name: string;
  type: string;
  chatModels: { key: string; name: string }[];
  config: Record<string, unknown>;
};

/** Resolve the path to data/config.json, honoring DATA_DIR when set. */
function getConfigPath(): string {
  const root = process.env.DATA_DIR
    ? path.resolve(process.cwd(), process.env.DATA_DIR)
    : process.cwd();
  return path.join(root, 'data', 'config.json');
}

// Providers are read once and cached for the process lifetime; a missing or
// unreadable config is cached as "no providers".
let cached: ConfigModelProvider[] | null = null;

/**
 * Look up a model provider by id from the cached config file.
 *
 * @param id - provider id as stored in config.json's modelProviders
 * @returns the provider entry, or undefined when not configured
 */
export function getProviderById(id: string): ConfigModelProvider | undefined {
  if (cached === null) {
    const configPath = getConfigPath();
    if (!fs.existsSync(configPath)) {
      cached = [];
      return undefined;
    }
    try {
      const raw: unknown = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
      const providers = (raw as { modelProviders?: unknown }).modelProviders;
      // Guard against a malformed config: only accept an actual array, so a
      // bad file can never poison the cache with a non-array value.
      cached = Array.isArray(providers) ? (providers as ConfigModelProvider[]) : [];
    } catch {
      // Invalid JSON is treated as "no providers configured" by design.
      cached = [];
    }
  }
  return cached?.find((x) => x.id === id);
}

View File

@@ -0,0 +1,11 @@
import type z from 'zod';
import type { Message } from '../../types.js';
/** Arguments for a structured-output generation call: a zod schema the
 * result must satisfy, plus the chat messages to send. */
export type GenerateObjectInput = {
  schema: z.ZodTypeAny;
  messages: Message[];
};
/** Minimal chat-model contract: generate an object matching a zod schema.
 * Concrete backends (OpenAI, Ollama) implement the transport. */
export abstract class BaseLLM {
  // NOTE(review): implementations are expected to validate the model output
  // against input.schema before returning — confirm for new backends.
  abstract generateObject<T>(input: GenerateObjectInput): Promise<T>;
}

View File

@@ -0,0 +1,45 @@
import { Ollama } from 'ollama';
import z from 'zod';
import { repairJson } from '@toolsycc/json-repair';
import { BaseLLM } from './base/llm.js';
import type { Message } from '../types.js';
type OllamaConfig = { baseURL: string; model: string };

/** BaseLLM backend that talks to a local or remote Ollama server. */
export class OllamaLLM extends BaseLLM {
  private client: Ollama;
  private model: string;

  constructor(config: OllamaConfig) {
    super();
    this.model = config.model;
    this.client = new Ollama({
      host: config.baseURL || 'http://localhost:11434',
    });
  }

  /** Map an internal message onto Ollama's chat message shape. */
  private toOllama(msg: Message): { role: 'user' | 'assistant' | 'system'; content: string } {
    let role: 'user' | 'assistant' | 'system';
    if (msg.role === 'system') {
      role = 'system';
    } else if (msg.role === 'user') {
      role = 'user';
    } else {
      role = 'assistant';
    }
    return { role, content: msg.content };
  }

  /**
   * Generate an object matching `input.schema` via Ollama's structured
   * `format` option, repairing malformed JSON before validating.
   */
  async generateObject<T>(input: {
    schema: z.ZodTypeAny;
    messages: Message[];
  }): Promise<T> {
    const response = await this.client.chat({
      model: this.model,
      messages: input.messages.map((m) => this.toOllama(m)),
      format: z.toJSONSchema(input.schema),
    });
    const raw = response.message?.content;
    if (!raw) throw new Error('No response from Ollama');
    // Model output may be slightly malformed JSON; repair, then validate.
    const repaired = repairJson(raw, { extractJson: true }) as string;
    return input.schema.parse(JSON.parse(repaired)) as T;
  }
}

View File

@@ -0,0 +1,49 @@
import OpenAI from 'openai';
import { zodResponseFormat } from 'openai/helpers/zod';
import { repairJson } from '@toolsycc/json-repair';
import { BaseLLM } from './base/llm.js';
import type { Message } from '../types.js';
type OpenAIConfig = {
  apiKey: string;
  model: string;
  baseURL?: string;
};

/** BaseLLM backend for OpenAI-compatible chat completion APIs. */
export class OpenAILLM extends BaseLLM {
  private client: OpenAI;
  private model: string;

  constructor(config: OpenAIConfig) {
    super();
    this.model = config.model;
    this.client = new OpenAI({
      apiKey: config.apiKey,
      baseURL: config.baseURL ?? 'https://api.openai.com/v1',
    });
  }

  /** Convert an internal message to OpenAI's chat message shape. */
  private toOpenAI(msg: Message): OpenAI.Chat.Completions.ChatCompletionMessageParam {
    if (msg.role === 'system') return { role: 'system', content: msg.content };
    if (msg.role === 'assistant') return { role: 'assistant', content: msg.content };
    return { role: 'user', content: msg.content };
  }

  /**
   * Generate an object matching `input.schema`.
   *
   * Uses the SDK's `chat.completions.parse()` structured-output helper, which
   * already validates the response against the zod schema and exposes it as
   * `message.parsed` — prefer that over re-parsing the raw text. Falls back
   * to JSON repair + schema validation when `parsed` is absent.
   *
   * @throws Error when the model returns no content at all.
   */
  async generateObject<T>(input: {
    schema: Parameters<typeof zodResponseFormat>[0];
    messages: Message[];
  }): Promise<T> {
    const messages = input.messages.map((m) => this.toOpenAI(m));
    const res = await this.client.chat.completions.parse({
      model: this.model,
      messages,
      response_format: zodResponseFormat(input.schema, 'object'),
    });
    const message = res.choices?.[0]?.message;
    // parse() already validated this payload; reuse it instead of re-parsing.
    if (message?.parsed != null) return message.parsed as T;
    const content = message?.content;
    if (!content) throw new Error('No response from OpenAI');
    // Fallback: repair possibly malformed JSON text, then validate.
    const parsed = JSON.parse(
      repairJson(content, { extractJson: true }) as string,
    );
    return input.schema.parse(parsed) as T;
  }
}

View File

@@ -0,0 +1,65 @@
import { getProviderById } from '../config.js';
import { OpenAILLM } from './openai.js';
import { OllamaLLM } from './ollama.js';
import type { BaseLLM } from './base/llm.js';
/**
 * Derive a provider definition from environment variables.
 * Returns null when neither LLM_PROVIDER nor OPENAI_API_KEY select a backend.
 */
function getEnvProvider(): { type: string; config: Record<string, unknown> } | null {
  const selected = (process.env.LLM_PROVIDER ?? '').toLowerCase();
  if (selected === 'ollama') {
    // Inside Docker the host's Ollama is reachable via host.docker.internal.
    const fallback = process.env.DOCKER
      ? 'http://host.docker.internal:11434'
      : 'http://localhost:11434';
    return { type: 'ollama', config: { baseURL: process.env.OLLAMA_BASE_URL ?? fallback } };
  }
  const apiKey = process.env.OPENAI_API_KEY;
  // An API key alone is enough to select OpenAI, even without LLM_PROVIDER.
  if (selected === 'openai' || apiKey) {
    return {
      type: 'openai',
      config: {
        apiKey: apiKey ?? '',
        baseURL: process.env.OPENAI_BASE_URL ?? 'https://api.openai.com/v1',
      },
    };
  }
  return null;
}
export async function loadChatModel(providerId: string, modelKey: string): Promise<BaseLLM> {
let provider = getProviderById(providerId);
if (!provider) {
const env = getEnvProvider();
if (env) {
provider = {
id: 'env',
name: env.type,
type: env.type,
chatModels: [{ key: modelKey, name: modelKey }],
config: env.config,
};
}
}
if (!provider) throw new Error(`Provider not found: ${providerId}`);
const hasModel = provider.chatModels?.some((m) => m.key === modelKey);
if (!hasModel && provider.id !== 'env') {
throw new Error(`Model ${modelKey} not found in provider ${providerId}`);
}
const cfg = provider.config as Record<string, unknown>;
if (provider.type === 'openai') {
const apiKey = (cfg.apiKey as string) || process.env.OPENAI_API_KEY;
const baseURL = (cfg.baseURL as string) || 'https://api.openai.com/v1';
if (!apiKey) throw new Error('OpenAI API key not configured');
return new OpenAILLM({ apiKey, model: modelKey, baseURL });
}
if (provider.type === 'ollama') {
const baseURL =
(cfg.baseURL as string) ||
process.env.OLLAMA_BASE_URL ||
'http://localhost:11434';
return new OllamaLLM({ baseURL, model: modelKey });
}
throw new Error(`Unsupported provider type: ${provider.type}`);
}

View File

@@ -0,0 +1,21 @@
// ISO 639-1 primary language subtags mapped to English language names.
const LOCALE_TO_LANGUAGE: Record<string, string> = {
  ru: 'Russian',
  en: 'English',
  de: 'German',
  fr: 'French',
  es: 'Spanish',
  it: 'Italian',
  pt: 'Portuguese',
  uk: 'Ukrainian',
  pl: 'Polish',
  zh: 'Chinese',
  ja: 'Japanese',
  ko: 'Korean',
};

/**
 * Build a system-prompt fragment instructing the model to respond in the
 * user's language.
 *
 * @param locale - a BCP 47-ish tag ('ru', 'en-US', 'ru_RU'); undefined/empty yields ''
 * @returns the instruction fragment, or '' when no locale was given
 */
export function getLocaleInstruction(locale?: string): string {
  if (!locale) return '';
  // BCP 47 tags are case-insensitive and sometimes use '_' instead of '-';
  // normalize so 'RU', 'ru-RU' and 'ru_RU' all resolve to 'ru'.
  const lang = locale.split(/[-_]/)[0].toLowerCase();
  const name = LOCALE_TO_LANGUAGE[lang] ?? lang;
  return `\n<response_language>\nUser's locale is ${locale}. Always format your response in ${name}.\n</response_language>`;
}

View File

@@ -0,0 +1,17 @@
import { getLocaleInstruction } from './locale.js';
// Static portion of the suggestion-generator system prompt. The date is
// deliberately NOT interpolated here: a module-level template would freeze
// it at process start and go stale in a long-running service.
const BASE_PROMPT = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. Generate 4-5 DIFFERENT follow-up questions the user could ask to learn more.
Rules:
- Each suggestion must be a distinct question about a different aspect or angle of the topic
- Avoid similar or repetitive phrasing across suggestions
- Focus on what was actually discussed — suggest natural next steps: details, comparisons, implications, counterarguments, recent developments
- Keep suggestions medium length, concrete and actionable
`;

/**
 * Build the suggestion-generator system prompt, with today's date computed
 * per call and an optional response-language instruction for the locale.
 */
export function getSuggestionGeneratorPrompt(locale?: string): string {
  return `${BASE_PROMPT}Today's date is ${new Date().toISOString()}\n` + getLocaleInstruction(locale);
}

View File

@@ -0,0 +1,6 @@
/** Message authored by the end user. */
export type UserMessage = { role: 'user'; content: string };
/** Message authored by the model. */
export type AssistantMessage = { role: 'assistant'; content: string };
/** System/instruction message sent to the model. */
export type SystemMessage = { role: 'system'; content: string };
/** One conversational turn: user or assistant (system excluded). */
export type ChatTurnMessage = UserMessage | AssistantMessage;
/** Any message role accepted by the model layer. */
export type Message = UserMessage | AssistantMessage | SystemMessage;

View File

@@ -0,0 +1,7 @@
import type { ChatTurnMessage } from '../types.js';
/** Render chat turns as plain text, one "User:"/"AI:" line per message. */
export default function formatChatHistoryAsString(history: ChatTurnMessage[]): string {
  const lines: string[] = [];
  for (const { role, content } of history) {
    const speaker = role === 'assistant' ? 'AI' : 'User';
    lines.push(`${speaker}: ${content}`);
  }
  return lines.join('\n');
}

View File

@@ -0,0 +1,13 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"outDir": "dist",
"rootDir": "src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true
},
"include": ["src/**/*"]
}