feat: default locale Russian, geo determines language for other countries

- localization-svc: defaultLocale ru, resolveLocale only by geo
- web-svc: DEFAULT_LOCALE ru, layout lang=ru, embeddedTranslations fallback ru
- countryToLocale: default ru when no country or unknown country
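
A minimal sketch of the fallback described above (the map and function shape are illustrative; the real countryToLocale implementation is not shown in this excerpt):

const DEFAULT_LOCALE = 'ru';
// Hypothetical geo mapping: only countries with an explicit entry override the default.
const countryLocaleMap: Record<string, string> = { US: 'en', DE: 'de', FR: 'fr' };
function countryToLocale(country?: string): string {
if (!country) return DEFAULT_LOCALE; // no geo data -> ru
return countryLocaleMap[country.toUpperCase()] ?? DEFAULT_LOCALE; // unknown country -> ru
}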

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in: home
Date: 2026-02-23 15:10:38 +03:00
Parent: 8fc82a3b90
Commit: cd6b7857ba
606 changed files with 26148 additions and 14297 deletions


@@ -0,0 +1,17 @@
FROM node:20-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
FROM node:20-alpine
WORKDIR /app
COPY package*.json ./
RUN npm install --omit=dev
COPY --from=builder /app/dist ./dist
EXPOSE 3020
ENV DATA_DIR=/app/data
VOLUME /app/data
CMD ["node", "dist/index.js"]
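# Example build/run, mounting the declared volume (image and volume names are illustrative):
#   docker build -t llm-svc .
#   docker run -p 3020:3020 -v llm-data:/app/data llm-svc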


@@ -0,0 +1,25 @@
{
"modelProviders": [
{
"id": "env-ollama",
"name": "Ollama",
"type": "ollama",
"chatModels": [
{
"key": "llama3.2",
"name": "llama3.2"
}
],
"embeddingModels": [
{
"key": "nomic-embed-text",
"name": "nomic-embed-text"
}
],
"config": {
"baseURL": "http://localhost:11434"
},
"hash": "d0cbe7f5ae6a9270120e31e589de2fd33fda94d1a337c249aa34531c403fa7ff"
}
]
}


@@ -0,0 +1,28 @@
{
"name": "llm-svc",
"version": "1.0.0",
"private": true,
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"lint": "eslint src --ext .ts"
},
"dependencies": {
"@fastify/cors": "^9.0.1",
"@google/genai": "^1.34.0",
"@toolsycc/json-repair": "^0.1.22",
"axios": "^1.8.3",
"fastify": "^4.28.1",
"ollama": "^0.6.3",
"openai": "^6.9.0",
"partial-json": "^0.1.7",
"zod": "^4.1.12"
},
"devDependencies": {
"@types/node": "^24.8.1",
"tsx": "^4.19.2",
"typescript": "^5.9.3"
},
"type": "module"
}


@@ -0,0 +1,300 @@
/**
* llm-svc — LLM provider management microservice
* API: GET/POST/PATCH/DELETE /api/v1/providers, GET/POST /api/v1/providers/:id/models
* Config: data/llm-providers.json, envOnlyMode via LLM_PROVIDER=ollama|timeweb
*/
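// Quick smoke test against a local instance (port 3020 is the default below; the
// POST body is one valid example of the providerPostSchema defined in this file):
//   curl http://localhost:3020/health
//   curl http://localhost:3020/api/v1/providers
//   curl -X POST http://localhost:3020/api/v1/providers \
//     -H 'Content-Type: application/json' \
//     -d '{"type":"ollama","name":"Local Ollama","config":{"baseURL":"http://localhost:11434"}}'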
import Fastify from 'fastify';
import cors from '@fastify/cors';
import { z } from 'zod';
import ModelRegistry from './lib/models/registry.js';
import { isEnvOnlyMode } from './lib/config/serverRegistry.js';
import path from 'node:path';
import fs from 'node:fs';
const DATA_DIR = process.env.DATA_DIR
? path.resolve(process.cwd(), process.env.DATA_DIR)
: process.cwd();
const dataDir = path.join(DATA_DIR, 'data');
if (!fs.existsSync(dataDir)) {
fs.mkdirSync(dataDir, { recursive: true });
}
const PORT = parseInt(process.env.PORT ?? '3020', 10);
const providerPostSchema = z.object({
type: z.string().min(1),
name: z.string().min(1),
config: z.record(z.string(), z.unknown()),
});
const providerPatchSchema = z.object({
name: z.string().min(1),
config: z.record(z.string(), z.unknown()),
});
const modelPostSchema = z.object({
type: z.enum(['chat', 'embedding']),
key: z.string().min(1),
name: z.string().min(1),
});
const app = Fastify({ logger: true });
const corsOrigin = process.env.ALLOWED_ORIGINS
? process.env.ALLOWED_ORIGINS.split(',')
.map((s) => s.trim())
.filter(Boolean)
: true;
await app.register(cors, { origin: corsOrigin });
app.get('/health', async () => ({ status: 'ok' }));
app.get('/ready', async () => ({ status: 'ready' }));
app.get('/metrics', async (_req, reply) => {
reply.header('Content-Type', 'text/plain; charset=utf-8');
return reply.send(
'# HELP gooseek_up Service is up (1) or down (0)\n' +
'# TYPE gooseek_up gauge\n' +
'gooseek_up 1\n',
);
});
/* --- Providers --- */
app.get<{ Querystring: { internal?: string } }>('/api/v1/providers', async (req, reply) => {
try {
const registry = new ModelRegistry();
const providers = await registry.getActiveProviders();
const internal = req.query.internal === '1';
if (internal) {
const { getConfiguredModelProviders } = await import('./lib/config/serverRegistry.js');
const fullProviders = getConfiguredModelProviders();
const merged = providers.map((p) => {
const full = fullProviders.find((f) => f.id === p.id);
return full
? { ...full, chatModels: p.chatModels, embeddingModels: p.embeddingModels }
: p;
});
return reply.send({ providers: merged, envOnlyMode: isEnvOnlyMode() });
}
return reply.send({
providers,
envOnlyMode: isEnvOnlyMode(),
});
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
});
app.post<{ Body: unknown }>('/api/v1/providers', async (req, reply) => {
if (isEnvOnlyMode()) {
return reply.status(405).send({ message: 'Not available in env-only mode.' });
}
const parsed = providerPostSchema.safeParse(req.body);
if (!parsed.success) {
return reply.status(400).send({
message: 'Missing required fields (type, name, config).',
error: parsed.error.issues,
});
}
try {
const registry = new ModelRegistry();
const provider = await registry.addProvider(
parsed.data.type,
parsed.data.name,
parsed.data.config,
);
return reply.send({ provider });
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
});
app.patch<{ Params: { id: string }; Body: unknown }>(
'/api/v1/providers/:id',
async (req, reply) => {
if (isEnvOnlyMode()) {
return reply
.status(405)
.send({ message: 'Not available in env-only mode.' });
}
const { id } = req.params;
const parsed = providerPatchSchema.safeParse(req.body);
if (!parsed.success) {
return reply.status(400).send({
message: 'Missing required fields (name, config).',
error: parsed.error.issues,
});
}
if (!id) {
return reply.status(400).send({ message: 'Provider ID is required.' });
}
try {
const registry = new ModelRegistry();
const provider = await registry.updateProvider(
id,
parsed.data.name,
parsed.data.config,
);
return reply.send({ provider });
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
},
);
app.delete<{ Params: { id: string } }>(
'/api/v1/providers/:id',
async (req, reply) => {
if (isEnvOnlyMode()) {
return reply
.status(405)
.send({ message: 'Not available in env-only mode.' });
}
const { id } = req.params;
if (!id) {
return reply.status(400).send({ message: 'Provider ID is required.' });
}
try {
const registry = new ModelRegistry();
await registry.removeProvider(id);
return reply.send({ message: 'Provider deleted successfully.' });
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
},
);
app.get<{ Params: { id: string } }>(
'/api/v1/providers/:id',
async (req, reply) => {
const { id } = req.params;
if (!id) {
return reply.status(400).send({ message: 'Provider ID is required.' });
}
try {
const registry = new ModelRegistry();
const providers = await registry.getActiveProviders();
const provider = providers.find((p) => p.id === id);
if (!provider) {
return reply.status(404).send({ message: 'Provider not found.' });
}
const { getConfiguredModelProviderById } = await import('./lib/config/serverRegistry.js');
const fullProvider = getConfiguredModelProviderById(id);
if (!fullProvider) {
return reply.send(provider);
}
return reply.send({
...fullProvider,
chatModels: provider.chatModels,
embeddingModels: provider.embeddingModels,
});
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
},
);
app.get<{ Params: { id: string } }>(
'/api/v1/providers/:id/models',
async (req, reply) => {
const { id } = req.params;
if (!id) {
return reply.status(400).send({ message: 'Provider ID is required.' });
}
try {
const registry = new ModelRegistry();
const providers = await registry.getActiveProviders();
const provider = providers.find((p) => p.id === id);
if (!provider) {
return reply.status(404).send({ message: 'Provider not found.' });
}
return reply.send({
chatModels: provider.chatModels,
embeddingModels: provider.embeddingModels,
});
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
},
);
app.post<{ Params: { id: string }; Body: unknown }>(
'/api/v1/providers/:id/models',
async (req, reply) => {
if (isEnvOnlyMode()) {
return reply
.status(405)
.send({ message: 'Not available in env-only mode.' });
}
const { id } = req.params;
const parsed = modelPostSchema.safeParse(req.body);
if (!parsed.success) {
return reply.status(400).send({
message: 'key, name and type (chat|embedding) must be provided',
error: parsed.error.issues,
});
}
if (!id) {
return reply.status(400).send({ message: 'Provider ID is required.' });
}
try {
const registry = new ModelRegistry();
await registry.addProviderModel(id, parsed.data.type, {
key: parsed.data.key,
name: parsed.data.name,
});
return reply.send({ message: 'Model added successfully.' });
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
},
);
const modelDeleteSchema = z.object({
type: z.enum(['chat', 'embedding']),
key: z.string().min(1),
});
app.delete<{ Params: { id: string }; Body: unknown }>(
'/api/v1/providers/:id/models',
async (req, reply) => {
if (isEnvOnlyMode()) {
return reply
.status(405)
.send({ message: 'Not available in env-only mode.' });
}
const { id } = req.params;
const parsed = modelDeleteSchema.safeParse(req.body ?? {});
if (!parsed.success) {
return reply.status(400).send({
message: 'type and key must be provided',
error: parsed.error.issues,
});
}
if (!id) {
return reply.status(400).send({ message: 'Provider ID is required.' });
}
try {
const registry = new ModelRegistry();
await registry.removeProviderModel(id, parsed.data.type, parsed.data.key);
return reply.send({ message: 'Model deleted successfully.' });
} catch (err) {
app.log.error(err);
return reply.status(500).send({ message: 'An error has occurred.' });
}
},
);
try {
await app.listen({ port: PORT, host: '0.0.0.0' });
console.log('llm-svc listening on :' + PORT);
} catch (err) {
console.error(err);
process.exit(1);
}


@@ -0,0 +1,227 @@
import path from 'node:path';
import fs from 'node:fs';
import crypto from 'node:crypto';
import type { ConfigModelProvider } from './types.js';
import { hashObj } from '../serverUtils.js';
// Factory rather than a shared module-level object: spreading one default would
// leak a single mutable modelProviders array across every reset.
const defaultProvidersConfig = (): { modelProviders: ConfigModelProvider[] } => ({
modelProviders: [],
});
class ProvidersConfig {
private configPath: string;
private currentConfig: { modelProviders: ConfigModelProvider[] } =
defaultProvidersConfig();
constructor() {
const dataDir = process.env.DATA_DIR
? path.resolve(process.cwd(), process.env.DATA_DIR)
: process.cwd();
this.configPath = path.join(dataDir, 'data', 'llm-providers.json');
this.initialize();
}
private ensureDir(): void {
const dir = path.dirname(this.configPath);
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true });
}
}
private load(): void {
this.ensureDir();
if (fs.existsSync(this.configPath)) {
try {
const raw = fs.readFileSync(this.configPath, 'utf-8');
this.currentConfig = JSON.parse(raw) as {
modelProviders: ConfigModelProvider[];
};
if (!Array.isArray(this.currentConfig.modelProviders)) {
this.currentConfig.modelProviders = [];
}
} catch (err) {
if (err instanceof SyntaxError) {
console.error(
`Error parsing llm-providers.json at ${this.configPath}:`,
err,
);
this.currentConfig = defaultProvidersConfig();
this.save();
} else {
throw err;
}
}
} else {
this.currentConfig = defaultProvidersConfig();
this.save();
}
}
private initialize(): void {
this.load();
const envProvider = this.buildEnvProvider();
if (envProvider) {
this.currentConfig.modelProviders = [envProvider];
this.save();
}
}
private save(): void {
this.ensureDir();
fs.writeFileSync(
this.configPath,
JSON.stringify(this.currentConfig, null, 2),
);
}
/** Env-only mode: the active provider is built from LLM_PROVIDER=ollama|timeweb and provider/model CRUD is disabled. */
public isEnvOnlyMode(): boolean {
return ['ollama', 'timeweb'].includes(
(process.env.LLM_PROVIDER ?? '').toLowerCase(),
);
}
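// For example, starting the service as
//   LLM_PROVIDER=ollama OLLAMA_BASE_URL=http://localhost:11434 node dist/index.js
// makes initialize() replace the stored config with the single env-built provider
// below, and index.ts rejects all provider/model mutations with 405.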
private buildEnvProvider(): ConfigModelProvider | null {
const providerType = (process.env.LLM_PROVIDER ?? '').toLowerCase();
if (providerType === 'ollama') {
const defaultUrl =
process.env.DOCKER === '1'
? 'http://host.docker.internal:11434'
: 'http://localhost:11434';
const baseURL = process.env.OLLAMA_BASE_URL ?? defaultUrl;
const embeddingBaseURL =
process.env.OLLAMA_EMBEDDING_BASE_URL ?? baseURL;
const chatModel = process.env.LLM_CHAT_MODEL ?? 'llama3.2';
const embeddingModel =
process.env.LLM_EMBEDDING_MODEL ?? 'nomic-embed-text';
const config: Record<string, string> = {
baseURL,
...(embeddingBaseURL !== baseURL && { embeddingBaseURL }),
};
return {
id: 'env-ollama',
name: 'Ollama',
type: 'ollama',
chatModels: [{ key: chatModel, name: chatModel }],
embeddingModels: [{ key: embeddingModel, name: embeddingModel }],
config,
hash: hashObj(config),
};
}
if (providerType === 'timeweb') {
const baseURL =
process.env.TIMEWEB_API_BASE_URL ?? 'https://api.timeweb.cloud';
const agentAccessId = process.env.TIMEWEB_AGENT_ACCESS_ID ?? '';
const apiKey = process.env.TIMEWEB_API_KEY ?? '';
const model = process.env.LLM_CHAT_MODEL ?? 'gpt-4';
const xProxySource = process.env.TIMEWEB_X_PROXY_SOURCE;
if (!agentAccessId || !apiKey) return null;
const config: Record<string, string> = {
baseURL,
agentAccessId,
apiKey,
model,
...(xProxySource && { xProxySource }),
};
return {
id: 'env-timeweb',
name: 'Timeweb Cloud AI',
type: 'timeweb',
chatModels: [{ key: model, name: model }],
embeddingModels: [],
config,
hash: hashObj({ baseURL, agentAccessId }),
};
}
return null;
}
public getModelProviders(): ConfigModelProvider[] {
return JSON.parse(JSON.stringify(this.currentConfig.modelProviders));
}
public getModelProviderById(id: string): ConfigModelProvider | undefined {
return this.currentConfig.modelProviders.find((p) => p.id === id);
}
public addModelProvider(
type: string,
name: string,
config: Record<string, unknown>,
): ConfigModelProvider {
const newProvider: ConfigModelProvider = {
id: crypto.randomUUID(),
name,
type,
config,
chatModels: [],
embeddingModels: [],
hash: hashObj(config),
};
this.currentConfig.modelProviders.push(newProvider);
this.save();
return newProvider;
}
public removeModelProvider(id: string): void {
this.currentConfig.modelProviders =
this.currentConfig.modelProviders.filter((p) => p.id !== id);
this.save();
}
public updateModelProvider(
id: string,
name: string,
config: Record<string, unknown>,
): ConfigModelProvider {
const provider = this.currentConfig.modelProviders.find((p) => p.id === id);
if (!provider) throw new Error('Provider not found');
provider.name = name;
provider.config = config;
provider.hash = hashObj(config);
this.save();
return provider;
}
public addProviderModel(
providerId: string,
type: 'embedding' | 'chat',
model: { key: string; name: string },
): void {
const provider = this.currentConfig.modelProviders.find(
(p) => p.id === providerId,
);
if (!provider) throw new Error('Invalid provider id');
if (type === 'chat') {
provider.chatModels.push(model);
} else {
provider.embeddingModels.push(model);
}
this.save();
}
public removeProviderModel(
providerId: string,
type: 'embedding' | 'chat',
modelKey: string,
): void {
const provider = this.currentConfig.modelProviders.find(
(p) => p.id === providerId,
);
if (!provider) throw new Error('Invalid provider id');
if (type === 'chat') {
provider.chatModels = provider.chatModels.filter((m) => m.key !== modelKey);
} else {
provider.embeddingModels = provider.embeddingModels.filter(
(m) => m.key !== modelKey,
);
}
this.save();
}
}
export const providersConfig = new ProvidersConfig();


@@ -0,0 +1,2 @@
export { providersConfig } from './ProvidersConfig.js';
export type { ConfigModelProvider, LlmProvidersConfig, UIConfigField } from './types.js';


@@ -0,0 +1,15 @@
import { providersConfig } from './ProvidersConfig.js';
import type { ConfigModelProvider } from './types.js';
export const getConfiguredModelProviders = (): ConfigModelProvider[] => {
return providersConfig.getModelProviders();
};
export const getConfiguredModelProviderById = (
id: string,
): ConfigModelProvider | undefined => {
return providersConfig.getModelProviderById(id);
};
export const isEnvOnlyMode = (): boolean =>
providersConfig.isEnvOnlyMode();


@@ -0,0 +1,28 @@
import type { Model } from '../models/types.js';
export type UIConfigField = {
name: string;
key: string;
type: 'string' | 'password' | 'select' | 'textarea' | 'switch';
required: boolean;
description: string;
scope: 'client' | 'server';
env?: string;
placeholder?: string;
default?: string | boolean;
options?: { name: string; value: string }[];
};
export type ConfigModelProvider = {
id: string;
name: string;
type: string;
chatModels: Model[];
embeddingModels: Model[];
config: Record<string, unknown>;
hash: string;
};
export type LlmProvidersConfig = {
modelProviders: ConfigModelProvider[];
};


@@ -0,0 +1,8 @@
import type { Chunk } from '../../types.js';
abstract class BaseEmbedding<CONFIG> {
constructor(protected config: CONFIG) {}
abstract embedText(texts: string[]): Promise<number[][]>;
abstract embedChunks(chunks: Chunk[]): Promise<number[][]>;
}
export default BaseEmbedding;


@@ -0,0 +1,19 @@
import type {
GenerateObjectInput,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../types.js';
abstract class BaseLLM<CONFIG> {
constructor(protected config: CONFIG) {}
abstract generateText(input: GenerateTextInput): Promise<GenerateTextOutput>;
abstract streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput>;
abstract generateObject<T>(input: GenerateObjectInput): Promise<T>;
abstract streamObject<T>(
input: GenerateObjectInput,
): AsyncGenerator<Partial<T>>;
}
export default BaseLLM;


@@ -0,0 +1,44 @@
import type { ModelList, ProviderMetadata } from '../types.js';
import type { UIConfigField } from '../../config/types.js';
import type BaseLLM from './llm.js';
import type BaseEmbedding from './embedding.js';
abstract class BaseModelProvider<CONFIG> {
constructor(
protected id: string,
protected name: string,
protected config: CONFIG,
) {}
abstract getDefaultModels(): Promise<ModelList>;
abstract getModelList(): Promise<ModelList>;
abstract loadChatModel(modelName: string): Promise<BaseLLM<unknown>>;
abstract loadEmbeddingModel(modelName: string): Promise<BaseEmbedding<unknown>>;
static getProviderConfigFields(): UIConfigField[] {
throw new Error('Method not implemented.');
}
static getProviderMetadata(): ProviderMetadata {
throw new Error('Method not implemented.');
}
static parseAndValidate(raw: unknown): unknown {
throw new Error('Method not implemented.');
}
}
export type ProviderConstructor<CONFIG> = {
new (id: string, name: string, config: CONFIG): BaseModelProvider<CONFIG>;
parseAndValidate(raw: unknown): CONFIG;
getProviderConfigFields: () => UIConfigField[];
getProviderMetadata: () => ProviderMetadata;
};
export const createProviderInstance = <P extends ProviderConstructor<unknown>>(
Provider: P,
id: string,
name: string,
rawConfig: unknown,
): InstanceType<P> => {
const cfg = Provider.parseAndValidate(rawConfig);
return new Provider(id, name, cfg) as InstanceType<P>;
};
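// Usage sketch (the provider class and config here are illustrative; concrete
// classes live under lib/models/providers):
//   const provider = createProviderInstance(
//     OllamaProvider as unknown as ProviderConstructor<unknown>,
//     'env-ollama',
//     'Ollama',
//     { baseURL: 'http://localhost:11434' },
//   );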
export default BaseModelProvider;


@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';
class AnthropicLLM extends OpenAILLM {}
export default AnthropicLLM;


@@ -0,0 +1,115 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import AnthropicLLM from './anthropicLLM.js';
interface AnthropicConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Anthropic API key',
required: true,
placeholder: 'Anthropic API Key',
env: 'ANTHROPIC_API_KEY',
scope: 'server',
},
];
class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
constructor(id: string, name: string, config: AnthropicConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
method: 'GET',
headers: {
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
'Content-Type': 'application/json',
},
});
if (!res.ok) {
throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
}
const data = (await res.json()).data;
const models: Model[] = data.map((m: any) => {
return {
key: m.id,
name: m.display_name,
};
});
return {
embedding: [],
chat: models,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Anthropic Chat Model. Invalid Model Selected',
);
}
return new AnthropicLLM({
apiKey: this.config.apiKey,
model: key,
baseURL: 'https://api.anthropic.com/v1',
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
throw new Error('Anthropic provider does not support embedding models.');
}
static parseAndValidate(raw: any): AnthropicConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'anthropic',
name: 'Anthropic',
};
}
}
export default AnthropicProvider;


@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';
class GeminiEmbedding extends OpenAIEmbedding {}
export default GeminiEmbedding;


@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';
class GeminiLLM extends OpenAILLM {}
export default GeminiLLM;


@@ -0,0 +1,144 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import GeminiEmbedding from './geminiEmbedding.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import GeminiLLM from './geminiLLM.js';
interface GeminiConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Gemini API key',
required: true,
placeholder: 'Gemini API Key',
env: 'GEMINI_API_KEY',
scope: 'server',
},
];
class GeminiProvider extends BaseModelProvider<GeminiConfig> {
constructor(id: string, name: string, config: GeminiConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);
if (!res.ok) {
throw new Error(`Failed to fetch Gemini models: ${res.statusText}`);
}
const data = await res.json();
let defaultEmbeddingModels: Model[] = [];
let defaultChatModels: Model[] = [];
data.models.forEach((m: any) => {
if (
m.supportedGenerationMethods.some(
(genMethod: string) =>
genMethod === 'embedText' || genMethod === 'embedContent',
)
) {
defaultEmbeddingModels.push({
key: m.name,
name: m.displayName,
});
} else if (m.supportedGenerationMethods.includes('generateContent')) {
defaultChatModels.push({
key: m.name,
name: m.displayName,
});
}
});
return {
embedding: defaultEmbeddingModels,
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Chat Model. Invalid Model Selected',
);
}
return new GeminiLLM({
apiKey: this.config.apiKey,
model: key,
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Embedding Model. Invalid Model Selected.',
);
}
return new GeminiEmbedding({
apiKey: this.config.apiKey,
model: key,
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
});
}
static parseAndValidate(raw: any): GeminiConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'gemini',
name: 'Gemini',
};
}
}
export default GeminiProvider;


@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';
class GroqLLM extends OpenAILLM {}
export default GroqLLM;


@@ -0,0 +1,113 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import GroqLLM from './groqLLM.js';
interface GroqConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Groq API key',
required: true,
placeholder: 'Groq API Key',
env: 'GROQ_API_KEY',
scope: 'server',
},
];
class GroqProvider extends BaseModelProvider<GroqConfig> {
constructor(id: string, name: string, config: GroqConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch(`https://api.groq.com/openai/v1/models`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
if (!res.ok) {
throw new Error(`Failed to fetch Groq models: ${res.statusText}`);
}
const data = await res.json();
const defaultChatModels: Model[] = [];
data.data.forEach((m: any) => {
defaultChatModels.push({
key: m.id,
name: m.id,
});
});
return {
embedding: [],
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
}
return new GroqLLM({
apiKey: this.config.apiKey,
model: key,
baseURL: 'https://api.groq.com/openai/v1',
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
throw new Error('Groq Provider does not support embedding models.');
}
static parseAndValidate(raw: any): GroqConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'groq',
name: 'Groq',
};
}
}
export default GroqProvider;


@@ -0,0 +1,20 @@
import type { ProviderConstructor } from '../base/provider.js';
import OpenAIProvider from './openai/index.js';
import OllamaProvider from './ollama/index.js';
import TimewebProvider from './timeweb/index.js';
import GeminiProvider from './gemini/index.js';
import GroqProvider from './groq/index.js';
import LemonadeProvider from './lemonade/index.js';
import AnthropicProvider from './anthropic/index.js';
import LMStudioProvider from './lmstudio/index.js';
export const providers: Record<string, ProviderConstructor<unknown>> = {
openai: OpenAIProvider as unknown as ProviderConstructor<unknown>,
ollama: OllamaProvider as unknown as ProviderConstructor<unknown>,
timeweb: TimewebProvider as unknown as ProviderConstructor<unknown>,
gemini: GeminiProvider as unknown as ProviderConstructor<unknown>,
groq: GroqProvider as unknown as ProviderConstructor<unknown>,
lemonade: LemonadeProvider as unknown as ProviderConstructor<unknown>,
anthropic: AnthropicProvider as unknown as ProviderConstructor<unknown>,
lmstudio: LMStudioProvider as unknown as ProviderConstructor<unknown>,
};
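// Lookup sketch: a stored provider's `type` string selects its constructor, which a
// caller (hypothetical here) can pass to createProviderInstance from ../base/provider.js:
//   const Ctor = providers[providerConfig.type];
//   if (!Ctor) throw new Error(`Unknown provider type: ${providerConfig.type}`);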


@@ -0,0 +1,153 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseLLM from '../../base/llm.js';
import LemonadeLLM from './lemonadeLLM.js';
import BaseEmbedding from '../../base/embedding.js';
import LemonadeEmbedding from './lemonadeEmbedding.js';
interface LemonadeConfig {
baseURL: string;
apiKey?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for Lemonade API',
required: true,
placeholder: 'https://api.lemonade.ai/v1',
env: 'LEMONADE_BASE_URL',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Lemonade API key (optional)',
required: false,
placeholder: 'Lemonade API Key',
env: 'LEMONADE_API_KEY',
scope: 'server',
},
];
class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
constructor(id: string, name: string, config: LemonadeConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch(`${this.config.baseURL}/models`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
...(this.config.apiKey
? { Authorization: `Bearer ${this.config.apiKey}` }
: {}),
},
});
const data = await res.json();
const models: Model[] = data.data
.filter((m: any) => m.recipe === 'llamacpp')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Chat Model. Invalid Model Selected',
);
}
return new LemonadeLLM({
apiKey: this.config.apiKey || 'not-needed',
model: key,
baseURL: this.config.baseURL,
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
);
}
return new LemonadeEmbedding({
apiKey: this.config.apiKey || 'not-needed',
model: key,
baseURL: this.config.baseURL,
});
}
static parseAndValidate(raw: any): LemonadeConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lemonade',
name: 'Lemonade',
};
}
}
export default LemonadeProvider;


@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';
class LemonadeEmbedding extends OpenAIEmbedding {}
export default LemonadeEmbedding;


@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';
class LemonadeLLM extends OpenAILLM {}
export default LemonadeLLM;


@@ -0,0 +1,143 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import LMStudioLLM from './lmstudioLLM.js';
import BaseLLM from '../../base/llm.js';
import BaseEmbedding from '../../base/embedding.js';
import LMStudioEmbedding from './lmstudioEmbedding.js';
interface LMStudioConfig {
baseURL: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for LM Studio server',
required: true,
placeholder: 'http://localhost:1234',
env: 'LM_STUDIO_BASE_URL',
scope: 'server',
},
];
class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
constructor(id: string, name: string, config: LMStudioConfig) {
super(id, name, config);
}
private normalizeBaseURL(url: string): string {
const trimmed = url.trim().replace(/\/+$/, '');
return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
}
async getDefaultModels(): Promise<ModelList> {
try {
const baseURL = this.normalizeBaseURL(this.config.baseURL);
const res = await fetch(`${baseURL}/models`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Chat Model. Invalid Model Selected',
);
}
return new LMStudioLLM({
apiKey: 'lm-studio',
model: key,
baseURL: this.normalizeBaseURL(this.config.baseURL),
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
);
}
return new LMStudioEmbedding({
apiKey: 'lm-studio',
model: key,
baseURL: this.normalizeBaseURL(this.config.baseURL),
});
}
static parseAndValidate(raw: any): LMStudioConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lmstudio',
name: 'LM Studio',
};
}
}
export default LMStudioProvider;


@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';
class LMStudioEmbedding extends OpenAIEmbedding {}
export default LMStudioEmbedding;


@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';
class LMStudioLLM extends OpenAILLM {}
export default LMStudioLLM;


@@ -0,0 +1,133 @@
import type { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import type { Model, ModelList, ProviderMetadata } from '../../types.js';
import type BaseLLM from '../../base/llm.js';
import type BaseEmbedding from '../../base/embedding.js';
import OllamaLLM from './ollamaLLM.js';
import OllamaEmbedding from './ollamaEmbedding.js';
interface OllamaConfig {
baseURL: string;
embeddingBaseURL?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for the Ollama server',
required: true,
placeholder:
process.env.DOCKER === '1'
? 'http://host.docker.internal:11434'
: 'http://localhost:11434',
env: 'OLLAMA_BASE_URL',
scope: 'server',
},
];
class OllamaProvider extends BaseModelProvider<OllamaConfig> {
constructor(id: string, name: string, config: OllamaConfig) {
super(id, name, config);
}
private async fetchModels(baseURL: string): Promise<Model[]> {
const res = await fetch(`${baseURL}/api/tags`, {
method: 'GET',
headers: { 'Content-Type': 'application/json' },
});
const data = (await res.json()) as { models?: { name?: string; model?: string }[] };
return (data.models ?? []).map((m) => ({
name: m.model ?? m.name ?? '',
key: m.model ?? m.name ?? '',
}));
}
async getDefaultModels(): Promise<ModelList> {
try {
const [chatModels, embeddingModels] = await Promise.all([
this.fetchModels(this.config.baseURL),
this.fetchModels(
this.config.embeddingBaseURL ?? this.config.baseURL,
),
]);
return { chat: chatModels, embedding: embeddingModels };
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Ollama API. Please ensure the base URL is correct and the Ollama server is running.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id);
if (!configProvider) {
return defaultModels;
}
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<unknown>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Ollama Chat Model. Invalid Model Selected',
);
}
return new OllamaLLM({
baseURL: this.config.baseURL,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<unknown>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Ollama Embedding Model. Invalid Model Selected.',
);
}
return new OllamaEmbedding({
model: key,
baseURL: this.config.embeddingBaseURL ?? this.config.baseURL,
});
}
static parseAndValidate(raw: unknown): OllamaConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
const obj = raw as Record<string, unknown>;
if (!obj.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(obj.baseURL),
embeddingBaseURL: obj.embeddingBaseURL
? String(obj.embeddingBaseURL)
: undefined,
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return { key: 'ollama', name: 'Ollama' };
}
}
export default OllamaProvider;


@@ -0,0 +1,37 @@
import { Ollama } from 'ollama';
import BaseEmbedding from '../../base/embedding.js';
import type { Chunk } from '../../../types.js';
type OllamaConfig = {
model: string;
baseURL?: string;
};
class OllamaEmbedding extends BaseEmbedding<OllamaConfig> {
ollamaClient: Ollama;
constructor(protected config: OllamaConfig) {
super(config);
this.ollamaClient = new Ollama({
host: this.config.baseURL ?? 'http://localhost:11434',
});
}
async embedText(texts: string[]): Promise<number[][]> {
const response = await this.ollamaClient.embed({
input: texts,
model: this.config.model,
});
return response.embeddings;
}
async embedChunks(chunks: Chunk[]): Promise<number[][]> {
const response = await this.ollamaClient.embed({
input: chunks.map((c) => c.content),
model: this.config.model,
});
return response.embeddings;
}
}
export default OllamaEmbedding;


@@ -0,0 +1,251 @@
import z from 'zod';
import BaseLLM from '../../base/llm.js';
import type {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types.js';
import { Ollama, type Tool as OllamaTool, type Message as OllamaMessage } from 'ollama';
import { parse } from 'partial-json';
import crypto from 'node:crypto';
import type { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';
type OllamaConfig = {
baseURL: string;
model: string;
options?: GenerateOptions;
};
const reasoningModels = [
'gpt-oss',
'deepseek-r1',
'qwen3',
'deepseek-v3.1',
'magistral',
'nemotron-3-nano',
];
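// Matched by substring against config.model; for these families every request below
// sets think: false, which disables the model's thinking/reasoning output (assumed
// here to keep responses compact rather than surfacing chain-of-thought).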
class OllamaLLM extends BaseLLM<OllamaConfig> {
ollamaClient: Ollama;
constructor(protected config: OllamaConfig) {
super(config);
this.ollamaClient = new Ollama({
host: this.config.baseURL || 'http://localhost:11434',
});
}
convertToOllamaMessages(messages: Message[]): OllamaMessage[] {
return messages.map((msg) => {
if (msg.role === 'tool') {
return {
role: 'tool',
tool_name: msg.name,
content: msg.content,
} as OllamaMessage;
}
if (msg.role === 'assistant') {
return {
role: 'assistant',
content: msg.content,
tool_calls:
msg.tool_calls?.map((tc, i) => ({
function: {
index: i,
name: tc.name,
arguments: tc.arguments,
},
})) ?? [],
};
}
return msg as OllamaMessage;
});
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
const ollamaTools: OllamaTool[] = [];
input.tools?.forEach((tool) => {
ollamaTools.push({
type: 'function',
function: {
name: tool.name,
description: tool.description,
// Send the full JSON schema (type/properties/required), matching streamText below;
// passing only .properties would drop the schema's type and required fields.
parameters: z.toJSONSchema(tool.schema) as Record<string, unknown>,
},
});
});
const res = await this.ollamaClient.chat({
model: this.config.model,
messages: this.convertToOllamaMessages(input.messages),
tools: ollamaTools.length > 0 ? ollamaTools : undefined,
...(reasoningModels.some((m) => this.config.model.includes(m))
? { think: false }
: {}),
options: {
top_p: input.options?.topP ?? this.config.options?.topP,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
num_ctx: 32000,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
stop:
input.options?.stopSequences ?? this.config.options?.stopSequences,
},
});
return {
content: res.message.content ?? '',
toolCalls:
res.message.tool_calls?.map((tc) => ({
id: crypto.randomUUID(),
name: tc.function.name,
arguments: (tc.function.arguments as Record<string, unknown>) ?? {},
})) ?? [],
additionalInfo: { reasoning: res.message.thinking },
};
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
const ollamaTools: OllamaTool[] = [];
input.tools?.forEach((tool) => {
ollamaTools.push({
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: z.toJSONSchema(tool.schema) as Record<string, unknown>,
},
});
});
const stream = await this.ollamaClient.chat({
model: this.config.model,
messages: this.convertToOllamaMessages(input.messages),
stream: true,
...(reasoningModels.some((m) => this.config.model.includes(m))
? { think: false }
: {}),
tools: ollamaTools.length > 0 ? ollamaTools : undefined,
options: {
top_p: input.options?.topP ?? this.config.options?.topP,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
num_ctx: 32000,
num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
stop:
input.options?.stopSequences ?? this.config.options?.stopSequences,
},
});
for await (const chunk of stream) {
yield {
contentChunk: chunk.message.content ?? '',
toolCallChunk:
chunk.message.tool_calls?.map((tc, i) => ({
id: crypto
.createHash('sha256')
.update(`${i}-${tc.function.name}`)
.digest('hex'),
name: tc.function.name,
arguments: (tc.function.arguments as Record<string, unknown>) ?? {},
})) ?? [],
done: chunk.done,
additionalInfo: { reasoning: chunk.message.thinking },
};
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
const response = await this.ollamaClient.chat({
model: this.config.model,
messages: this.convertToOllamaMessages(input.messages),
format: z.toJSONSchema(input.schema),
...(reasoningModels.some((m) => this.config.model.includes(m))
? { think: false }
: {}),
options: {
top_p: input.options?.topP ?? this.config.options?.topP,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
stop:
input.options?.stopSequences ?? this.config.options?.stopSequences,
},
});
try {
return input.schema.parse(
JSON.parse(
repairJson(response.message.content ?? '{}', {
extractJson: true,
}) as string,
),
) as T;
} catch (err) {
throw new Error(`Error parsing response from Ollama: ${err}`);
}
}
async *streamObject<T>(
input: GenerateObjectInput,
): AsyncGenerator<Partial<T>> {
let receivedObj = '';
const stream = await this.ollamaClient.chat({
model: this.config.model,
messages: this.convertToOllamaMessages(input.messages),
format: z.toJSONSchema(input.schema),
stream: true,
...(reasoningModels.some((m) => this.config.model.includes(m))
? { think: false }
: {}),
options: {
top_p: input.options?.topP ?? this.config.options?.topP,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
stop:
input.options?.stopSequences ?? this.config.options?.stopSequences,
},
});
for await (const chunk of stream) {
receivedObj += chunk.message.content ?? '';
try {
yield parse(receivedObj) as Partial<T>;
} catch {
yield {} as Partial<T>;
}
}
}
}
export default OllamaLLM;


@@ -0,0 +1,226 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import OpenAIEmbedding from './openaiEmbedding.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import OpenAILLM from './openaiLLM.js';
interface OpenAIConfig {
apiKey: string;
baseURL: string;
}
const defaultChatModels: Model[] = [
{
name: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
name: 'GPT-4',
key: 'gpt-4',
},
{
name: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
name: 'GPT-4 omni',
key: 'gpt-4o',
},
{
name: 'GPT-4o (2024-05-13)',
key: 'gpt-4o-2024-05-13',
},
{
name: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
{
name: 'GPT 4.1 nano',
key: 'gpt-4.1-nano',
},
{
name: 'GPT 4.1 mini',
key: 'gpt-4.1-mini',
},
{
name: 'GPT 4.1',
key: 'gpt-4.1',
},
{
name: 'GPT 5 nano',
key: 'gpt-5-nano',
},
{
name: 'GPT 5',
key: 'gpt-5',
},
{
name: 'GPT 5 Mini',
key: 'gpt-5-mini',
},
{
name: 'GPT 5 Pro',
key: 'gpt-5-pro',
},
{
name: 'GPT 5.1',
key: 'gpt-5.1',
},
{
name: 'GPT 5.2',
key: 'gpt-5.2',
},
{
name: 'GPT 5.2 Pro',
key: 'gpt-5.2-pro',
},
{
name: 'o1',
key: 'o1',
},
{
name: 'o3',
key: 'o3',
},
{
name: 'o3 Mini',
key: 'o3-mini',
},
{
name: 'o4 Mini',
key: 'o4-mini',
},
];
const defaultEmbeddingModels: Model[] = [
{
name: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
name: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your OpenAI API key',
required: true,
placeholder: 'OpenAI API Key',
env: 'OPENAI_API_KEY',
scope: 'server',
},
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for the OpenAI API',
required: true,
placeholder: 'OpenAI Base URL',
default: 'https://api.openai.com/v1',
env: 'OPENAI_BASE_URL',
scope: 'server',
},
];
class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
constructor(id: string, name: string, config: OpenAIConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
if (this.config.baseURL === 'https://api.openai.com/v1') {
return {
embedding: defaultEmbeddingModels,
chat: defaultChatModels,
};
}
return {
embedding: [],
chat: [],
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading OpenAI Chat Model. Invalid Model Selected',
);
}
return new OpenAILLM({
apiKey: this.config.apiKey,
model: key,
baseURL: this.config.baseURL,
});
}
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading OpenAI Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbedding({
apiKey: this.config.apiKey,
model: key,
baseURL: this.config.baseURL,
});
}
static parseAndValidate(raw: any): OpenAIConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey || !raw.baseURL)
throw new Error(
'Invalid config provided. API key and base URL must be provided',
);
return {
apiKey: String(raw.apiKey),
baseURL: String(raw.baseURL),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'openai',
name: 'OpenAI',
};
}
}
export default OpenAIProvider;


@@ -0,0 +1,42 @@
import OpenAI from 'openai';
import BaseEmbedding from '../../base/embedding.js';
import { Chunk } from '../../../types.js';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
};
class OpenAIEmbedding extends BaseEmbedding<OpenAIConfig> {
openAIClient: OpenAI;
constructor(protected config: OpenAIConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: config.apiKey,
baseURL: config.baseURL,
});
}
async embedText(texts: string[]): Promise<number[][]> {
const response = await this.openAIClient.embeddings.create({
model: this.config.model,
input: texts,
});
return response.data.map((embedding) => embedding.embedding);
}
async embedChunks(chunks: Chunk[]): Promise<number[][]> {
const response = await this.openAIClient.embeddings.create({
model: this.config.model,
input: chunks.map((c) => c.content),
});
return response.data.map((embedding) => embedding.embedding);
}
}
export default OpenAIEmbedding;


@@ -0,0 +1,275 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm.js';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
ToolCall,
} from '../../types.js';
import { parse } from 'partial-json';
import z from 'zod';
import {
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionToolMessageParam,
} from 'openai/resources/index.mjs';
import { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
options?: GenerateOptions;
};
class OpenAILLM extends BaseLLM<OpenAIConfig> {
openAIClient: OpenAI;
constructor(protected config: OpenAIConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: this.config.apiKey,
baseURL: this.config.baseURL || 'https://api.openai.com/v1',
});
}
convertToOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[] {
return messages.map((msg) => {
if (msg.role === 'tool') {
return {
role: 'tool',
tool_call_id: msg.id,
content: msg.content,
} as ChatCompletionToolMessageParam;
} else if (msg.role === 'assistant') {
return {
role: 'assistant',
content: msg.content,
...(msg.tool_calls &&
msg.tool_calls.length > 0 && {
tool_calls: msg.tool_calls?.map((tc) => ({
id: tc.id,
type: 'function',
function: {
name: tc.name,
arguments: JSON.stringify(tc.arguments),
},
})),
}),
} as ChatCompletionAssistantMessageParam;
}
return msg;
});
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
const openaiTools: ChatCompletionTool[] = [];
input.tools?.forEach((tool) => {
openaiTools.push({
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: z.toJSONSchema(tool.schema),
},
});
});
const response = await this.openAIClient.chat.completions.create({
model: this.config.model,
tools: openaiTools.length > 0 ? openaiTools : undefined,
messages: this.convertToOpenAIMessages(input.messages),
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
});
if (response.choices && response.choices.length > 0) {
return {
content: response.choices[0].message.content!,
toolCalls:
response.choices[0].message.tool_calls
?.map((tc) => {
if (tc.type === 'function') {
return {
name: tc.function.name,
id: tc.id,
arguments: JSON.parse(tc.function.arguments),
};
}
})
.filter((tc) => tc !== undefined) || [],
additionalInfo: {
finishReason: response.choices[0].finish_reason,
},
};
}
throw new Error('No response from OpenAI');
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
const openaiTools: ChatCompletionTool[] = [];
input.tools?.forEach((tool) => {
openaiTools.push({
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: z.toJSONSchema(tool.schema),
},
});
});
const stream = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: this.convertToOpenAIMessages(input.messages),
tools: openaiTools.length > 0 ? openaiTools : undefined,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
stream: true,
});
const receivedToolCalls: { name: string; id: string; arguments: string }[] =
[];
for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0) {
const toolCalls = chunk.choices[0].delta.tool_calls;
yield {
contentChunk: chunk.choices[0].delta.content || '',
toolCallChunk:
toolCalls?.map((tc) => {
if (!receivedToolCalls[tc.index]) {
const call = {
name: tc.function?.name!,
id: tc.id!,
arguments: tc.function?.arguments || '',
};
recievedToolCalls.push(call);
return { ...call, arguments: parse(call.arguments || '{}') };
} else {
const existingCall = recievedToolCalls[tc.index];
existingCall.arguments += tc.function?.arguments || '';
return {
...existingCall,
arguments: parse(existingCall.arguments),
};
}
}) || [],
done: chunk.choices[0].finish_reason !== null,
additionalInfo: {
finishReason: chunk.choices[0].finish_reason,
},
};
}
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
const response = await this.openAIClient.chat.completions.parse({
messages: this.convertToOpenAIMessages(input.messages),
model: this.config.model,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
response_format: zodResponseFormat(input.schema, 'object'),
});
if (response.choices && response.choices.length > 0) {
try {
return input.schema.parse(
JSON.parse(
repairJson(response.choices[0].message.content!, {
extractJson: true,
}) as string,
),
) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
throw new Error('No response from OpenAI');
}
async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
let receivedObj = '';
const stream = this.openAIClient.responses.stream({
model: this.config.model,
input: input.messages,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
// Responses API naming: max_output_tokens replaces max_completion_tokens.
// stop, frequency_penalty and presence_penalty are Chat Completions-only
// parameters, so they are omitted here.
max_output_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
text: {
format: zodTextFormat(input.schema, 'object'),
},
});
for await (const chunk of stream) {
if (chunk.type === 'response.output_text.delta' && chunk.delta) {
receivedObj += chunk.delta;
try {
yield parse(receivedObj) as T;
} catch {
// The accumulated text is not yet parseable even as partial JSON;
// keep buffering instead of yielding an empty object that would
// clobber earlier partial results downstream.
}
} else if (chunk.type === 'response.output_text.done' && chunk.text) {
try {
yield parse(chunk.text) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
}
}
}
export default OpenAILLM;
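For orientation, a minimal consumption sketch for the streaming-object path. It assumes the OpenAI config carries apiKey/baseURL/model as used in the constructor above; the import path, model key, and schema are illustrative, not part of the service code.

import { z } from 'zod';
import OpenAILLM from './openaiLLM.js'; // hypothetical path

const llm = new OpenAILLM({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  baseURL: 'https://api.openai.com/v1',
  model: 'gpt-4o-mini', // illustrative model key
});

const schema = z.object({ city: z.string(), country: z.string() });

for await (const partial of llm.streamObject<z.infer<typeof schema>>({
  schema,
  messages: [{ role: 'user', content: 'Name one city and its country.' }],
})) {
  // Each yield is a best-effort parse of the JSON received so far,
  // e.g. {} -> { city: 'Par' } -> { city: 'Paris', country: 'France' }.
  console.log(partial);
}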

View File

@@ -0,0 +1,165 @@
import type { UIConfigField } from '../../../config/types.js';
import BaseModelProvider from '../../base/provider.js';
import type { Model, ModelList, ProviderMetadata } from '../../types.js';
import type BaseLLM from '../../base/llm.js';
import type BaseEmbedding from '../../base/embedding.js';
import TimewebLLM from './timewebLLM.js';
interface TimewebConfig {
baseURL: string;
agentAccessId: string;
apiKey: string;
model: string;
xProxySource?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'API Base URL',
key: 'baseURL',
description: 'Timeweb Cloud AI API base URL',
required: true,
placeholder: 'https://api.timeweb.cloud',
env: 'TIMEWEB_API_BASE_URL',
scope: 'server',
},
{
type: 'string',
name: 'Agent Access ID',
key: 'agentAccessId',
description: 'Agent access ID from Timeweb Cloud AI',
required: true,
placeholder: '',
env: 'TIMEWEB_AGENT_ACCESS_ID',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Bearer token for Timeweb Cloud AI',
required: true,
placeholder: '',
env: 'TIMEWEB_API_KEY',
scope: 'server',
},
{
type: 'string',
name: 'Model',
key: 'model',
description: 'Model key (e.g. gpt-4)',
required: true,
placeholder: 'gpt-4',
env: 'LLM_CHAT_MODEL',
scope: 'server',
},
{
type: 'string',
name: 'X-Proxy-Source',
key: 'xProxySource',
description: 'Optional header for Timeweb API',
required: false,
placeholder: '',
env: 'TIMEWEB_X_PROXY_SOURCE',
scope: 'server',
},
];
class TimewebProvider extends BaseModelProvider<TimewebConfig> {
constructor(id: string, name: string, config: TimewebConfig) {
super(id, name, config);
}
getTimewebBaseURL(): string {
const base = this.config.baseURL.replace(/\/$/, '');
return `${base}/api/v1/cloud-ai/agents/${this.config.agentAccessId}/v1`;
}
async getDefaultModels(): Promise<ModelList> {
try {
const url = `${this.getTimewebBaseURL()}/models`;
const res = await fetch(url, {
headers: {
Authorization: `Bearer ${this.config.apiKey}`,
'Content-Type': 'application/json',
...(this.config.xProxySource && {
'x-proxy-source': this.config.xProxySource,
}),
},
});
if (!res.ok) {
throw new Error(`Timeweb API error: ${res.status} ${res.statusText}`);
}
const data = (await res.json()) as { data?: { id: string }[] };
const models: Model[] = (data.data ?? []).map((m) => ({
name: m.id,
key: m.id,
}));
const chat =
models.length > 0
? models
: [{ name: this.config.model, key: this.config.model }];
return { chat, embedding: [] };
} catch {
return {
chat: [{ name: this.config.model, key: this.config.model }],
embedding: [],
};
}
}
async getModelList(): Promise<ModelList> {
return this.getDefaultModels();
}
async loadChatModel(key: string): Promise<BaseLLM<unknown>> {
return new TimewebLLM({
apiKey: this.config.apiKey,
baseURL: this.getTimewebBaseURL(),
model: key,
defaultHeaders: this.config.xProxySource
? { 'x-proxy-source': this.config.xProxySource }
: undefined,
});
}
async loadEmbeddingModel(_key: string): Promise<BaseEmbedding<unknown>> {
throw new Error(
'Timeweb Cloud AI does not provide embedding models. Use Ollama for embeddings.',
);
}
static parseAndValidate(raw: unknown): TimewebConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
const obj = raw as Record<string, unknown>;
if (!obj.baseURL || !obj.agentAccessId || !obj.apiKey)
throw new Error(
'Invalid config. baseURL, agentAccessId and apiKey are required',
);
return {
baseURL: String(obj.baseURL),
agentAccessId: String(obj.agentAccessId),
apiKey: String(obj.apiKey),
model: String(obj.model ?? 'gpt-4'),
xProxySource: obj.xProxySource ? String(obj.xProxySource) : undefined,
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return { key: 'timeweb', name: 'Timeweb Cloud AI' };
}
}
export default TimewebProvider;
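A minimal wiring sketch, assuming env var names matching the UI config fields above; the import path and provider id are hypothetical.

import TimewebProvider from './provider.js'; // hypothetical path

const config = TimewebProvider.parseAndValidate({
  baseURL: process.env.TIMEWEB_API_BASE_URL,
  agentAccessId: process.env.TIMEWEB_AGENT_ACCESS_ID,
  apiKey: process.env.TIMEWEB_API_KEY,
  model: process.env.LLM_CHAT_MODEL ?? 'gpt-4',
});
const provider = new TimewebProvider('env-timeweb', 'Timeweb Cloud AI', config);

// getTimewebBaseURL() resolves model and chat requests against
// <baseURL>/api/v1/cloud-ai/agents/<agentAccessId>/v1.
const models = await provider.getModelList();
const chat = await provider.loadChatModel(models.chat[0].key);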

View File

@@ -0,0 +1,245 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm.js';
import { zodResponseFormat } from 'openai/helpers/zod';
import type {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types.js';
import { parse } from 'partial-json';
import z from 'zod';
import type {
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionToolMessageParam,
} from 'openai/resources/index.mjs';
import type { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';
type TimewebConfig = {
apiKey: string;
baseURL: string;
model: string;
options?: GenerateOptions;
defaultHeaders?: Record<string, string>;
};
class TimewebLLM extends BaseLLM<TimewebConfig> {
openAIClient: OpenAI;
constructor(protected config: TimewebConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: this.config.apiKey,
baseURL: this.config.baseURL,
defaultHeaders: this.config.defaultHeaders,
});
}
convertToOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[] {
return messages.map((msg) => {
if (msg.role === 'tool') {
return {
role: 'tool',
tool_call_id: msg.id,
content: msg.content,
} as ChatCompletionToolMessageParam;
}
if (msg.role === 'assistant') {
return {
role: 'assistant',
content: msg.content,
...(msg.tool_calls &&
msg.tool_calls.length > 0 && {
tool_calls: msg.tool_calls.map((tc) => ({
id: tc.id,
type: 'function' as const,
function: {
name: tc.name,
arguments: JSON.stringify(tc.arguments),
},
})),
}),
} as ChatCompletionAssistantMessageParam;
}
return msg;
});
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
const openaiTools: ChatCompletionTool[] = [];
input.tools?.forEach((tool) => {
openaiTools.push({
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: z.toJSONSchema(tool.schema),
},
});
});
const response = await this.openAIClient.chat.completions.create({
model: this.config.model,
tools: openaiTools.length > 0 ? openaiTools : undefined,
messages: this.convertToOpenAIMessages(input.messages),
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
});
if (response.choices && response.choices.length > 0) {
return {
content: response.choices[0].message.content ?? '',
toolCalls:
response.choices[0].message.tool_calls
?.map((tc) => {
if (tc.type === 'function') {
return {
name: tc.function.name,
id: tc.id!,
arguments: JSON.parse(tc.function.arguments ?? '{}'),
};
}
return undefined;
})
.filter((tc): tc is NonNullable<typeof tc> => tc !== undefined) ??
[],
additionalInfo: {
finishReason: response.choices[0].finish_reason ?? undefined,
},
};
}
throw new Error('No response from Timeweb');
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
const openaiTools: ChatCompletionTool[] = [];
input.tools?.forEach((tool) => {
openaiTools.push({
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: z.toJSONSchema(tool.schema),
},
});
});
const stream = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: this.convertToOpenAIMessages(input.messages),
tools: openaiTools.length > 0 ? openaiTools : undefined,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
stream: true,
});
const receivedToolCalls: {
name: string;
id: string;
arguments: string;
}[] = [];
for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0) {
const toolCalls = chunk.choices[0].delta.tool_calls;
yield {
contentChunk: chunk.choices[0].delta.content ?? '',
toolCallChunk:
toolCalls?.map((tc) => {
if (!receivedToolCalls[tc.index!]) {
const call = {
name: tc.function?.name ?? '',
id: tc.id ?? '',
arguments: tc.function?.arguments ?? '',
};
receivedToolCalls[tc.index!] = call;
return { ...call, arguments: parse(call.arguments || '{}') };
}
const existingCall = receivedToolCalls[tc.index!];
existingCall.arguments += tc.function?.arguments ?? '';
return {
...existingCall,
arguments: parse(existingCall.arguments),
};
}) ?? [],
done: chunk.choices[0].finish_reason !== null,
additionalInfo: {
finishReason: chunk.choices[0].finish_reason ?? undefined,
},
};
}
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
const response = await this.openAIClient.chat.completions.create({
messages: this.convertToOpenAIMessages(input.messages),
model: this.config.model,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ??
this.config.options?.presencePenalty,
response_format: zodResponseFormat(input.schema, 'object'),
});
if (response.choices && response.choices.length > 0) {
try {
return input.schema.parse(
JSON.parse(
repairJson(response.choices[0].message.content ?? '{}', {
extractJson: true,
}) as string,
),
) as T;
} catch (err) {
throw new Error(`Error parsing response from Timeweb: ${err}`);
}
}
throw new Error('No response from Timeweb');
}
async *streamObject<T>(
input: GenerateObjectInput,
): AsyncGenerator<Partial<T>> {
const result = await this.generateObject<T>(input);
yield result;
}
}
export default TimewebLLM;
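A minimal streaming sketch; the agent URL and model key are placeholders, and the config shape is the TimewebConfig defined at the top of this file.

const llm = new TimewebLLM({
  apiKey: process.env.TIMEWEB_API_KEY ?? '',
  baseURL: 'https://api.timeweb.cloud/api/v1/cloud-ai/agents/<agent-id>/v1', // placeholder
  model: 'gpt-4',
});

let text = '';
for await (const chunk of llm.streamText({
  messages: [{ role: 'user', content: 'Hello' }],
})) {
  text += chunk.contentChunk;
  // toolCallChunk carries cumulative, partial-JSON-parsed arguments for any
  // in-flight tool calls; treat them as final only once done is true.
  if (chunk.done) break;
}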

View File

@@ -0,0 +1,212 @@
import type { ConfigModelProvider } from '../config/types.js';
import BaseModelProvider, {
createProviderInstance,
} from './base/provider.js';
import { getConfiguredModelProviders } from '../config/serverRegistry.js';
import { providers } from './providers/index.js';
import type { MinimalProvider, ModelList } from './types.js';
import { providersConfig } from '../config/ProvidersConfig.js';
class ModelRegistry {
activeProviders: (ConfigModelProvider & {
provider: BaseModelProvider<unknown>;
})[] = [];
constructor() {
this.initializeActiveProviders();
}
private initializeActiveProviders(): void {
const configuredProviders = getConfiguredModelProviders();
configuredProviders.forEach((p) => {
try {
const providerCtor = providers[p.type];
if (!providerCtor) throw new Error(`Invalid provider type: ${p.type}`);
this.activeProviders.push({
...p,
provider: createProviderInstance(
providerCtor,
p.id,
p.name,
p.config,
),
});
} catch (err) {
console.error(
`Failed to initialize provider. Type: ${p.type}, ID: ${p.id}, Config: ${JSON.stringify(p.config)}, Error: ${err}`,
);
}
});
}
async getActiveProviders(): Promise<MinimalProvider[]> {
const result: MinimalProvider[] = [];
await Promise.all(
this.activeProviders.map(async (p) => {
let m: ModelList = { chat: [], embedding: [] };
try {
m = await p.provider.getModelList();
} catch (err: unknown) {
const msg = err instanceof Error ? err.message : String(err);
console.error(
`Failed to get model list. Type: ${p.type}, ID: ${p.id}, Error: ${msg}`,
);
m = {
chat: [{ key: 'error', name: msg }],
embedding: [],
};
}
result.push({
id: p.id,
name: p.name,
chatModels: m.chat,
embeddingModels: m.embedding,
});
}),
);
return result;
}
async loadChatModel(providerId: string, modelName: string) {
const provider = this.activeProviders.find((p) => p.id === providerId);
if (!provider) throw new Error('Invalid provider id');
return provider.provider.loadChatModel(modelName);
}
async loadEmbeddingModel(providerId: string, modelName: string) {
const provider = this.activeProviders.find((p) => p.id === providerId);
if (!provider) throw new Error('Invalid provider id');
return provider.provider.loadEmbeddingModel(modelName);
}
async addProvider(
type: string,
name: string,
config: Record<string, unknown>,
): Promise<ConfigModelProvider> {
const providerCtor = providers[type];
if (!providerCtor) throw new Error('Invalid provider type');
const newProvider = providersConfig.addModelProvider(type, name, config);
const instance = createProviderInstance(
providerCtor,
newProvider.id,
newProvider.name,
newProvider.config,
);
let m: ModelList = { chat: [], embedding: [] };
try {
m = await instance.getModelList();
} catch (err: unknown) {
const msg = err instanceof Error ? err.message : String(err);
console.error(
`Failed to get model list for newly added provider. Type: ${type}, ID: ${newProvider.id}, Error: ${msg}`,
);
m = {
chat: [{ key: 'error', name: msg }],
embedding: [],
};
}
this.activeProviders.push({
...newProvider,
provider: instance,
});
return {
...newProvider,
chatModels: m.chat,
embeddingModels: m.embedding,
};
}
async removeProvider(providerId: string): Promise<void> {
providersConfig.removeModelProvider(providerId);
this.activeProviders = this.activeProviders.filter(
(p) => p.id !== providerId,
);
}
async updateProvider(
providerId: string,
name: string,
config: Record<string, unknown>,
): Promise<ConfigModelProvider> {
const updated = providersConfig.updateModelProvider(
providerId,
name,
config,
);
const providerCtor = providers[updated.type];
if (!providerCtor) throw new Error('Invalid provider type');
const instance = createProviderInstance(
providerCtor,
providerId,
name,
config,
);
let m: ModelList = { chat: [], embedding: [] };
try {
m = await instance.getModelList();
} catch (err: unknown) {
const msg = err instanceof Error ? err.message : String(err);
console.error(
`Failed to get model list for updated provider. Type: ${updated.type}, ID: ${updated.id}, Error: ${msg}`,
);
m = {
chat: [{ key: 'error', name: msg }],
embedding: [],
};
}
const idx = this.activeProviders.findIndex((p) => p.id === providerId);
if (idx >= 0) {
this.activeProviders[idx] = {
...updated,
provider: instance,
chatModels: m.chat,
embeddingModels: m.embedding,
};
} else {
this.activeProviders.push({
...updated,
provider: instance,
chatModels: m.chat,
embeddingModels: m.embedding,
});
}
return {
...updated,
chatModels: m.chat,
embeddingModels: m.embedding,
};
}
async addProviderModel(
providerId: string,
type: 'embedding' | 'chat',
model: { key: string; name: string },
): Promise<void> {
providersConfig.addProviderModel(providerId, type, model);
}
async removeProviderModel(
providerId: string,
type: 'embedding' | 'chat',
modelKey: string,
): Promise<void> {
providersConfig.removeProviderModel(providerId, type, modelKey);
}
}
export default ModelRegistry;
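A sketch of how the registry surfaces over HTTP, using a standalone Fastify instance purely for illustration; the real route handlers live in src/index.ts, and the import path is hypothetical.

import Fastify from 'fastify';
import ModelRegistry from './registry.js'; // hypothetical path

const registry = new ModelRegistry();
const app = Fastify();

// Each provider is listed with whatever models its upstream API reports;
// failures surface as a single { key: 'error' } chat model entry.
app.get('/api/v1/providers', async () => ({
  providers: await registry.getActiveProviders(),
}));

await app.listen({ port: 3020 });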

View File

@@ -0,0 +1,86 @@
import type { z } from 'zod';
import type { Message } from '../types.js';
export type Model = {
name: string;
key: string;
};
export type ModelList = {
embedding: Model[];
chat: Model[];
};
export type ProviderMetadata = {
name: string;
key: string;
};
export type MinimalProvider = {
id: string;
name: string;
chatModels: Model[];
embeddingModels: Model[];
};
export type ModelWithProvider = {
key: string;
providerId: string;
};
export type GenerateOptions = {
temperature?: number;
maxTokens?: number;
topP?: number;
stopSequences?: string[];
frequencyPenalty?: number;
presencePenalty?: number;
};
export type Tool = {
name: string;
description: string;
schema: z.ZodTypeAny;
};
export type ToolCall = {
id: string;
name: string;
arguments: Record<string, unknown>;
};
export type GenerateTextInput = {
messages: Message[];
tools?: Tool[];
options?: GenerateOptions;
};
export type GenerateTextOutput = {
content: string;
toolCalls: ToolCall[];
additionalInfo?: Record<string, unknown>;
};
export type StreamTextOutput = {
contentChunk: string;
toolCallChunk: ToolCall[];
additionalInfo?: Record<string, unknown>;
done?: boolean;
};
export type GenerateObjectInput = {
schema: z.ZodTypeAny;
messages: Message[];
options?: GenerateOptions;
};
export type GenerateObjectOutput<T> = {
object: T;
additionalInfo?: Record<string, unknown>;
};
export type StreamObjectOutput<T> = {
objectChunk: Partial<T>;
additionalInfo?: Record<string, unknown>;
done?: boolean;
};
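For concreteness, a sketch of a Tool as these types define it. The zod schema is what the LLM classes convert with z.toJSONSchema; the tool itself and the import path are illustrative.

import { z } from 'zod';
import type { Tool } from './types.js'; // hypothetical path

const weatherTool: Tool = {
  name: 'get_weather',
  description: 'Look up current weather for a city',
  schema: z.object({ city: z.string() }),
};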

View File

@@ -0,0 +1,6 @@
import crypto from 'node:crypto';
// Stable hash for flat config objects: top-level keys are sorted so key order
// does not change the digest. Note that a key array passed as the
// JSON.stringify replacer filters keys at every nesting level, so deeply
// nested configs would only hash keys that also appear at the top level;
// provider configs here are flat, so this is fine.
export const hashObj = (obj: Record<string, unknown>): string => {
const json = JSON.stringify(obj, Object.keys(obj).sort());
return crypto.createHash('sha256').update(json).digest('hex');
};
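A usage sketch showing the key-order invariance the sort buys; the import path is hypothetical.

import { hashObj } from './hash.js'; // hypothetical path

const a = hashObj({ baseURL: 'http://localhost:11434', model: 'llama3.2' });
const b = hashObj({ model: 'llama3.2', baseURL: 'http://localhost:11434' });
console.assert(a === b); // identical digests regardless of key order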

View File

@@ -0,0 +1,39 @@
export type ToolCall = {
id: string;
name: string;
arguments: Record<string, unknown>;
};
export type SystemMessage = {
role: 'system';
content: string;
};
export type AssistantMessage = {
role: 'assistant';
content: string;
tool_calls?: ToolCall[];
};
export type UserMessage = {
role: 'user';
content: string;
};
export type ToolMessage = {
role: 'tool';
id: string;
name: string;
content: string;
};
export type Message =
| UserMessage
| AssistantMessage
| SystemMessage
| ToolMessage;
export type Chunk = {
content: string;
metadata: Record<string, unknown>;
};
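A sketch of the tool-call round trip these types encode: the shared id is what lets providers correlate a tool result with the assistant call that requested it. Values and the import path are illustrative.

import type { Message } from './types.js'; // hypothetical path

const conversation: Message[] = [
  { role: 'user', content: 'What is the weather in Paris?' },
  {
    role: 'assistant',
    content: '',
    tool_calls: [
      { id: 'call_1', name: 'get_weather', arguments: { city: 'Paris' } },
    ],
  },
  { role: 'tool', id: 'call_1', name: 'get_weather', content: '{"tempC":18}' },
];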

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"outDir": "dist",
"rootDir": "src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"resolveJsonModule": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}