feat: default locale Russian, geo determines language for other countries
- localization-svc: defaultLocale ru, resolveLocale only by geo
- web-svc: DEFAULT_LOCALE ru, layout lang=ru, embeddedTranslations fallback ru
- countryToLocale: default ru when no country or unknown country

Co-authored-by: Cursor <cursoragent@cursor.com>
services/llm-svc/src/lib/models/providers/anthropic/anthropicLLM.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';

class AnthropicLLM extends OpenAILLM {}

export default AnthropicLLM;
services/llm-svc/src/lib/models/providers/anthropic/index.ts (new file, 115 lines)
@@ -0,0 +1,115 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import AnthropicLLM from './anthropicLLM.js';

interface AnthropicConfig {
  apiKey: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your Anthropic API key',
    required: true,
    placeholder: 'Anthropic API Key',
    env: 'ANTHROPIC_API_KEY',
    scope: 'server',
  },
];

class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
  constructor(id: string, name: string, config: AnthropicConfig) {
    super(id, name, config);
  }

  async getDefaultModels(): Promise<ModelList> {
    const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
      method: 'GET',
      headers: {
        'x-api-key': this.config.apiKey,
        'anthropic-version': '2023-06-01',
        'Content-type': 'application/json',
      },
    });

    if (!res.ok) {
      throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
    }

    const data = (await res.json()).data;

    const models: Model[] = data.map((m: any) => {
      return {
        key: m.id,
        name: m.display_name,
      };
    });

    return {
      embedding: [],
      chat: models,
    };
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading Anthropic Chat Model. Invalid Model Selected',
      );
    }

    return new AnthropicLLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://api.anthropic.com/v1',
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    throw new Error('Anthropic provider does not support embedding models.');
  }

  static parseAndValidate(raw: any): AnthropicConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');

    return {
      apiKey: String(raw.apiKey),
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'anthropic',
      name: 'Anthropic',
    };
  }
}

export default AnthropicProvider;
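Note: a minimal usage sketch of the provider above, not part of the diff. The id, name, and model key are illustrative, and getModelList assumes the provider id is already registered in the server config registry (it calls getConfiguredModelProviderById internally).

import AnthropicProvider from './index.js';

// Illustrative wiring only; the real service constructs providers through
// the registry in providers/index.ts.
const config = AnthropicProvider.parseAndValidate({
  apiKey: process.env.ANTHROPIC_API_KEY,
});
const provider = new AnthropicProvider('anthropic-1', 'Anthropic', config);

// The key must appear in getModelList(); 'claude-sonnet-4-0' is a placeholder.
const llm = await provider.loadChatModel('claude-sonnet-4-0');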
services/llm-svc/src/lib/models/providers/gemini/geminiEmbedding.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';

class GeminiEmbedding extends OpenAIEmbedding {}

export default GeminiEmbedding;
services/llm-svc/src/lib/models/providers/gemini/geminiLLM.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';

class GeminiLLM extends OpenAILLM {}

export default GeminiLLM;
services/llm-svc/src/lib/models/providers/gemini/index.ts (new file, 144 lines)
@@ -0,0 +1,144 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import GeminiEmbedding from './geminiEmbedding.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import GeminiLLM from './geminiLLM.js';

interface GeminiConfig {
  apiKey: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your Gemini API key',
    required: true,
    placeholder: 'Gemini API Key',
    env: 'GEMINI_API_KEY',
    scope: 'server',
  },
];

class GeminiProvider extends BaseModelProvider<GeminiConfig> {
  constructor(id: string, name: string, config: GeminiConfig) {
    super(id, name, config);
  }

  async getDefaultModels(): Promise<ModelList> {
    const res = await fetch(
      `https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
      {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
        },
      },
    );

    const data = await res.json();

    let defaultEmbeddingModels: Model[] = [];
    let defaultChatModels: Model[] = [];

    data.models.forEach((m: any) => {
      if (
        m.supportedGenerationMethods.some(
          (genMethod: string) =>
            genMethod === 'embedText' || genMethod === 'embedContent',
        )
      ) {
        defaultEmbeddingModels.push({
          key: m.name,
          name: m.displayName,
        });
      } else if (m.supportedGenerationMethods.includes('generateContent')) {
        defaultChatModels.push({
          key: m.name,
          name: m.displayName,
        });
      }
    });

    return {
      embedding: defaultEmbeddingModels,
      chat: defaultChatModels,
    };
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading Gemini Chat Model. Invalid Model Selected',
      );
    }

    return new GeminiLLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading Gemini Embedding Model. Invalid Model Selected.',
      );
    }

    return new GeminiEmbedding({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
    });
  }

  static parseAndValidate(raw: any): GeminiConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');

    return {
      apiKey: String(raw.apiKey),
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'gemini',
      name: 'Gemini',
    };
  }
}

export default GeminiProvider;
services/llm-svc/src/lib/models/providers/groq/groqLLM.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';

class GroqLLM extends OpenAILLM {}

export default GroqLLM;
services/llm-svc/src/lib/models/providers/groq/index.ts (new file, 113 lines)
@@ -0,0 +1,113 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import GroqLLM from './groqLLM.js';

interface GroqConfig {
  apiKey: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your Groq API key',
    required: true,
    placeholder: 'Groq API Key',
    env: 'GROQ_API_KEY',
    scope: 'server',
  },
];

class GroqProvider extends BaseModelProvider<GroqConfig> {
  constructor(id: string, name: string, config: GroqConfig) {
    super(id, name, config);
  }

  async getDefaultModels(): Promise<ModelList> {
    const res = await fetch(`https://api.groq.com/openai/v1/models`, {
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.config.apiKey}`,
      },
    });

    const data = await res.json();

    const defaultChatModels: Model[] = [];

    data.data.forEach((m: any) => {
      defaultChatModels.push({
        key: m.id,
        name: m.id,
      });
    });

    return {
      embedding: [],
      chat: defaultChatModels,
    };
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
    }

    return new GroqLLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: 'https://api.groq.com/openai/v1',
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    throw new Error('Groq Provider does not support embedding models.');
  }

  static parseAndValidate(raw: any): GroqConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');

    return {
      apiKey: String(raw.apiKey),
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'groq',
      name: 'Groq',
    };
  }
}

export default GroqProvider;
services/llm-svc/src/lib/models/providers/index.ts (new file, 20 lines)
@@ -0,0 +1,20 @@
import type { ProviderConstructor } from '../base/provider.js';
import OpenAIProvider from './openai/index.js';
import OllamaProvider from './ollama/index.js';
import TimewebProvider from './timeweb/index.js';
import GeminiProvider from './gemini/index.js';
import GroqProvider from './groq/index.js';
import LemonadeProvider from './lemonade/index.js';
import AnthropicProvider from './anthropic/index.js';
import LMStudioProvider from './lmstudio/index.js';

export const providers: Record<string, ProviderConstructor<unknown>> = {
  openai: OpenAIProvider as unknown as ProviderConstructor<unknown>,
  ollama: OllamaProvider as unknown as ProviderConstructor<unknown>,
  timeweb: TimewebProvider as unknown as ProviderConstructor<unknown>,
  gemini: GeminiProvider as unknown as ProviderConstructor<unknown>,
  groq: GroqProvider as unknown as ProviderConstructor<unknown>,
  lemonade: LemonadeProvider as unknown as ProviderConstructor<unknown>,
  anthropic: AnthropicProvider as unknown as ProviderConstructor<unknown>,
  lmstudio: LMStudioProvider as unknown as ProviderConstructor<unknown>,
};
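Note: a sketch of how the registry above might be consumed, not part of the diff. The factory helper below is hypothetical; it assumes ProviderConstructor exposes the static parseAndValidate seen on every provider class in this diff (the casts mirror the registry's own `as unknown as` pattern).

import { providers } from './index.js';

// Hypothetical factory; illustrative only.
async function createProvider(key: string, id: string, name: string, raw: unknown) {
  const Ctor = providers[key];
  if (!Ctor) throw new Error(`Unknown provider key: ${key}`);
  const config = (Ctor as any).parseAndValidate(raw); // validates raw config
  return new (Ctor as any)(id, name, config);
}

const ollama = await createProvider('ollama', 'ollama-1', 'Local Ollama', {
  baseURL: 'http://localhost:11434',
});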
services/llm-svc/src/lib/models/providers/lemonade/index.ts (new file, 153 lines)
@@ -0,0 +1,153 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import BaseLLM from '../../base/llm.js';
import LemonadeLLM from './lemonadeLLM.js';
import BaseEmbedding from '../../base/embedding.js';
import LemonadeEmbedding from './lemonadeEmbedding.js';

interface LemonadeConfig {
  baseURL: string;
  apiKey?: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'string',
    name: 'Base URL',
    key: 'baseURL',
    description: 'The base URL for Lemonade API',
    required: true,
    placeholder: 'https://api.lemonade.ai/v1',
    env: 'LEMONADE_BASE_URL',
    scope: 'server',
  },
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your Lemonade API key (optional)',
    required: false,
    placeholder: 'Lemonade API Key',
    env: 'LEMONADE_API_KEY',
    scope: 'server',
  },
];

class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
  constructor(id: string, name: string, config: LemonadeConfig) {
    super(id, name, config);
  }

  async getDefaultModels(): Promise<ModelList> {
    try {
      const res = await fetch(`${this.config.baseURL}/models`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
          ...(this.config.apiKey
            ? { Authorization: `Bearer ${this.config.apiKey}` }
            : {}),
        },
      });

      const data = await res.json();

      const models: Model[] = data.data
        .filter((m: any) => m.recipe === 'llamacpp')
        .map((m: any) => {
          return {
            name: m.id,
            key: m.id,
          };
        });

      return {
        embedding: models,
        chat: models,
      };
    } catch (err) {
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
        );
      }

      throw err;
    }
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading Lemonade Chat Model. Invalid Model Selected',
      );
    }

    return new LemonadeLLM({
      apiKey: this.config.apiKey || 'not-needed',
      model: key,
      baseURL: this.config.baseURL,
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
      );
    }

    return new LemonadeEmbedding({
      apiKey: this.config.apiKey || 'not-needed',
      model: key,
      baseURL: this.config.baseURL,
    });
  }

  static parseAndValidate(raw: any): LemonadeConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');

    return {
      baseURL: String(raw.baseURL),
      apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'lemonade',
      name: 'Lemonade',
    };
  }
}

export default LemonadeProvider;
services/llm-svc/src/lib/models/providers/lemonade/lemonadeEmbedding.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';

class LemonadeEmbedding extends OpenAIEmbedding {}

export default LemonadeEmbedding;
services/llm-svc/src/lib/models/providers/lemonade/lemonadeLLM.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';

class LemonadeLLM extends OpenAILLM {}

export default LemonadeLLM;
services/llm-svc/src/lib/models/providers/lmstudio/index.ts (new file, 143 lines)
@@ -0,0 +1,143 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import LMStudioLLM from './lmstudioLLM.js';
import BaseLLM from '../../base/llm.js';
import BaseEmbedding from '../../base/embedding.js';
import LMStudioEmbedding from './lmstudioEmbedding.js';

interface LMStudioConfig {
  baseURL: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'string',
    name: 'Base URL',
    key: 'baseURL',
    description: 'The base URL for LM Studio server',
    required: true,
    placeholder: 'http://localhost:1234',
    env: 'LM_STUDIO_BASE_URL',
    scope: 'server',
  },
];

class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
  constructor(id: string, name: string, config: LMStudioConfig) {
    super(id, name, config);
  }

  private normalizeBaseURL(url: string): string {
    const trimmed = url.trim().replace(/\/+$/, '');
    return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
  }

  async getDefaultModels(): Promise<ModelList> {
    try {
      const baseURL = this.normalizeBaseURL(this.config.baseURL);

      const res = await fetch(`${baseURL}/models`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
        },
      });

      const data = await res.json();

      const models: Model[] = data.data.map((m: any) => {
        return {
          name: m.id,
          key: m.id,
        };
      });

      return {
        embedding: models,
        chat: models,
      };
    } catch (err) {
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
        );
      }

      throw err;
    }
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading LM Studio Chat Model. Invalid Model Selected',
      );
    }

    return new LMStudioLLM({
      apiKey: 'lm-studio',
      model: key,
      baseURL: this.normalizeBaseURL(this.config.baseURL),
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
      );
    }

    return new LMStudioEmbedding({
      apiKey: 'lm-studio',
      model: key,
      baseURL: this.normalizeBaseURL(this.config.baseURL),
    });
  }

  static parseAndValidate(raw: any): LMStudioConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');

    return {
      baseURL: String(raw.baseURL),
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'lmstudio',
      name: 'LM Studio',
    };
  }
}

export default LMStudioProvider;
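Note: for clarity (not part of the diff), normalizeBaseURL above strips trailing slashes and appends /v1 at most once:

// 'http://localhost:1234'    → 'http://localhost:1234/v1'
// 'http://localhost:1234//'  → 'http://localhost:1234/v1'
// 'http://localhost:1234/v1' → 'http://localhost:1234/v1' (unchanged)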
services/llm-svc/src/lib/models/providers/lmstudio/lmstudioEmbedding.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding.js';

class LMStudioEmbedding extends OpenAIEmbedding {}

export default LMStudioEmbedding;
services/llm-svc/src/lib/models/providers/lmstudio/lmstudioLLM.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM.js';

class LMStudioLLM extends OpenAILLM {}

export default LMStudioLLM;
services/llm-svc/src/lib/models/providers/ollama/index.ts (new file, 133 lines)
@@ -0,0 +1,133 @@
import type { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import BaseModelProvider from '../../base/provider.js';
import type { Model, ModelList, ProviderMetadata } from '../../types.js';
import type BaseLLM from '../../base/llm.js';
import type BaseEmbedding from '../../base/embedding.js';
import OllamaLLM from './ollamaLLM.js';
import OllamaEmbedding from './ollamaEmbedding.js';

interface OllamaConfig {
  baseURL: string;
  embeddingBaseURL?: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'string',
    name: 'Base URL',
    key: 'baseURL',
    description: 'The base URL for the Ollama API',
    required: true,
    placeholder:
      process.env.DOCKER === '1'
        ? 'http://host.docker.internal:11434'
        : 'http://localhost:11434',
    env: 'OLLAMA_BASE_URL',
    scope: 'server',
  },
];

class OllamaProvider extends BaseModelProvider<OllamaConfig> {
  constructor(id: string, name: string, config: OllamaConfig) {
    super(id, name, config);
  }

  private async fetchModels(baseURL: string): Promise<Model[]> {
    const res = await fetch(`${baseURL}/api/tags`, {
      method: 'GET',
      headers: { 'Content-type': 'application/json' },
    });
    const data = (await res.json()) as {
      models?: { name?: string; model?: string }[];
    };
    return (data.models ?? []).map((m) => ({
      name: m.model ?? m.name ?? '',
      key: m.model ?? m.name ?? '',
    }));
  }

  async getDefaultModels(): Promise<ModelList> {
    try {
      const [chatModels, embeddingModels] = await Promise.all([
        this.fetchModels(this.config.baseURL),
        this.fetchModels(this.config.embeddingBaseURL ?? this.config.baseURL),
      ]);
      return { chat: chatModels, embedding: embeddingModels };
    } catch (err) {
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to Ollama API. Please ensure the base URL is correct and the Ollama server is running.',
        );
      }
      throw err;
    }
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id);
    if (!configProvider) {
      return defaultModels;
    }
    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<unknown>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Ollama Chat Model. Invalid Model Selected',
      );
    }
    return new OllamaLLM({
      baseURL: this.config.baseURL,
      model: key,
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<unknown>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);
    if (!exists) {
      throw new Error(
        'Error Loading Ollama Embedding Model. Invalid Model Selected.',
      );
    }
    return new OllamaEmbedding({
      model: key,
      baseURL: this.config.embeddingBaseURL ?? this.config.baseURL,
    });
  }

  static parseAndValidate(raw: unknown): OllamaConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    const obj = raw as Record<string, unknown>;
    if (!obj.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');
    return {
      baseURL: String(obj.baseURL),
      embeddingBaseURL: obj.embeddingBaseURL
        ? String(obj.embeddingBaseURL)
        : undefined,
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return { key: 'ollama', name: 'Ollama' };
  }
}

export default OllamaProvider;
services/llm-svc/src/lib/models/providers/ollama/ollamaEmbedding.ts (new file, 37 lines)
@@ -0,0 +1,37 @@
import { Ollama } from 'ollama';
import BaseEmbedding from '../../base/embedding.js';
import type { Chunk } from '../../../types.js';

type OllamaConfig = {
  model: string;
  baseURL?: string;
};

class OllamaEmbedding extends BaseEmbedding<OllamaConfig> {
  ollamaClient: Ollama;

  constructor(protected config: OllamaConfig) {
    super(config);
    this.ollamaClient = new Ollama({
      host: this.config.baseURL ?? 'http://localhost:11434',
    });
  }

  async embedText(texts: string[]): Promise<number[][]> {
    const response = await this.ollamaClient.embed({
      input: texts,
      model: this.config.model,
    });
    return response.embeddings;
  }

  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    const response = await this.ollamaClient.embed({
      input: chunks.map((c) => c.content),
      model: this.config.model,
    });
    return response.embeddings;
  }
}

export default OllamaEmbedding;
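Note: a minimal usage sketch for the class above, not part of the diff. It assumes a local Ollama server with the named embedding model already pulled; the model name is illustrative.

import OllamaEmbedding from './ollamaEmbedding.js';

const embedder = new OllamaEmbedding({
  model: 'nomic-embed-text', // illustrative; any pulled embedding model works
  baseURL: 'http://localhost:11434',
});

const vectors = await embedder.embedText(['hello world']);
console.log(vectors[0].length); // embedding dimensionality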
services/llm-svc/src/lib/models/providers/ollama/ollamaLLM.ts (new file, 251 lines)
@@ -0,0 +1,251 @@
import z from 'zod';
import BaseLLM from '../../base/llm.js';
import type {
  GenerateObjectInput,
  GenerateOptions,
  GenerateTextInput,
  GenerateTextOutput,
  StreamTextOutput,
} from '../../types.js';
import {
  Ollama,
  type Tool as OllamaTool,
  type Message as OllamaMessage,
} from 'ollama';
import { parse } from 'partial-json';
import crypto from 'node:crypto';
import type { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';

type OllamaConfig = {
  baseURL: string;
  model: string;
  options?: GenerateOptions;
};

const reasoningModels = [
  'gpt-oss',
  'deepseek-r1',
  'qwen3',
  'deepseek-v3.1',
  'magistral',
  'nemotron-3-nano',
];

class OllamaLLM extends BaseLLM<OllamaConfig> {
  ollamaClient: Ollama;

  constructor(protected config: OllamaConfig) {
    super(config);
    this.ollamaClient = new Ollama({
      host: this.config.baseURL || 'http://localhost:11434',
    });
  }

  convertToOllamaMessages(messages: Message[]): OllamaMessage[] {
    return messages.map((msg) => {
      if (msg.role === 'tool') {
        return {
          role: 'tool',
          tool_name: msg.name,
          content: msg.content,
        } as OllamaMessage;
      }
      if (msg.role === 'assistant') {
        return {
          role: 'assistant',
          content: msg.content,
          tool_calls:
            msg.tool_calls?.map((tc, i) => ({
              function: {
                index: i,
                name: tc.name,
                arguments: tc.arguments,
              },
            })) ?? [],
        };
      }
      return msg as OllamaMessage;
    });
  }

  async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
    const ollamaTools: OllamaTool[] = [];
    input.tools?.forEach((tool) => {
      ollamaTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema).properties ?? {},
        },
      });
    });

    const res = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      tools: ollamaTools.length > 0 ? ollamaTools : undefined,
      ...(reasoningModels.some((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        num_ctx: 32000,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });

    return {
      content: res.message.content ?? '',
      toolCalls:
        res.message.tool_calls?.map((tc) => ({
          id: crypto.randomUUID(),
          name: tc.function.name,
          arguments: (tc.function.arguments as Record<string, unknown>) ?? {},
        })) ?? [],
      additionalInfo: { reasoning: res.message.thinking },
    };
  }

  async *streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput> {
    const ollamaTools: OllamaTool[] = [];
    input.tools?.forEach((tool) => {
      ollamaTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema) as Record<string, unknown>,
        },
      });
    });

    const stream = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      stream: true,
      ...(reasoningModels.some((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      tools: ollamaTools.length > 0 ? ollamaTools : undefined,
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_ctx: 32000,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });

    for await (const chunk of stream) {
      yield {
        contentChunk: chunk.message.content ?? '',
        toolCallChunk:
          chunk.message.tool_calls?.map((tc, i) => ({
            id: crypto
              .createHash('sha256')
              .update(`${i}-${tc.function.name}`)
              .digest('hex'),
            name: tc.function.name,
            arguments: (tc.function.arguments as Record<string, unknown>) ?? {},
          })) ?? [],
        done: chunk.done,
        additionalInfo: { reasoning: chunk.message.thinking },
      };
    }
  }

  async generateObject<T>(input: GenerateObjectInput): Promise<T> {
    const response = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      format: z.toJSONSchema(input.schema),
      ...(reasoningModels.some((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });

    try {
      return input.schema.parse(
        JSON.parse(
          repairJson(response.message.content ?? '{}', {
            extractJson: true,
          }) as string,
        ),
      ) as T;
    } catch (err) {
      throw new Error(`Error parsing response from Ollama: ${err}`);
    }
  }

  async *streamObject<T>(
    input: GenerateObjectInput,
  ): AsyncGenerator<Partial<T>> {
    let receivedObj = '';
    const stream = await this.ollamaClient.chat({
      model: this.config.model,
      messages: this.convertToOllamaMessages(input.messages),
      format: z.toJSONSchema(input.schema),
      stream: true,
      ...(reasoningModels.some((m) => this.config.model.includes(m))
        ? { think: false }
        : {}),
      options: {
        top_p: input.options?.topP ?? this.config.options?.topP,
        temperature:
          input.options?.temperature ?? this.config.options?.temperature ?? 0.7,
        num_predict: input.options?.maxTokens ?? this.config.options?.maxTokens,
        frequency_penalty:
          input.options?.frequencyPenalty ??
          this.config.options?.frequencyPenalty,
        presence_penalty:
          input.options?.presencePenalty ??
          this.config.options?.presencePenalty,
        stop:
          input.options?.stopSequences ?? this.config.options?.stopSequences,
      },
    });

    for await (const chunk of stream) {
      receivedObj += chunk.message.content ?? '';
      try {
        yield parse(receivedObj) as Partial<T>;
      } catch {
        yield {} as Partial<T>;
      }
    }
  }
}

export default OllamaLLM;
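Note: a minimal streaming sketch for the class above, not part of the diff. The message shape is assumed from how convertToOllamaMessages passes non-tool roles through (the exact Message type lives in ../../../types.js); 'qwen3' matches the reasoningModels list, so think: false is sent.

import OllamaLLM from './ollamaLLM.js';

const llm = new OllamaLLM({
  baseURL: 'http://localhost:11434',
  model: 'qwen3',
});

// streamText yields chunks with content, accumulated tool calls and a done flag.
for await (const chunk of llm.streamText({
  messages: [{ role: 'user', content: 'Say hi' } as any], // shape assumed
})) {
  process.stdout.write(chunk.contentChunk);
  if (chunk.done) break;
}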
services/llm-svc/src/lib/models/providers/openai/index.ts (new file, 226 lines)
@@ -0,0 +1,226 @@
import { UIConfigField } from '../../../config/types.js';
import { getConfiguredModelProviderById } from '../../../config/serverRegistry.js';
import { Model, ModelList, ProviderMetadata } from '../../types.js';
import OpenAIEmbedding from './openaiEmbedding.js';
import BaseEmbedding from '../../base/embedding.js';
import BaseModelProvider from '../../base/provider.js';
import BaseLLM from '../../base/llm.js';
import OpenAILLM from './openaiLLM.js';

interface OpenAIConfig {
  apiKey: string;
  baseURL: string;
}

const defaultChatModels: Model[] = [
  { name: 'GPT-3.5 Turbo', key: 'gpt-3.5-turbo' },
  { name: 'GPT-4', key: 'gpt-4' },
  { name: 'GPT-4 turbo', key: 'gpt-4-turbo' },
  { name: 'GPT-4 omni', key: 'gpt-4o' },
  { name: 'GPT-4o (2024-05-13)', key: 'gpt-4o-2024-05-13' },
  { name: 'GPT-4 omni mini', key: 'gpt-4o-mini' },
  { name: 'GPT 4.1 nano', key: 'gpt-4.1-nano' },
  { name: 'GPT 4.1 mini', key: 'gpt-4.1-mini' },
  { name: 'GPT 4.1', key: 'gpt-4.1' },
  { name: 'GPT 5 nano', key: 'gpt-5-nano' },
  { name: 'GPT 5', key: 'gpt-5' },
  { name: 'GPT 5 Mini', key: 'gpt-5-mini' },
  { name: 'GPT 5 Pro', key: 'gpt-5-pro' },
  { name: 'GPT 5.1', key: 'gpt-5.1' },
  { name: 'GPT 5.2', key: 'gpt-5.2' },
  { name: 'GPT 5.2 Pro', key: 'gpt-5.2-pro' },
  { name: 'o1', key: 'o1' },
  { name: 'o3', key: 'o3' },
  { name: 'o3 Mini', key: 'o3-mini' },
  { name: 'o4 Mini', key: 'o4-mini' },
];

const defaultEmbeddingModels: Model[] = [
  { name: 'Text Embedding 3 Small', key: 'text-embedding-3-small' },
  { name: 'Text Embedding 3 Large', key: 'text-embedding-3-large' },
];

const providerConfigFields: UIConfigField[] = [
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your OpenAI API key',
    required: true,
    placeholder: 'OpenAI API Key',
    env: 'OPENAI_API_KEY',
    scope: 'server',
  },
  {
    type: 'string',
    name: 'Base URL',
    key: 'baseURL',
    description: 'The base URL for the OpenAI API',
    required: true,
    placeholder: 'OpenAI Base URL',
    default: 'https://api.openai.com/v1',
    env: 'OPENAI_BASE_URL',
    scope: 'server',
  },
];

class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
  constructor(id: string, name: string, config: OpenAIConfig) {
    super(id, name, config);
  }

  async getDefaultModels(): Promise<ModelList> {
    if (this.config.baseURL === 'https://api.openai.com/v1') {
      return {
        embedding: defaultEmbeddingModels,
        chat: defaultChatModels,
      };
    }

    return {
      embedding: [],
      chat: [],
    };
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading OpenAI Chat Model. Invalid Model Selected',
      );
    }

    return new OpenAILLM({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: this.config.baseURL,
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading OpenAI Embedding Model. Invalid Model Selected.',
      );
    }

    return new OpenAIEmbedding({
      apiKey: this.config.apiKey,
      model: key,
      baseURL: this.config.baseURL,
    });
  }

  static parseAndValidate(raw: any): OpenAIConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey || !raw.baseURL)
      throw new Error(
        'Invalid config provided. API key and base URL must be provided',
      );

    return {
      apiKey: String(raw.apiKey),
      baseURL: String(raw.baseURL),
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'openai',
      name: 'OpenAI',
    };
  }
}

export default OpenAIProvider;
services/llm-svc/src/lib/models/providers/openai/openaiEmbedding.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
import OpenAI from 'openai';
import BaseEmbedding from '../../base/embedding.js';
import { Chunk } from '../../../types.js';

type OpenAIConfig = {
  apiKey: string;
  model: string;
  baseURL?: string;
};

class OpenAIEmbedding extends BaseEmbedding<OpenAIConfig> {
  openAIClient: OpenAI;

  constructor(protected config: OpenAIConfig) {
    super(config);

    this.openAIClient = new OpenAI({
      apiKey: config.apiKey,
      baseURL: config.baseURL,
    });
  }

  async embedText(texts: string[]): Promise<number[][]> {
    const response = await this.openAIClient.embeddings.create({
      model: this.config.model,
      input: texts,
    });

    return response.data.map((embedding) => embedding.embedding);
  }

  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    const response = await this.openAIClient.embeddings.create({
      model: this.config.model,
      input: chunks.map((c) => c.content),
    });

    return response.data.map((embedding) => embedding.embedding);
  }
}

export default OpenAIEmbedding;
services/llm-svc/src/lib/models/providers/openai/openaiLLM.ts (new file, 275 lines)
@@ -0,0 +1,275 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm.js';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
  GenerateObjectInput,
  GenerateOptions,
  GenerateTextInput,
  GenerateTextOutput,
  StreamTextOutput,
  ToolCall,
} from '../../types.js';
import { parse } from 'partial-json';
import z from 'zod';
import {
  ChatCompletionAssistantMessageParam,
  ChatCompletionMessageParam,
  ChatCompletionTool,
  ChatCompletionToolMessageParam,
} from 'openai/resources/index.mjs';
import { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';

type OpenAIConfig = {
  apiKey: string;
  model: string;
  baseURL?: string;
  options?: GenerateOptions;
};

class OpenAILLM extends BaseLLM<OpenAIConfig> {
  openAIClient: OpenAI;

  constructor(protected config: OpenAIConfig) {
    super(config);

    this.openAIClient = new OpenAI({
      apiKey: this.config.apiKey,
      baseURL: this.config.baseURL || 'https://api.openai.com/v1',
    });
  }

  convertToOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[] {
    return messages.map((msg) => {
      if (msg.role === 'tool') {
        return {
          role: 'tool',
          tool_call_id: msg.id,
          content: msg.content,
        } as ChatCompletionToolMessageParam;
      } else if (msg.role === 'assistant') {
        return {
          role: 'assistant',
          content: msg.content,
          ...(msg.tool_calls &&
            msg.tool_calls.length > 0 && {
              tool_calls: msg.tool_calls?.map((tc) => ({
                id: tc.id,
                type: 'function',
                function: {
                  name: tc.name,
                  arguments: JSON.stringify(tc.arguments),
                },
              })),
            }),
        } as ChatCompletionAssistantMessageParam;
      }

      return msg;
    });
  }

  async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];

    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });

    const response = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      messages: this.convertToOpenAIMessages(input.messages),
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
    });

    if (response.choices && response.choices.length > 0) {
      return {
        content: response.choices[0].message.content!,
        toolCalls:
          response.choices[0].message.tool_calls
            ?.map((tc) => {
              if (tc.type === 'function') {
                return {
                  name: tc.function.name,
                  id: tc.id,
                  arguments: JSON.parse(tc.function.arguments),
                };
              }
            })
            .filter((tc) => tc !== undefined) || [],
        additionalInfo: {
          finishReason: response.choices[0].finish_reason,
        },
      };
    }

    throw new Error('No response from OpenAI');
  }

  async *streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];

    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });

    const stream = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      messages: this.convertToOpenAIMessages(input.messages),
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      stream: true,
    });

    let receivedToolCalls: { name: string; id: string; arguments: string }[] =
      [];

    for await (const chunk of stream) {
      if (chunk.choices && chunk.choices.length > 0) {
        const toolCalls = chunk.choices[0].delta.tool_calls;
        yield {
          contentChunk: chunk.choices[0].delta.content || '',
          toolCallChunk:
            toolCalls?.map((tc) => {
              if (!receivedToolCalls[tc.index]) {
                const call = {
                  name: tc.function?.name!,
                  id: tc.id!,
                  arguments: tc.function?.arguments || '',
                };
                receivedToolCalls.push(call);
                return { ...call, arguments: parse(call.arguments || '{}') };
              } else {
                const existingCall = receivedToolCalls[tc.index];
                existingCall.arguments += tc.function?.arguments || '';
                return {
                  ...existingCall,
                  arguments: parse(existingCall.arguments),
                };
              }
            }) || [],
          done: chunk.choices[0].finish_reason !== null,
          additionalInfo: {
            finishReason: chunk.choices[0].finish_reason,
          },
        };
      }
    }
  }

  async generateObject<T>(input: GenerateObjectInput): Promise<T> {
    const response = await this.openAIClient.chat.completions.parse({
      messages: this.convertToOpenAIMessages(input.messages),
      model: this.config.model,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      response_format: zodResponseFormat(input.schema, 'object'),
    });

    if (response.choices && response.choices.length > 0) {
      try {
        return input.schema.parse(
          JSON.parse(
            repairJson(response.choices[0].message.content!, {
              extractJson: true,
            }) as string,
          ),
        ) as T;
      } catch (err) {
        throw new Error(`Error parsing response from OpenAI: ${err}`);
      }
    }

    throw new Error('No response from OpenAI');
  }

  async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
    let receivedObj: string = '';

    const stream = this.openAIClient.responses.stream({
      model: this.config.model,
      input: input.messages,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ?? this.config.options?.presencePenalty,
      text: {
        format: zodTextFormat(input.schema, 'object'),
      },
    });

    for await (const chunk of stream) {
      if (chunk.type === 'response.output_text.delta' && chunk.delta) {
        receivedObj += chunk.delta;

        try {
          yield parse(receivedObj) as T;
        } catch (err) {
          console.log('Error parsing partial object from OpenAI:', err);
          yield {} as T;
        }
      } else if (chunk.type === 'response.output_text.done' && chunk.text) {
        try {
          yield parse(chunk.text) as T;
        } catch (err) {
          throw new Error(`Error parsing response from OpenAI: ${err}`);
        }
      }
    }
  }
}

export default OpenAILLM;
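Note: a minimal structured-output sketch for the class above, not part of the diff. The model key and schema are illustrative; the message cast assumes Message's shape from ../../../types.js.

import z from 'zod';
import OpenAILLM from './openaiLLM.js';

const llm = new OpenAILLM({
  apiKey: process.env.OPENAI_API_KEY!,
  model: 'gpt-4o-mini',
});

const Answer = z.object({ city: z.string(), country: z.string() });

// generateObject parses and validates the model output against the zod schema.
const result = await llm.generateObject<z.infer<typeof Answer>>({
  messages: [{ role: 'user', content: 'Where is the Eiffel Tower?' } as any],
  schema: Answer,
});
console.log(result.city, result.country);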
165
services/llm-svc/src/lib/models/providers/timeweb/index.ts
Normal file
165
services/llm-svc/src/lib/models/providers/timeweb/index.ts
Normal file
@@ -0,0 +1,165 @@
import type { UIConfigField } from '../../../config/types.js';
import BaseModelProvider from '../../base/provider.js';
import type { Model, ModelList, ProviderMetadata } from '../../types.js';
import type BaseLLM from '../../base/llm.js';
import type BaseEmbedding from '../../base/embedding.js';
import TimewebLLM from './timewebLLM.js';

interface TimewebConfig {
  baseURL: string;
  agentAccessId: string;
  apiKey: string;
  model: string;
  xProxySource?: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'string',
    name: 'API Base URL',
    key: 'baseURL',
    description: 'Timeweb Cloud AI API base URL',
    required: true,
    placeholder: 'https://api.timeweb.cloud',
    env: 'TIMEWEB_API_BASE_URL',
    scope: 'server',
  },
  {
    type: 'string',
    name: 'Agent Access ID',
    key: 'agentAccessId',
    description: 'Agent access ID from Timeweb Cloud AI',
    required: true,
    placeholder: '',
    env: 'TIMEWEB_AGENT_ACCESS_ID',
    scope: 'server',
  },
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Bearer token for Timeweb Cloud AI',
    required: true,
    placeholder: '',
    env: 'TIMEWEB_API_KEY',
    scope: 'server',
  },
  {
    type: 'string',
    name: 'Model',
    key: 'model',
    description: 'Model key (e.g. gpt-4)',
    required: true,
    placeholder: 'gpt-4',
    env: 'LLM_CHAT_MODEL',
    scope: 'server',
  },
  {
    type: 'string',
    name: 'X-Proxy-Source',
    key: 'xProxySource',
    description: 'Optional header for Timeweb API',
    required: false,
    placeholder: '',
    env: 'TIMEWEB_X_PROXY_SOURCE',
    scope: 'server',
  },
];

class TimewebProvider extends BaseModelProvider<TimewebConfig> {
  constructor(id: string, name: string, config: TimewebConfig) {
    super(id, name, config);
  }

  getTimewebBaseURL(): string {
    const base = this.config.baseURL.replace(/\/$/, '');
    return `${base}/api/v1/cloud-ai/agents/${this.config.agentAccessId}/v1`;
  }

  async getDefaultModels(): Promise<ModelList> {
    try {
      const url = `${this.getTimewebBaseURL()}/models`;
      const res = await fetch(url, {
        headers: {
          Authorization: `Bearer ${this.config.apiKey}`,
          'Content-Type': 'application/json',
          ...(this.config.xProxySource && {
            'x-proxy-source': this.config.xProxySource,
          }),
        },
      });

      if (!res.ok) {
        throw new Error(`Timeweb API error: ${res.status} ${res.statusText}`);
      }

      const data = (await res.json()) as { data?: { id: string }[] };
      const models: Model[] = (data.data ?? []).map((m) => ({
        name: m.id,
        key: m.id,
      }));

      const chat =
        models.length > 0
          ? models
          : [{ name: this.config.model, key: this.config.model }];

      return { chat, embedding: [] };
    } catch {
      return {
        chat: [{ name: this.config.model, key: this.config.model }],
        embedding: [],
      };
    }
  }

  async getModelList(): Promise<ModelList> {
    return this.getDefaultModels();
  }

  async loadChatModel(key: string): Promise<BaseLLM<unknown>> {
    return new TimewebLLM({
      apiKey: this.config.apiKey,
      baseURL: this.getTimewebBaseURL(),
      model: key,
      defaultHeaders: this.config.xProxySource
        ? { 'x-proxy-source': this.config.xProxySource }
        : undefined,
    });
  }

  async loadEmbeddingModel(_key: string): Promise<BaseEmbedding<unknown>> {
    throw new Error(
      'Timeweb Cloud AI does not provide embedding models. Use Ollama for embeddings.',
    );
  }

  static parseAndValidate(raw: unknown): TimewebConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');

    const obj = raw as Record<string, unknown>;
    if (!obj.baseURL || !obj.agentAccessId || !obj.apiKey)
      throw new Error(
        'Invalid config. baseURL, agentAccessId and apiKey are required',
      );

    return {
      baseURL: String(obj.baseURL),
      agentAccessId: String(obj.agentAccessId),
      apiKey: String(obj.apiKey),
      model: String(obj.model ?? 'gpt-4'),
      xProxySource: obj.xProxySource ? String(obj.xProxySource) : undefined,
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return { key: 'timeweb', name: 'Timeweb Cloud AI' };
  }
}

export default TimewebProvider;
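Requests end up routed to `${baseURL}/api/v1/cloud-ai/agents/${agentAccessId}/v1/...`, the per-agent OpenAI-compatible prefix built by getTimewebBaseURL. A minimal wiring sketch, assuming direct construction (the provider registry that normally calls parseAndValidate is outside this diff):

const config = TimewebProvider.parseAndValidate({
  baseURL: process.env.TIMEWEB_API_BASE_URL,
  agentAccessId: process.env.TIMEWEB_AGENT_ACCESS_ID,
  apiKey: process.env.TIMEWEB_API_KEY,
  model: process.env.LLM_CHAT_MODEL,
});

// 'timeweb' / 'Timeweb Cloud AI' match getProviderMetadata; the id value
// passed here is an assumption about how the registry names instances.
const provider = new TimewebProvider('timeweb', 'Timeweb Cloud AI', config);
const llm = await provider.loadChatModel(config.model);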
245
services/llm-svc/src/lib/models/providers/timeweb/timewebLLM.ts
Normal file
@@ -0,0 +1,245 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm.js';
import { zodResponseFormat } from 'openai/helpers/zod';
import type {
  GenerateObjectInput,
  GenerateOptions,
  GenerateTextInput,
  GenerateTextOutput,
  StreamTextOutput,
} from '../../types.js';
import { parse } from 'partial-json';
import z from 'zod';
import type {
  ChatCompletionAssistantMessageParam,
  ChatCompletionMessageParam,
  ChatCompletionTool,
  ChatCompletionToolMessageParam,
} from 'openai/resources/index.mjs';
import type { Message } from '../../../types.js';
import { repairJson } from '@toolsycc/json-repair';

type TimewebConfig = {
  apiKey: string;
  baseURL: string;
  model: string;
  options?: GenerateOptions;
  defaultHeaders?: Record<string, string>;
};

class TimewebLLM extends BaseLLM<TimewebConfig> {
  openAIClient: OpenAI;

  constructor(protected config: TimewebConfig) {
    super(config);
    // Timeweb Cloud AI speaks the OpenAI chat-completions protocol, so the
    // official client is reused with a custom base URL and default headers.
    this.openAIClient = new OpenAI({
      apiKey: this.config.apiKey,
      baseURL: this.config.baseURL,
      defaultHeaders: this.config.defaultHeaders,
    });
  }

  convertToOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[] {
    return messages.map((msg) => {
      if (msg.role === 'tool') {
        return {
          role: 'tool',
          tool_call_id: msg.id,
          content: msg.content,
        } as ChatCompletionToolMessageParam;
      }
      if (msg.role === 'assistant') {
        return {
          role: 'assistant',
          content: msg.content,
          ...(msg.tool_calls &&
            msg.tool_calls.length > 0 && {
              tool_calls: msg.tool_calls.map((tc) => ({
                id: tc.id,
                type: 'function' as const,
                function: {
                  name: tc.name,
                  arguments: JSON.stringify(tc.arguments),
                },
              })),
            }),
        } as ChatCompletionAssistantMessageParam;
      }
      return msg;
    });
  }

  async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];
    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });

    const response = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      messages: this.convertToOpenAIMessages(input.messages),
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ??
        this.config.options?.presencePenalty,
    });

    if (response.choices && response.choices.length > 0) {
      return {
        content: response.choices[0].message.content ?? '',
        toolCalls:
          response.choices[0].message.tool_calls
            ?.map((tc) => {
              if (tc.type === 'function') {
                return {
                  name: tc.function.name,
                  id: tc.id!,
                  arguments: JSON.parse(tc.function.arguments ?? '{}'),
                };
              }
              return undefined;
            })
            .filter((tc): tc is NonNullable<typeof tc> => tc !== undefined) ??
          [],
        additionalInfo: {
          finishReason: response.choices[0].finish_reason ?? undefined,
        },
      };
    }

    throw new Error('No response from Timeweb');
  }

  async *streamText(
    input: GenerateTextInput,
  ): AsyncGenerator<StreamTextOutput> {
    const openaiTools: ChatCompletionTool[] = [];
    input.tools?.forEach((tool) => {
      openaiTools.push({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema),
        },
      });
    });

    const stream = await this.openAIClient.chat.completions.create({
      model: this.config.model,
      messages: this.convertToOpenAIMessages(input.messages),
      tools: openaiTools.length > 0 ? openaiTools : undefined,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ??
        this.config.options?.presencePenalty,
      stream: true,
    });

    // Tool-call arguments arrive as string fragments spread across chunks;
    // they are accumulated per index and re-parsed as partial JSON each time.
    const receivedToolCalls: {
      name: string;
      id: string;
      arguments: string;
    }[] = [];

    for await (const chunk of stream) {
      if (chunk.choices && chunk.choices.length > 0) {
        const toolCalls = chunk.choices[0].delta.tool_calls;
        yield {
          contentChunk: chunk.choices[0].delta.content ?? '',
          toolCallChunk:
            toolCalls?.map((tc) => {
              if (!receivedToolCalls[tc.index!]) {
                const call = {
                  name: tc.function?.name ?? '',
                  id: tc.id ?? '',
                  arguments: tc.function?.arguments ?? '',
                };
                receivedToolCalls[tc.index!] = call;
                return { ...call, arguments: parse(call.arguments || '{}') };
              }
              const existingCall = receivedToolCalls[tc.index!];
              existingCall.arguments += tc.function?.arguments ?? '';
              return {
                ...existingCall,
                arguments: parse(existingCall.arguments || '{}'),
              };
            }) ?? [],
          done: chunk.choices[0].finish_reason !== null,
          additionalInfo: {
            finishReason: chunk.choices[0].finish_reason ?? undefined,
          },
        };
      }
    }
  }

  async generateObject<T>(input: GenerateObjectInput): Promise<T> {
    const response = await this.openAIClient.chat.completions.create({
      messages: this.convertToOpenAIMessages(input.messages),
      model: this.config.model,
      temperature:
        input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
      top_p: input.options?.topP ?? this.config.options?.topP,
      max_completion_tokens:
        input.options?.maxTokens ?? this.config.options?.maxTokens,
      stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
      frequency_penalty:
        input.options?.frequencyPenalty ??
        this.config.options?.frequencyPenalty,
      presence_penalty:
        input.options?.presencePenalty ??
        this.config.options?.presencePenalty,
      response_format: zodResponseFormat(input.schema, 'object'),
    });

    if (response.choices && response.choices.length > 0) {
      try {
        // Repair slightly malformed JSON before validating against the schema.
        return input.schema.parse(
          JSON.parse(
            repairJson(response.choices[0].message.content ?? '{}', {
              extractJson: true,
            }) as string,
          ),
        ) as T;
      } catch (err) {
        throw new Error(`Error parsing response from Timeweb: ${err}`);
      }
    }

    throw new Error('No response from Timeweb');
  }

  async *streamObject<T>(
    input: GenerateObjectInput,
  ): AsyncGenerator<Partial<T>> {
    // Structured output is not streamed incrementally here: the full object
    // is generated once and yielded as a single chunk.
    const result = await this.generateObject<T>(input);
    yield result;
  }
}

export default TimewebLLM;
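A consumption sketch for the streaming path; the `Message` shape and the agent URL placeholder are assumptions, while `contentChunk`, `toolCallChunk`, `done`, and `additionalInfo` come from the `StreamTextOutput` usage above:

const llm = new TimewebLLM({
  apiKey: process.env.TIMEWEB_API_KEY!,
  baseURL: 'https://api.timeweb.cloud/api/v1/cloud-ai/agents/<agent-id>/v1',
  model: 'gpt-4',
});

let text = '';
for await (const chunk of llm.streamText({
  messages: [{ role: 'user', content: 'Hello!' }], // assumed Message shape
})) {
  text += chunk.contentChunk;
  // chunk.toolCallChunk carries progressively completed arguments objects.
  if (chunk.done) console.log(text, chunk.additionalInfo?.finishReason);
}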