Files
deardiary/backend/src/services/ai/lmstudio.ts
2026-03-26 19:57:20 +00:00

53 lines
1.4 KiB
TypeScript

import type { AIProvider, AIProviderConfig } from './provider';
/**
 * AI provider backed by a local LM Studio server exposing the
 * OpenAI-compatible REST API (`POST /chat/completions`, `GET /models`).
 */
export class LMStudioProvider implements AIProvider {
provider = 'lmstudio' as const;
// Root URL of the OpenAI-compatible API (typically ends in `/v1`).
private readonly baseUrl: string;
// Model identifier sent with every completion request.
private readonly model: string;
constructor(config: AIProviderConfig) {
// `||` (not `??`) is deliberate: an empty-string config value also
// falls back to the default, matching the provider's original behavior.
this.baseUrl = config.baseUrl || 'http://localhost:1234/v1';
this.model = config.model || 'local-model';
}
/**
 * Generate a completion for `prompt`, optionally preceded by a system
 * message.
 *
 * @param prompt       User message content.
 * @param systemPrompt Optional system message placed before the user prompt.
 * @returns Content of the first choice, or `''` when the server returns
 *          no usable (string) content.
 * @throws Error when the HTTP status is not ok, or when the response body
 *         is not the expected chat-completion shape.
 */
async generate(prompt: string, systemPrompt?: string): Promise<string> {
const messages: Array<{ role: string; content: string }> = [];
if (systemPrompt) {
messages.push({ role: 'system', content: systemPrompt });
}
messages.push({ role: 'user', content: prompt });
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
}),
});
if (!response.ok) {
const error = await response.text();
throw new Error(`LM Studio API error: ${response.status} ${error}`);
}
// Parse as `unknown` and validate the shape instead of blindly asserting:
// a malformed body now raises a clear provider error rather than an
// opaque TypeError from `data.choices[0]`.
const data = (await response.json()) as unknown;
const choices = (data as { choices?: unknown } | null)?.choices;
if (!Array.isArray(choices)) {
throw new Error('LM Studio API error: malformed response (missing choices array)');
}
const content = (choices[0] as { message?: { content?: unknown } } | undefined)
?.message?.content;
// Guard the declared Promise<string> contract: non-string content
// (null, number, …) degrades to '' instead of leaking through.
return typeof content === 'string' ? content : '';
}
/**
 * Check that the server is reachable by listing its models.
 *
 * @returns true when `GET {baseUrl}/models` responds with an ok status;
 *          false on any network error or non-ok status.
 */
async validate(): Promise<boolean> {
try {
const response = await fetch(`${this.baseUrl}/models`);
return response.ok;
} catch {
return false;
}
}
}