- Add a Custom provider for any OpenAI-compatible API. All providers now support custom headers and parameters; `providerSettings` is stored per provider (headers and parameters). Example: disable thinking in Ollama, add tools, etc.
88 lines · 2.4 KiB · TypeScript
import type { AIProvider, AIProviderConfig, AIProviderResult } from './provider';
import { ProviderSettings } from './openai';
export class LMStudioProvider implements AIProvider {
|
|
provider = 'lmstudio' as const;
|
|
private baseUrl: string;
|
|
private model: string;
|
|
private customHeaders?: Record<string, string>;
|
|
private customParameters?: Record<string, unknown>;
|
|
|
|
constructor(config: AIProviderConfig, settings?: ProviderSettings) {
|
|
this.baseUrl = config.baseUrl || 'http://localhost:1234/v1';
|
|
this.model = config.model || 'local-model';
|
|
this.customHeaders = settings?.headers;
|
|
this.customParameters = settings?.parameters;
|
|
}
|
|
|
|
async generate(prompt: string, systemPrompt?: string, options?: { jsonMode?: boolean }): Promise<AIProviderResult> {
|
|
const messages: Array<{ role: string; content: string }> = [];
|
|
|
|
if (systemPrompt) {
|
|
messages.push({ role: 'system', content: systemPrompt });
|
|
}
|
|
|
|
messages.push({ role: 'user', content: prompt });
|
|
|
|
const requestBody: Record<string, unknown> = {
|
|
model: this.model,
|
|
messages,
|
|
temperature: 0.7,
|
|
max_tokens: 2000,
|
|
...this.customParameters,
|
|
};
|
|
|
|
if (options?.jsonMode) {
|
|
requestBody.response_format = { type: 'json_object' };
|
|
}
|
|
|
|
const headers: Record<string, string> = {
|
|
'Content-Type': 'application/json',
|
|
...this.customHeaders,
|
|
};
|
|
|
|
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
|
method: 'POST',
|
|
headers,
|
|
body: JSON.stringify(requestBody),
|
|
});
|
|
|
|
const responseData = await response.json();
|
|
|
|
if (!response.ok) {
|
|
throw new Error(`LM Studio API error: ${response.status} ${JSON.stringify(responseData)}`);
|
|
}
|
|
|
|
let content = responseData.choices?.[0]?.message?.content || '';
|
|
let title: string | undefined;
|
|
|
|
if (options?.jsonMode) {
|
|
try {
|
|
const parsed = JSON.parse(content);
|
|
title = parsed.title;
|
|
content = parsed.content || content;
|
|
} catch {
|
|
// If JSON parsing fails, use content as-is
|
|
}
|
|
}
|
|
|
|
return {
|
|
content,
|
|
title,
|
|
request: requestBody,
|
|
response: responseData,
|
|
};
|
|
}
|
|
|
|
async validate(): Promise<boolean> {
|
|
try {
|
|
const response = await fetch(`${this.baseUrl}/models`, {
|
|
headers: this.customHeaders,
|
|
});
|
|
return response.ok;
|
|
} catch {
|
|
return false;
|
|
}
|
|
}
|
|
}
|