Initial commit: deardiary project setup
This commit is contained in:
64
backend/src/services/ai/anthropic.ts
Normal file
64
backend/src/services/ai/anthropic.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
import type { AIProvider, AIProviderConfig } from './provider';
|
||||
|
||||
export class AnthropicProvider implements AIProvider {
|
||||
provider = 'anthropic' as const;
|
||||
private apiKey: string;
|
||||
private model: string;
|
||||
private baseUrl: string;
|
||||
|
||||
constructor(config: AIProviderConfig) {
|
||||
this.apiKey = config.apiKey;
|
||||
this.model = config.model || 'claude-3-sonnet-20240229';
|
||||
this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
|
||||
}
|
||||
|
||||
async generate(prompt: string, systemPrompt?: string): Promise<string> {
|
||||
const response = await fetch(`${this.baseUrl}/messages`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': this.apiKey,
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-dangerous-direct-browser-access': 'true',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: this.model,
|
||||
max_tokens: 2000,
|
||||
system: systemPrompt,
|
||||
messages: [
|
||||
{ role: 'user', content: prompt }
|
||||
],
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.text();
|
||||
throw new Error(`Anthropic API error: ${response.status} ${error}`);
|
||||
}
|
||||
|
||||
const data = await response.json() as { content: Array<{ text: string }> };
|
||||
return data.content[0]?.text || '';
|
||||
}
|
||||
|
||||
async validate(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${this.baseUrl}/messages`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': this.apiKey,
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-dangerous-direct-browser-access': 'true',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: this.model,
|
||||
max_tokens: 1,
|
||||
messages: [{ role: 'user', content: 'hi' }],
|
||||
}),
|
||||
});
|
||||
return response.ok;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
52
backend/src/services/ai/lmstudio.ts
Normal file
52
backend/src/services/ai/lmstudio.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import type { AIProvider, AIProviderConfig } from './provider';
|
||||
|
||||
export class LMStudioProvider implements AIProvider {
|
||||
provider = 'lmstudio' as const;
|
||||
private baseUrl: string;
|
||||
private model: string;
|
||||
|
||||
constructor(config: AIProviderConfig) {
|
||||
this.baseUrl = config.baseUrl || 'http://localhost:1234/v1';
|
||||
this.model = config.model || 'local-model';
|
||||
}
|
||||
|
||||
async generate(prompt: string, systemPrompt?: string): Promise<string> {
|
||||
const messages: Array<{ role: string; content: string }> = [];
|
||||
|
||||
if (systemPrompt) {
|
||||
messages.push({ role: 'system', content: systemPrompt });
|
||||
}
|
||||
|
||||
messages.push({ role: 'user', content: prompt });
|
||||
|
||||
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: this.model,
|
||||
messages,
|
||||
temperature: 0.7,
|
||||
max_tokens: 2000,
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.text();
|
||||
throw new Error(`LM Studio API error: ${response.status} ${error}`);
|
||||
}
|
||||
|
||||
const data = await response.json() as { choices: Array<{ message: { content: string } }> };
|
||||
return data.choices[0]?.message?.content || '';
|
||||
}
|
||||
|
||||
async validate(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${this.baseUrl}/models`);
|
||||
return response.ok;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
46
backend/src/services/ai/ollama.ts
Normal file
46
backend/src/services/ai/ollama.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import type { AIProvider, AIProviderConfig } from './provider';
|
||||
|
||||
export class OllamaProvider implements AIProvider {
|
||||
provider = 'ollama' as const;
|
||||
private baseUrl: string;
|
||||
private model: string;
|
||||
|
||||
constructor(config: AIProviderConfig) {
|
||||
this.baseUrl = config.baseUrl || 'http://localhost:11434';
|
||||
this.model = config.model || 'llama3.2';
|
||||
}
|
||||
|
||||
async generate(prompt: string, systemPrompt?: string): Promise<string> {
|
||||
const response = await fetch(`${this.baseUrl}/api/chat`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: this.model,
|
||||
stream: false,
|
||||
messages: [
|
||||
...(systemPrompt ? [{ role: 'system', content: systemPrompt }] : []),
|
||||
{ role: 'user', content: prompt },
|
||||
],
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.text();
|
||||
throw new Error(`Ollama API error: ${response.status} ${error}`);
|
||||
}
|
||||
|
||||
const data = await response.json() as { message: { content: string } };
|
||||
return data.message?.content || '';
|
||||
}
|
||||
|
||||
async validate(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${this.baseUrl}/api/tags`);
|
||||
return response.ok;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
59
backend/src/services/ai/openai.ts
Normal file
59
backend/src/services/ai/openai.ts
Normal file
@@ -0,0 +1,59 @@
|
||||
import type { AIProvider, AIProviderConfig } from './provider';
|
||||
|
||||
export class OpenAIProvider implements AIProvider {
|
||||
provider = 'openai' as const;
|
||||
private apiKey: string;
|
||||
private model: string;
|
||||
private baseUrl: string;
|
||||
|
||||
constructor(config: AIProviderConfig) {
|
||||
this.apiKey = config.apiKey;
|
||||
this.model = config.model || 'gpt-4';
|
||||
this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
|
||||
}
|
||||
|
||||
async generate(prompt: string, systemPrompt?: string): Promise<string> {
|
||||
const messages: Array<{ role: string; content: string }> = [];
|
||||
|
||||
if (systemPrompt) {
|
||||
messages.push({ role: 'system', content: systemPrompt });
|
||||
}
|
||||
|
||||
messages.push({ role: 'user', content: prompt });
|
||||
|
||||
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${this.apiKey}`,
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: this.model,
|
||||
messages,
|
||||
temperature: 0.7,
|
||||
max_tokens: 2000,
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.text();
|
||||
throw new Error(`OpenAI API error: ${response.status} ${error}`);
|
||||
}
|
||||
|
||||
const data = await response.json() as { choices: Array<{ message: { content: string } }> };
|
||||
return data.choices[0]?.message?.content || '';
|
||||
}
|
||||
|
||||
async validate(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${this.baseUrl}/models`, {
|
||||
headers: {
|
||||
'Authorization': `Bearer ${this.apiKey}`,
|
||||
},
|
||||
});
|
||||
return response.ok;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
32
backend/src/services/ai/provider.ts
Normal file
32
backend/src/services/ai/provider.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
 * Contract every AI backend implements.
 */
export interface AIProvider {
  // Literal tag identifying which backend this instance talks to.
  provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio';
  // Generate a completion for `prompt`, optionally steered by `systemPrompt`.
  // Implementations in this package reject with an Error on non-2xx responses.
  generate(prompt: string, systemPrompt?: string): Promise<string>;
  // Optional connectivity/credentials probe; resolves true when the backend
  // is reachable and usable, false otherwise.
  validate?(): Promise<boolean>;
}

/**
 * Configuration consumed by `createAIProvider` and the provider constructors.
 */
export interface AIProviderConfig {
  // Which backend to construct.
  provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio';
  // API key for hosted providers; the local providers (ollama, lmstudio)
  // never read it, so callers pass a placeholder for those.
  apiKey: string;
  // Model identifier; each provider substitutes its own default when omitted.
  model?: string;
  // Endpoint override (e.g. a proxy or non-default local port); each provider
  // substitutes its own default when omitted.
  baseUrl?: string;
}
|
||||
|
||||
import { OpenAIProvider } from './openai';
|
||||
import { AnthropicProvider } from './anthropic';
|
||||
import { OllamaProvider } from './ollama';
|
||||
import { LMStudioProvider } from './lmstudio';
|
||||
|
||||
export function createAIProvider(config: AIProviderConfig): AIProvider {
|
||||
switch (config.provider) {
|
||||
case 'openai':
|
||||
return new OpenAIProvider(config);
|
||||
case 'anthropic':
|
||||
return new AnthropicProvider(config);
|
||||
case 'ollama':
|
||||
return new OllamaProvider(config);
|
||||
case 'lmstudio':
|
||||
return new LMStudioProvider(config);
|
||||
default:
|
||||
throw new Error(`Unknown AI provider: ${config.provider}`);
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user