feat: v0.0.1 - Groq provider, timezone, journal context, test connection, task logging

Added:
- Groq AI provider (free, fast with llama-3.3-70b-versatile)
- Timezone setting (22 timezones)
- Journal context: include previous journals (3/7/14/30 days)
- Test connection button for AI providers
- Per-provider settings (API key, model, base URL remembered)
- Detailed task logging (full prompts and responses)
- Tasks page with expandable details
- Progress modal with steps and AI output details

Fixed:
- Groq API endpoint (https://api.groq.com/openai/v1/chat/completions)
- Ollama baseUrl leaking to other providers
- Database schema references
- Proper Prisma migrations (data-safe)

Changed:
- Default AI: OpenAI → Groq
- Project renamed: TotalRecall → DearDiary
- Strict anti-hallucination prompt
- Docker uses prisma migrate deploy (non-destructive)
This commit is contained in:
lotherk
2026-03-26 21:56:29 +00:00
parent 37871271cc
commit 5c217853de
27 changed files with 1026 additions and 260 deletions

View File

@@ -7,6 +7,7 @@ import bcrypt from 'bcryptjs';
import { createHash, randomBytes } from 'crypto';
import * as jose from 'jose';
import { Prisma } from '@prisma/client';
import { createAIProvider } from './services/ai/provider';
const app = new Hono();
@@ -19,7 +20,7 @@ app.use('*', cors({
}));
const prisma = new PrismaClient({
datasourceUrl: envVars.DATABASE_URL || 'file:./data/totalrecall.db',
datasourceUrl: envVars.DATABASE_URL || 'file:./data/deardiary.db',
});
app.use('*', async (c, next) => {
@@ -285,9 +286,9 @@ app.post('/api/v1/journal/generate/:date', async (c) => {
return c.json({ data: null, error: { code: 'NO_ENTRIES', message: 'No entries found for this date' } }, 400);
}
const provider = settings?.aiProvider || 'openai';
const provider = settings?.aiProvider || 'groq';
if ((provider === 'openai' || provider === 'anthropic') && !settings?.aiApiKey) {
if ((provider === 'openai' || provider === 'anthropic' || provider === 'groq') && !settings?.aiApiKey) {
return c.json({ data: null, error: { code: 'NO_AI_CONFIG', message: 'AI not configured. Please set up your API key in settings.' } }, 400);
}
@@ -303,14 +304,43 @@ app.post('/api/v1/journal/generate/:date', async (c) => {
return text;
}).join('\n\n');
// Get previous journals for context
const contextDays = settings?.journalContextDays || 0;
let previousJournalsText = '';
if (contextDays > 0) {
const contextStartDate = new Date(date);
contextStartDate.setDate(contextStartDate.getDate() - contextDays);
const previousJournals = await prisma.journal.findMany({
where: {
userId,
date: {
gte: contextStartDate.toISOString().split('T')[0],
lt: date,
},
},
orderBy: { date: 'desc' },
select: { date: true, content: true, generatedAt: true },
});
if (previousJournals.length > 0) {
previousJournalsText = `\n\nPREVIOUS JOURNAL SUMMARY (last ${contextDays} days for context):\n${previousJournals.map(j =>
`[${j.date}]\n${j.content}`
).join('\n\n')}\n`;
}
}
const systemPrompt = settings?.journalPrompt || 'You are a thoughtful journal writer.';
const userPrompt = `The following entries were captured throughout the day (${date}). Write a thoughtful, reflective journal entry.
const userPrompt = `${previousJournalsText}The following entries were captured throughout the day (${date}). Write a thoughtful, reflective journal entry.
ENTRIES:
${entriesText}
JOURNAL:`;
console.log(`[Journal Generate] Date: ${date}, Context days: ${contextDays}, Entries: ${entries.length}`);
// Create placeholder journal and task
const placeholderJournal = await prisma.journal.create({
data: { userId, date, content: 'Generating...', entryCount: entries.length },
@@ -324,7 +354,8 @@ JOURNAL:`;
status: 'pending',
provider,
model: settings?.aiModel,
prompt: `${systemPrompt}\n\n${userPrompt}`,
prompt: userPrompt,
request: systemPrompt,
},
});
@@ -334,128 +365,34 @@ JOURNAL:`;
data: { id: placeholderJournal.id },
});
let requestBody: any = null;
let responseBody: any = null;
let content = '';
try {
if (provider === 'openai') {
requestBody = {
model: settings?.aiModel || 'gpt-4',
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt }
],
temperature: 0.7,
max_tokens: 2000,
};
const response = await fetch('https://api.openai.com/v1/chat/completions', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${settings?.aiApiKey}`,
},
body: JSON.stringify(requestBody),
});
responseBody = await response.json();
if (!response.ok) {
throw new Error(`OpenAI API error: ${response.status} ${JSON.stringify(responseBody)}`);
}
content = responseBody.choices?.[0]?.message?.content || '';
} else if (provider === 'anthropic') {
requestBody = {
model: settings?.aiModel || 'claude-3-sonnet-20240229',
max_tokens: 2000,
system: systemPrompt,
messages: [{ role: 'user', content: userPrompt }],
};
const response = await fetch('https://api.anthropic.com/v1/messages', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'x-api-key': settings?.aiApiKey,
'anthropic-version': '2023-06-01',
},
body: JSON.stringify(requestBody),
});
responseBody = await response.json();
if (!response.ok) {
throw new Error(`Anthropic API error: ${response.status} ${JSON.stringify(responseBody)}`);
}
content = responseBody.content?.[0]?.text || '';
} else if (provider === 'ollama') {
const baseUrl = settings?.aiBaseUrl || 'http://localhost:11434';
requestBody = {
model: settings?.aiModel || 'llama3.2',
stream: false,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt }
],
};
const response = await fetch(`${baseUrl}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody),
});
responseBody = await response.json();
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status} ${JSON.stringify(responseBody)}`);
}
content = responseBody.message?.content || '';
} else if (provider === 'lmstudio') {
const baseUrl = settings?.aiBaseUrl || 'http://localhost:1234/v1';
requestBody = {
model: settings?.aiModel || 'local-model',
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt }
],
temperature: 0.7,
max_tokens: 2000,
};
const response = await fetch(`${baseUrl}/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody),
});
responseBody = await response.json();
if (!response.ok) {
throw new Error(`LM Studio API error: ${response.status} ${JSON.stringify(responseBody)}`);
}
content = responseBody.choices?.[0]?.message?.content || '';
}
console.log(`[Journal Generate] Using provider: ${provider}`);
const aiProvider = createAIProvider({
provider: provider as 'openai' | 'anthropic' | 'ollama' | 'lmstudio' | 'groq',
apiKey: settings?.aiApiKey || '',
model: settings?.aiModel || undefined,
baseUrl: (provider === 'ollama' || provider === 'lmstudio') ? settings?.aiBaseUrl || undefined : undefined,
});
console.log(`[Journal Generate] AI Provider created: ${aiProvider.provider}`);
content = await aiProvider.generate(userPrompt, systemPrompt);
if (!content) {
throw new Error('No content generated from AI');
}
// Update task with success
console.log(`[Journal Generate] Success! Content length: ${content.length}`);
// Update task with success - store full prompt and response
await prisma.task.update({
where: { id: task.id },
data: {
status: 'completed',
request: JSON.stringify(requestBody, null, 2),
response: JSON.stringify(responseBody, null, 2),
response: content,
completedAt: new Date(),
},
});
@@ -554,15 +491,17 @@ app.put('/api/v1/settings', async (c) => {
if (!userId) return c.json({ data: null, error: { code: 'UNAUTHORIZED', message: 'Invalid API key' } }, 401);
const body = await c.req.json();
const { aiProvider, aiApiKey, aiModel, aiBaseUrl, journalPrompt, language } = body;
const { aiProvider, aiApiKey, aiModel, aiBaseUrl, journalPrompt, language, providerSettings, journalContextDays } = body;
const data: Record<string, unknown> = {};
if (aiProvider !== undefined) data.aiProvider = aiProvider;
if (aiApiKey !== undefined) data.aiApiKey = aiApiKey;
if (aiModel !== undefined) data.aiModel = aiModel || 'gpt-4';
if (aiModel !== undefined) data.aiModel = aiModel;
if (aiBaseUrl !== undefined) data.aiBaseUrl = aiBaseUrl;
if (journalPrompt !== undefined) data.journalPrompt = journalPrompt;
if (language !== undefined) data.language = language;
if (providerSettings !== undefined) data.providerSettings = JSON.stringify(providerSettings);
if (journalContextDays !== undefined) data.journalContextDays = journalContextDays;
const settings = await prisma.settings.upsert({
where: { userId },
@@ -570,9 +509,57 @@ app.put('/api/v1/settings', async (c) => {
update: data,
});
if (settings.providerSettings) {
try {
(settings as any).providerSettings = JSON.parse(settings.providerSettings as string);
} catch {
(settings as any).providerSettings = {};
}
}
return c.json({ data: settings, error: null });
});
// POST /api/v1/ai/test — dry-run an AI provider configuration supplied in the
// request body (provider, apiKey, model, baseUrl) without persisting anything.
// Responds { valid: true } only when the model answers the probe prompt with "OK".
app.post('/api/v1/ai/test', async (c) => {
  const userId = await getUserId(c);
  if (!userId) return c.json({ data: null, error: { code: 'UNAUTHORIZED', message: 'Invalid API key' } }, 401);

  const { provider, apiKey, model, baseUrl } = await c.req.json();

  console.log(`[AI Test] Provider: ${provider}, Model: ${model || 'default'}, BaseURL: ${baseUrl || 'default'}`);
  console.log(`[AI Test] API Key set: ${!!apiKey}, Length: ${apiKey?.length || 0}`);

  // Provider is the only hard requirement; key/model/baseUrl fall back to
  // provider defaults inside createAIProvider.
  if (!provider) {
    return c.json({ data: null, error: { code: 'VALIDATION_ERROR', message: 'provider is required' } }, 400);
  }

  try {
    const client = createAIProvider({
      provider,
      apiKey: apiKey || '',
      model: model || undefined,
      baseUrl: baseUrl || undefined,
    });
    console.log(`[AI Test] Creating provider: ${client.provider}`);

    const reply = await client.generate('Say "OK" if you can read this.', 'You are a test assistant. Respond with just "OK".');
    console.log(`[AI Test] Success! Response length: ${reply.length}`);

    // Case-insensitive substring match: any answer containing "ok" counts.
    if (!reply.toLowerCase().includes('ok')) {
      return c.json({ data: { valid: false }, error: { code: 'TEST_FAILED', message: 'Model responded but with unexpected output' } });
    }
    return c.json({ data: { valid: true, message: 'Connection successful!' }, error: null });
  } catch (err) {
    const message = err instanceof Error ? err.message : 'Connection failed';
    console.error(`[AI Test] Error: ${message}`);
    console.error(`[AI Test] Stack: ${err instanceof Error ? err.stack : 'N/A'}`);
    return c.json({ data: { valid: false }, error: { code: 'TEST_FAILED', message } });
  }
});
app.notFound((c) => c.json({ data: null, error: { code: 'NOT_FOUND', message: 'Not found' } }, 404));
app.onError((err, c) => {
console.error('Unhandled error:', err);

View File

@@ -6,6 +6,7 @@ import * as jose from 'jose';
function createAuthRoutes() {
const app = new Hono<HonoEnv>();
const authRoutes = app;
authRoutes.post('/register', async (c) => {
const { email, password } = await c.req.json();
@@ -113,3 +114,8 @@ authRoutes.post('/api-key', async (c) => {
return c.json({ data: { apiKey, id: keyRecord.id, name: keyRecord.name }, error: null }, 201);
});
return app;
}
export default createAuthRoutes;

View File

@@ -31,7 +31,7 @@ settingsRoutes.put('/', async (c) => {
const data: Record<string, unknown> = {};
if (aiProvider !== undefined) data.aiProvider = aiProvider;
if (aiApiKey !== undefined) data.aiApiKey = aiApiKey;
if (aiModel !== undefined) data.aiModel = aiModel || 'gpt-4';
if (aiModel !== undefined) data.aiModel = aiModel;
if (aiBaseUrl !== undefined) data.aiBaseUrl = aiBaseUrl;
if (journalPrompt !== undefined) data.journalPrompt = journalPrompt;
if (language !== undefined) data.language = language;
@@ -67,3 +67,34 @@ settingsRoutes.post('/validate-key', async (c) => {
return c.json({ data: { valid: false }, error: null });
}
});
// POST /test — probe an AI provider configuration from the request body
// without saving it. Returns { valid: true } only when the model's reply
// to the probe prompt contains "OK" (case-insensitive).
settingsRoutes.post('/test', async (c) => {
  const { provider, apiKey, model, baseUrl } = await c.req.json();

  if (!provider) {
    return c.json({ data: null, error: { code: 'VALIDATION_ERROR', message: 'provider is required' } }, 400);
  }

  // Loaded on demand via dynamic import rather than at module top level.
  const { createAIProvider } = await import('../services/ai/provider');

  try {
    const client = createAIProvider({
      provider,
      apiKey: apiKey || '',
      model: model || undefined,
      baseUrl: baseUrl || undefined,
    });
    const reply = await client.generate('Say "OK" if you can read this.', 'You are a test assistant. Respond with just "OK".');
    return reply.toLowerCase().includes('ok')
      ? c.json({ data: { valid: true, message: 'Connection successful!' }, error: null })
      : c.json({ data: { valid: false }, error: { code: 'TEST_FAILED', message: 'Model responded but with unexpected output' } });
  } catch (err) {
    const message = err instanceof Error ? err.message : 'Connection failed';
    return c.json({ data: { valid: false }, error: { code: 'TEST_FAILED', message } });
  }
});

View File

@@ -0,0 +1,66 @@
import type { AIProvider, AIProviderConfig } from './provider';
/**
 * AI provider backed by Groq's OpenAI-compatible chat-completions API.
 * Defaults: model `llama-3.3-70b-versatile`, base URL `https://api.groq.com/openai/v1`.
 */
export class GroqProvider implements AIProvider {
  provider = 'groq' as const;

  private apiKey: string;
  private model: string;
  private baseUrl: string;

  constructor(config: AIProviderConfig) {
    this.apiKey = config.apiKey;
    this.model = config.model || 'llama-3.3-70b-versatile';
    this.baseUrl = config.baseUrl || 'https://api.groq.com/openai/v1';
  }

  /**
   * Request a completion for `prompt`, optionally preceded by a system message.
   * Returns the first choice's content, or '' when the response has no choices.
   * @throws Error with the HTTP status and response body on a non-2xx reply.
   */
  async generate(prompt: string, systemPrompt?: string): Promise<string> {
    const messages: Array<{ role: string; content: string }> = systemPrompt
      ? [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: prompt },
        ]
      : [{ role: 'user', content: prompt }];

    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({
        model: this.model,
        messages,
        temperature: 0.7,
        max_tokens: 2000,
      }),
    });

    if (!res.ok) {
      const error = await res.text();
      throw new Error(`Groq API error: ${res.status} ${error}`);
    }

    const payload = await res.json() as { choices: Array<{ message: { content: string } }> };
    return payload.choices[0]?.message?.content || '';
  }

  /**
   * Cheap connectivity/credentials check using a minimal completion request.
   * A 400 also counts as valid — the endpoint and key were reachable even if
   * the request shape was rejected. Network failures resolve to false.
   */
  async validate(): Promise<boolean> {
    try {
      const res = await fetch(`${this.baseUrl}/chat/completions`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${this.apiKey}`,
        },
        body: JSON.stringify({
          model: this.model,
          messages: [{ role: 'user', content: 'test' }],
          max_tokens: 5,
        }),
      });
      return res.ok || res.status === 400;
    } catch {
      return false;
    }
  }
}

View File

@@ -1,11 +1,11 @@
export interface AIProvider {
provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio';
provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio' | 'groq';
generate(prompt: string, systemPrompt?: string): Promise<string>;
validate?(): Promise<boolean>;
}
export interface AIProviderConfig {
provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio';
provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio' | 'groq';
apiKey: string;
model?: string;
baseUrl?: string;
@@ -15,6 +15,7 @@ import { OpenAIProvider } from './openai';
import { AnthropicProvider } from './anthropic';
import { OllamaProvider } from './ollama';
import { LMStudioProvider } from './lmstudio';
import { GroqProvider } from './groq';
export function createAIProvider(config: AIProviderConfig): AIProvider {
switch (config.provider) {
@@ -26,6 +27,8 @@ export function createAIProvider(config: AIProviderConfig): AIProvider {
return new OllamaProvider(config);
case 'lmstudio':
return new LMStudioProvider(config);
case 'groq':
return new GroqProvider(config);
default:
throw new Error(`Unknown AI provider: ${config.provider}`);
}