feat: immutable entries + full task logging

Entries are now immutable once a journal has been generated:
- Edit/delete returns ENTRY_IMMUTABLE error if journal exists
- Frontend shows lock message and hides delete button
- Delete Journal button to unlock entries

Task logging now stores the full JSON payloads:
- request: full JSON request sent to AI provider
- response: full JSON response from AI provider
- prompt: formatted human-readable prompt

Prompt structure:
1. System prompt
2. Previous diary entries (journals)
3. Today's entries
This commit is contained in:
lotherk
2026-03-26 22:05:52 +00:00
parent 5c217853de
commit 754fea73c6
10 changed files with 197 additions and 98 deletions

View File

@@ -1,4 +1,4 @@
import type { AIProvider, AIProviderConfig } from './provider';
import type { AIProvider, AIProviderConfig, AIProviderResult } from './provider';
export class AnthropicProvider implements AIProvider {
provider = 'anthropic' as const;
@@ -12,7 +12,16 @@ export class AnthropicProvider implements AIProvider {
this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
}
async generate(prompt: string, systemPrompt?: string): Promise<string> {
async generate(prompt: string, systemPrompt?: string): Promise<AIProviderResult> {
const requestBody = {
model: this.model,
max_tokens: 2000,
system: systemPrompt,
messages: [
{ role: 'user', content: prompt }
],
};
const response = await fetch(`${this.baseUrl}/messages`, {
method: 'POST',
headers: {
@@ -21,23 +30,22 @@ export class AnthropicProvider implements AIProvider {
'anthropic-version': '2023-06-01',
'anthropic-dangerous-direct-browser-access': 'true',
},
body: JSON.stringify({
model: this.model,
max_tokens: 2000,
system: systemPrompt,
messages: [
{ role: 'user', content: prompt }
],
}),
body: JSON.stringify(requestBody),
});
const responseData = await response.json();
if (!response.ok) {
const error = await response.text();
throw new Error(`Anthropic API error: ${response.status} ${error}`);
throw new Error(`Anthropic API error: ${response.status} ${JSON.stringify(responseData)}`);
}
const data = await response.json() as { content: Array<{ text: string }> };
return data.content[0]?.text || '';
const content = responseData.content?.[0]?.text || '';
return {
content,
request: requestBody,
response: responseData,
};
}
async validate(): Promise<boolean> {

View File

@@ -1,4 +1,4 @@
import type { AIProvider, AIProviderConfig } from './provider';
import type { AIProvider, AIProviderConfig, AIProviderResult } from './provider';
export class GroqProvider implements AIProvider {
provider = 'groq' as const;
@@ -12,7 +12,7 @@ export class GroqProvider implements AIProvider {
this.baseUrl = config.baseUrl || 'https://api.groq.com/openai/v1';
}
async generate(prompt: string, systemPrompt?: string): Promise<string> {
async generate(prompt: string, systemPrompt?: string): Promise<AIProviderResult> {
const messages: Array<{ role: string; content: string }> = [];
if (systemPrompt) {
@@ -21,27 +21,36 @@ export class GroqProvider implements AIProvider {
messages.push({ role: 'user', content: prompt });
const requestBody = {
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
};
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
},
body: JSON.stringify({
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
}),
body: JSON.stringify(requestBody),
});
const responseText = await response.text();
if (!response.ok) {
const error = await response.text();
throw new Error(`Groq API error: ${response.status} ${error}`);
throw new Error(`Groq API error: ${response.status} ${responseText}`);
}
const data = await response.json() as { choices: Array<{ message: { content: string } }> };
return data.choices[0]?.message?.content || '';
const responseData = JSON.parse(responseText);
const content = responseData.choices?.[0]?.message?.content || '';
return {
content,
request: requestBody,
response: responseData,
};
}
async validate(): Promise<boolean> {

View File

@@ -1,4 +1,4 @@
import type { AIProvider, AIProviderConfig } from './provider';
import type { AIProvider, AIProviderConfig, AIProviderResult } from './provider';
export class LMStudioProvider implements AIProvider {
provider = 'lmstudio' as const;
@@ -10,7 +10,7 @@ export class LMStudioProvider implements AIProvider {
this.model = config.model || 'local-model';
}
async generate(prompt: string, systemPrompt?: string): Promise<string> {
async generate(prompt: string, systemPrompt?: string): Promise<AIProviderResult> {
const messages: Array<{ role: string; content: string }> = [];
if (systemPrompt) {
@@ -19,26 +19,34 @@ export class LMStudioProvider implements AIProvider {
messages.push({ role: 'user', content: prompt });
const requestBody = {
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
};
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
}),
body: JSON.stringify(requestBody),
});
const responseData = await response.json();
if (!response.ok) {
const error = await response.text();
throw new Error(`LM Studio API error: ${response.status} ${error}`);
throw new Error(`LM Studio API error: ${response.status} ${JSON.stringify(responseData)}`);
}
const data = await response.json() as { choices: Array<{ message: { content: string } }> };
return data.choices[0]?.message?.content || '';
const content = responseData.choices?.[0]?.message?.content || '';
return {
content,
request: requestBody,
response: responseData,
};
}
async validate(): Promise<boolean> {

View File

@@ -1,4 +1,4 @@
import type { AIProvider, AIProviderConfig } from './provider';
import type { AIProvider, AIProviderConfig, AIProviderResult } from './provider';
export class OllamaProvider implements AIProvider {
provider = 'ollama' as const;
@@ -10,29 +10,37 @@ export class OllamaProvider implements AIProvider {
this.model = config.model || 'llama3.2';
}
async generate(prompt: string, systemPrompt?: string): Promise<string> {
async generate(prompt: string, systemPrompt?: string): Promise<AIProviderResult> {
const requestBody = {
model: this.model,
stream: false,
messages: [
...(systemPrompt ? [{ role: 'system', content: systemPrompt }] : []),
{ role: 'user', content: prompt },
],
};
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: this.model,
stream: false,
messages: [
...(systemPrompt ? [{ role: 'system', content: systemPrompt }] : []),
{ role: 'user', content: prompt },
],
}),
body: JSON.stringify(requestBody),
});
const responseData = await response.json();
if (!response.ok) {
const error = await response.text();
throw new Error(`Ollama API error: ${response.status} ${error}`);
throw new Error(`Ollama API error: ${response.status} ${JSON.stringify(responseData)}`);
}
const data = await response.json() as { message: { content: string } };
return data.message?.content || '';
const content = responseData.message?.content || '';
return {
content,
request: requestBody,
response: responseData,
};
}
async validate(): Promise<boolean> {

View File

@@ -1,4 +1,4 @@
import type { AIProvider, AIProviderConfig } from './provider';
import type { AIProvider, AIProviderConfig, AIProviderResult } from './provider';
export class OpenAIProvider implements AIProvider {
provider = 'openai' as const;
@@ -12,7 +12,7 @@ export class OpenAIProvider implements AIProvider {
this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
}
async generate(prompt: string, systemPrompt?: string): Promise<string> {
async generate(prompt: string, systemPrompt?: string): Promise<AIProviderResult> {
const messages: Array<{ role: string; content: string }> = [];
if (systemPrompt) {
@@ -21,27 +21,35 @@ export class OpenAIProvider implements AIProvider {
messages.push({ role: 'user', content: prompt });
const requestBody = {
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
};
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
},
body: JSON.stringify({
model: this.model,
messages,
temperature: 0.7,
max_tokens: 2000,
}),
body: JSON.stringify(requestBody),
});
const responseData = await response.json();
if (!response.ok) {
const error = await response.text();
throw new Error(`OpenAI API error: ${response.status} ${error}`);
throw new Error(`OpenAI API error: ${response.status} ${JSON.stringify(responseData)}`);
}
const data = await response.json() as { choices: Array<{ message: { content: string } }> };
return data.choices[0]?.message?.content || '';
const content = responseData.choices?.[0]?.message?.content || '';
return {
content,
request: requestBody,
response: responseData,
};
}
async validate(): Promise<boolean> {

View File

@@ -1,6 +1,12 @@
export interface AIProviderResult {
content: string;
request: Record<string, unknown>;
response: Record<string, unknown>;
}
export interface AIProvider {
provider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio' | 'groq';
generate(prompt: string, systemPrompt?: string): Promise<string>;
generate(prompt: string, systemPrompt?: string): Promise<AIProviderResult>;
validate?(): Promise<boolean>;
}