#!/usr/bin/env python3
"""
Script to generate HTML pages for LLM conversations listed in result.md
This script reads from the LLM logs database and generates static HTML files
that can be served at the URLs referenced in result.md.
"""
import sqlite3
import re
import os
from pathlib import Path
from datetime import datetime
import html
import json
def extract_conversation_ids_from_result_md(result_md_path):
    """Extract conversation IDs from a result.md file.

    Scans the file for conversation-page URLs of the form
    ``https://danny.spesh.com/ai/datasette/llm/conversations/<id>.html``
    and returns the captured ``<id>`` values in order of appearance.

    Args:
        result_md_path: Path to the markdown file to scan.

    Returns:
        list[str]: Conversation IDs (may contain duplicates if the same
        URL appears more than once in the file).
    """
    pattern = r'https://danny\.spesh\.com/ai/datasette/llm/conversations/([a-z0-9]+)\.html'
    # Explicit encoding so the result does not depend on the platform's
    # default locale.
    with open(result_md_path, 'r', encoding='utf-8') as f:
        content = f.read()
    # re.findall already returns just the captured group for each match,
    # so the original manual accumulation loop was redundant.
    return re.findall(pattern, content)
def get_conversation_data(db_path, conversation_id):
    """Fetch a conversation and all of its responses from the logs database.

    Args:
        db_path: Path to the SQLite logs database file.
        conversation_id: Primary key of the conversation to load.

    Returns:
        dict with keys ``'conversation'`` (the conversation row as a dict)
        and ``'responses'`` (list of response-row dicts ordered by
        ``datetime_utc``), or ``None`` when the conversation id is not
        present in the database.
    """
    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row
    # try/finally guarantees the connection is closed on every path.
    # Bug fix: the original returned None for a missing conversation
    # without closing the connection, leaking a handle per miss.
    try:
        conv_row = conn.execute(
            "SELECT id, name, model FROM conversations WHERE id = ?",
            (conversation_id,),
        ).fetchone()
        if not conv_row:
            return None
        responses = conn.execute(
            """
            SELECT id, model, prompt, response, datetime_utc, input_tokens, output_tokens
            FROM responses
            WHERE conversation_id = ?
            ORDER BY datetime_utc
            """,
            (conversation_id,),
        ).fetchall()
        return {
            'conversation': dict(conv_row),
            'responses': [dict(row) for row in responses],
        }
    finally:
        conn.close()
def generate_html(conversation_data):
    """Render a minimal HTML page for a conversation.

    Currently only the HTML-escaped conversation name is emitted; the
    individual responses are not rendered.
    TODO: render conversation_data['responses'] into the page as well.

    Args:
        conversation_data: dict with a ``'conversation'`` row dict
            (must contain a ``'name'`` key) and a ``'responses'`` list.

    Returns:
        str: The generated HTML content.
    """
    conv = conversation_data['conversation']
    # Removed the unused `responses` local from the original; the data is
    # still available via conversation_data when rendering is implemented.
    # Escape the name so user-supplied titles cannot inject markup.
    return f"""
{html.escape(conv['name'])}
"""
def main():
    """Generate one static HTML file per conversation referenced in result.md.

    Reads conversation IDs from result.md, looks each one up in the local
    llm logs database, and writes ``<id>.html`` files into a directory
    tree mirroring the URL structure used in result.md.
    """
    result_md_path = "result.md"
    # NOTE(review): machine-specific absolute path; adjust as needed.
    db_path = "/Users/danny/Library/Application Support/io.datasette.llm/logs.db"
    output_dir = "./datasette/llm/conversations"  # Mirror the URL structure

    # Create output directory (including parents) if it does not exist.
    Path(output_dir).mkdir(parents=True, exist_ok=True)

    conversation_ids = extract_conversation_ids_from_result_md(result_md_path)
    print(f"Found {len(conversation_ids)} conversation IDs in result.md")

    # Bug fix: count only files actually written. The original reported
    # len(conversation_ids) at the end even when some conversations were
    # skipped because they were missing from the database.
    generated = 0
    for conv_id in conversation_ids:
        print(f"Processing conversation {conv_id}...")
        conv_data = get_conversation_data(db_path, conv_id)
        if not conv_data:
            # Replaced mojibake status markers from the original with
            # plain-ASCII text.
            print(f"  WARNING: Conversation {conv_id} not found in database")
            continue
        html_content = generate_html(conv_data)
        output_file = Path(output_dir) / f"{conv_id}.html"
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(html_content)
        generated += 1
        print(f"  Generated {output_file}")

    print(f"\nDone: generated {generated} HTML files in {output_dir}/")
    print("\nTo serve these files, you can:")
    # Bug fix: the original said to copy the 'ai' directory, but the
    # files are written under './datasette/...'.
    print("1. Copy the 'datasette' directory to your web server's document root")
    print("2. Or use a simple HTTP server: python -m http.server 8000")


if __name__ == "__main__":
    main()