diff --git a/CHANGELOG.md b/CHANGELOG.md
index d2431c7..79ee566 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,16 @@
# Changelog
+## 2026-04-27
+
+- Add Daily Activities view: per-day, per-project bulleted activity summaries inferred by Haiku via the local `claude` CLI
+- Summaries are inferred lazily on demand: clicking a day fans out one `/api/cell-summary` request per project so they stream in parallel
+- Cache invalidated by sha256 hash of the day's user prompts
+- Day-row cost matches the sum of per-cell costs (turn-based attribution; sessions that span multiple days no longer pile onto their last day)
+- Recent Sessions table now paginates the full filtered list (50 per page) instead of capping at 20
+- Click a session row to expand inline activity bullets summarizing what happened in that session (cached via new `session_summaries` table)
+- New env var: `SUMMARY_MODEL` (default: `haiku`)
+- New `daily_summaries` and `session_summaries` tables (auto-created via `CREATE TABLE IF NOT EXISTS`)
+
## 2026-04-26
- Add "Setup for non-technical users (macOS)" section to README
diff --git a/cli.py b/cli.py
index 680fca4..67dfc31 100644
--- a/cli.py
+++ b/cli.py
@@ -390,7 +390,7 @@ def cmd_stats():
def cmd_dashboard(projects_dir=None, host=None, port=None):
import webbrowser
import threading
- import time
+ import time as _time
print("Running scan first...")
cmd_scan(projects_dir=projects_dir)
@@ -402,7 +402,7 @@ def cmd_dashboard(projects_dir=None, host=None, port=None):
port = int(port or os.environ.get("PORT", "8080"))
def open_browser():
- time.sleep(1.0)
+ _time.sleep(1.0)
webbrowser.open(f"http://{host}:{port}")
t = threading.Thread(target=open_browser, daemon=True)
diff --git a/dashboard.py b/dashboard.py
index 7aac064..99489bb 100644
--- a/dashboard.py
+++ b/dashboard.py
@@ -106,6 +106,7 @@ def get_dashboard_data(db_path=DB_PATH):
duration_min = 0
sessions_all.append({
"session_id": r["session_id"][:8],
+ "session_id_full": r["session_id"],
"project": r["project_name"] or "unknown",
"branch": r["git_branch"] or "",
"last": (r["last_timestamp"] or "")[:16].replace("T", " "),
@@ -143,6 +144,159 @@ def get_dashboard_data(db_path=DB_PATH):
}
+def _day_cell_costs_and_cached(date, db_path):
+ """Return (cell_costs: {cwd: usd}, cached: {cwd: activities}) for a
+ single date. Shared between the day-level and cell-level routes."""
+ from cli import calc_cost
+ conn = sqlite3.connect(db_path)
+ conn.row_factory = sqlite3.Row
+ try:
+ rows = conn.execute("""
+ SELECT cwd, model,
+ SUM(input_tokens) AS inp,
+ SUM(output_tokens) AS out,
+ SUM(cache_read_tokens) AS cr,
+ SUM(cache_creation_tokens) AS cw
+ FROM turns
+ WHERE substr(timestamp, 1, 10) = ?
+ AND cwd IS NOT NULL AND cwd != ''
+ GROUP BY cwd, model
+ """, (date,)).fetchall()
+ cell_costs = {}
+ for r in rows:
+ cost = calc_cost(r["model"], r["inp"] or 0, r["out"] or 0,
+ r["cr"] or 0, r["cw"] or 0)
+ cell_costs[r["cwd"]] = cell_costs.get(r["cwd"], 0.0) + cost
+
+ if _table_exists(conn, "daily_summaries"):
+ cached_rows = conn.execute("""
+ SELECT project_path, activities
+ FROM daily_summaries
+ WHERE summary_date = ?
+ """, (date,)).fetchall()
+ cached = {r["project_path"]: json.loads(r["activities"])
+ for r in cached_rows}
+ else:
+ cached = {}
+ finally:
+ conn.close()
+ return cell_costs, cached
+
+
+def get_daily_summaries(date, db_path=None, projects_dirs=None):
+ """
+ Return the day's cell list immediately. Cached cells include their
+ activities; cells without a cached summary are returned with
+ pending=True so the client can fetch each one in parallel via
+ /api/cell-summary. This keeps the day-level endpoint instant and lets
+ summaries stream in instead of blocking on a sequential 30-60 s per
+ cell synchronous loop.
+ """
+ import scanner
+ if db_path is None:
+ db_path = DB_PATH
+ if projects_dirs is None:
+ projects_dirs = scanner.DEFAULT_PROJECTS_DIRS
+ if not _date_is_valid(date):
+ return {"date": date, "cells": [], "error": "invalid_date"}
+
+ cell_costs, cached = _day_cell_costs_and_cached(date, db_path)
+ all_cwds = sorted(set(cell_costs.keys()) | set(cached.keys()))
+
+ cells = []
+ for cwd in all_cwds:
+ cost = cell_costs.get(cwd, 0.0)
+ if cwd in cached:
+ cells.append({
+ "project": cwd, "cost": round(cost, 4),
+ "activities": cached[cwd], "error": None,
+ "pending": False,
+ })
+ else:
+ cells.append({
+ "project": cwd, "cost": round(cost, 4),
+ "activities": None, "error": None,
+ "pending": True,
+ })
+ return {"date": date, "cells": cells}
+
+
+def get_session_summary(session_id, db_path=None, projects_dirs=None):
+ """
+ Run summarize_session for one session_id and return the activity bullets.
+ Mirrors get_cell_summary so the frontend can fetch per-row on expand.
+ """
+ import summarizer, scanner
+ if db_path is None:
+ db_path = DB_PATH
+ if projects_dirs is None:
+ projects_dirs = scanner.DEFAULT_PROJECTS_DIRS
+ if not isinstance(session_id, str) or not session_id.strip():
+ return {"session_id": session_id, "error": "invalid_session_id"}
+ # Reject anything that doesn't look like a UUID-ish session id; the
+ # value is interpolated into a JSONL filename, so a path-traversal
+ # attempt here would otherwise let a request escape projects_dirs.
+ if not all(c.isalnum() or c in "-_" for c in session_id) or len(session_id) > 64:
+ return {"session_id": session_id, "error": "invalid_session_id"}
+ result = summarizer.summarize_session(
+ session_id=session_id,
+ db_path=db_path,
+ projects_dirs=projects_dirs,
+ )
+ return {
+ "session_id": session_id,
+ "activities": result["activities"],
+ "error": result["error"],
+ "pending": False,
+ }
+
+
+def get_cell_summary(date, cwd, db_path=None, projects_dirs=None):
+ """
+ Run summarize_cell for one (date, cwd) and return the single cell.
+ The day-level endpoint defers to this so the client can parallelize.
+ """
+ import summarizer, scanner
+ if db_path is None:
+ db_path = DB_PATH
+ if projects_dirs is None:
+ projects_dirs = scanner.DEFAULT_PROJECTS_DIRS
+ if not _date_is_valid(date):
+ return {"date": date, "project": cwd, "error": "invalid_date"}
+ if not isinstance(cwd, str) or not cwd.strip():
+ return {"date": date, "project": cwd, "error": "invalid_cwd"}
+
+ cell_costs, cached = _day_cell_costs_and_cached(date, db_path)
+ cost = cell_costs.get(cwd, 0.0)
+
+ if cwd in cached:
+ return {
+ "date": date, "project": cwd, "cost": round(cost, 4),
+ "activities": cached[cwd], "error": None,
+ "pending": False,
+ }
+ result = summarizer.summarize_cell(
+ date=date, cwd=cwd, cost_usd=cost,
+ db_path=db_path, projects_dirs=projects_dirs,
+ )
+ return {
+ "date": date, "project": cwd, "cost": round(cost, 4),
+ "activities": result["activities"],
+ "error": result["error"],
+ "pending": False,
+ }
+
+
+def _date_is_valid(date):
+ if not isinstance(date, str) or len(date) != 10:
+ return False
+ try:
+ datetime.strptime(date, "%Y-%m-%d")
+ return True
+ except ValueError:
+ return False
+
+
HTML_TEMPLATE = r"""
@@ -235,6 +389,18 @@ def get_dashboard_data(db_path=DB_PATH):
.section-header .section-title { margin-bottom: 0; }
.export-btn { background: var(--card); border: 1px solid var(--border); color: var(--muted); padding: 3px 10px; border-radius: 5px; cursor: pointer; font-size: 11px; }
.export-btn:hover { color: var(--text); border-color: var(--accent); }
+ .pager { display: flex; gap: 8px; align-items: center; justify-content: flex-end; margin-top: 10px; color: var(--muted); font-size: 12px; }
+ .pager button { background: var(--card); border: 1px solid var(--border); color: var(--text); padding: 3px 9px; border-radius: 5px; cursor: pointer; font-size: 12px; }
+ .pager button:disabled { opacity: 0.4; cursor: default; }
+ .pager button:not(:disabled):hover { border-color: var(--accent); }
+ tr.session-row { cursor: pointer; }
+ tr.session-row:hover td { background: var(--hover, rgba(255,255,255,0.03)); }
+ tr.session-detail-row td { background: var(--card); padding: 10px 14px 12px 38px; border-top: none; }
+ tr.session-detail-row .activities { margin: 0; padding-left: 18px; }
+ tr.session-detail-row .activities li { margin: 2px 0; }
+ tr.session-detail-row .spinner { color: var(--muted); font-style: italic; }
+ tr.session-detail-row .err { color: #c0392b; }
+ tr.session-detail-row .err button { margin-left: 8px; font-size: 0.85em; }
.table-card { background: var(--card); border: 1px solid var(--border); border-radius: 8px; padding: 20px; margin-bottom: 24px; overflow-x: auto; }
footer { border-top: 1px solid var(--border); padding: 20px 24px; margin-top: 8px; }
@@ -245,6 +411,25 @@ def get_dashboard_data(db_path=DB_PATH):
.footer-content a:hover { text-decoration: underline; }
@media (max-width: 768px) { .charts-grid { grid-template-columns: 1fr; } .chart-card.wide { grid-column: 1; } }
+
+#daily-activities { margin-top: 32px; }
+#daily-activities h2 { margin-bottom: 12px; }
+#daily-activities .day-row { border: 1px solid var(--border); border-radius: 4px; margin-bottom: 8px; padding: 0; background: var(--card); }
+#daily-activities .day-row summary { padding: 10px 14px; cursor: pointer; font-weight: 500; display: flex; gap: 12px; align-items: center; }
+#daily-activities .day-row summary::-webkit-details-marker { display: none; }
+#daily-activities .day-row summary::before { content: "▶"; font-size: 0.7em; color: var(--muted); transition: transform 0.15s; }
+#daily-activities .day-row[open] summary::before { transform: rotate(90deg); }
+#daily-activities .day-meta { color: var(--muted); font-weight: normal; font-size: 0.9em; }
+#daily-activities .day-cost { margin-left: auto; font-variant-numeric: tabular-nums; }
+#daily-activities .project-block { padding: 8px 14px 8px 32px; border-top: 1px solid var(--border); }
+#daily-activities .project-name { font-weight: 500; display: flex; align-items: center; gap: 6px; }
+#daily-activities .project-cost { color: var(--muted); font-variant-numeric: tabular-nums; margin-left: auto; }
+#daily-activities ul.activities { margin: 6px 0 0 0; padding-left: 20px; }
+#daily-activities ul.activities li { margin: 2px 0; }
+#daily-activities .spinner { color: var(--muted); font-style: italic; padding: 4px 0; }
+#daily-activities .err { color: #c0392b; padding: 4px 0; }
+#daily-activities .err button { margin-left: 8px; font-size: 0.85em; }
+#daily-activities .banner { padding: 10px 14px; background: #fff3cd; border: 1px solid #ffe599; border-radius: 4px; margin-bottom: 12px; }
@@ -328,6 +513,14 @@ def get_dashboard_data(db_path=DB_PATH):
@@ -413,6 +607,9 @@ def get_dashboard_data(db_path=DB_PATH):
let lastByProject = [];
let lastByProjectBranch = [];
let sessionSortDir = 'desc';
+const SESSIONS_PAGE_SIZE = 50;
+let sessionPage = 1;
+const sessionState = { fetched: new Set(), inFlight: new Set() };
let hourlyTZ = 'local'; // 'local' or 'utc'
// ── Peak-hour config ───────────────────────────────────────────────────────
@@ -855,10 +1052,18 @@ def get_dashboard_data(db_path=DB_PATH):
lastFilteredSessions = sortSessions(filteredSessions);
lastByProject = sortProjects(byProject);
lastByProjectBranch = sortProjectBranch(byProjectBranch);
- renderSessionsTable(lastFilteredSessions.slice(0, 20));
+ // Reset pagination when the underlying list changes (range/model
+ // filters, sort, auto-refresh) — keeping a stale page number could
+ // jump the user to an empty page.
+ sessionPage = 1;
+ renderSessionsPage();
renderModelCostTable(byModel);
renderProjectCostTable(lastByProject.slice(0, 20));
renderProjectBranchCostTable(lastByProjectBranch.slice(0, 20));
+ renderDailyList(buildDailyDataFromCharts({
+ sessions: lastFilteredSessions,
+ daily: filteredDaily,
+ }));
}
// ── Renderers ──────────────────────────────────────────────────────────────
@@ -1051,13 +1256,19 @@ def get_dashboard_data(db_path=DB_PATH):
});
}
-function renderSessionsTable(sessions) {
- document.getElementById('sessions-body').innerHTML = sessions.map(s => {
+function renderSessionsPage() {
+ const total = lastFilteredSessions.length;
+ const totalPages = Math.max(1, Math.ceil(total / SESSIONS_PAGE_SIZE));
+ if (sessionPage > totalPages) sessionPage = totalPages;
+ const startIdx = (sessionPage - 1) * SESSIONS_PAGE_SIZE;
+ const slice = lastFilteredSessions.slice(startIdx, startIdx + SESSIONS_PAGE_SIZE);
+ document.getElementById('sessions-body').innerHTML = slice.map(s => {
const cost = calcCost(s.model, s.input, s.output, s.cache_read, s.cache_creation);
const costCell = isBillable(s.model)
? `
${fmtCost(cost)} `
: `
n/a `;
- return `
+ const fullId = s.session_id_full || '';
+ return `
${esc(s.session_id)}…
${esc(s.project)}
${esc(s.last)}
@@ -1069,6 +1280,96 @@ def get_dashboard_data(db_path=DB_PATH):
${costCell}
`;
}).join('');
+ renderSessionsPager(totalPages, total);
+}
+
+function renderSessionsPager(totalPages, total) {
+ const pager = document.getElementById('sessions-pager');
+ if (!pager) return;
+ if (total === 0) { pager.innerHTML = ''; return; }
+ const startIdx = (sessionPage - 1) * SESSIONS_PAGE_SIZE + 1;
+ const endIdx = Math.min(sessionPage * SESSIONS_PAGE_SIZE, total);
+ pager.innerHTML = `
+
${startIdx}\u2013${endIdx} of ${total}
+
\u00ab
+
\u2039
+
Page ${sessionPage} / ${totalPages}
+
= totalPages ? 'disabled' : ''}>\u203a
+
= totalPages ? 'disabled' : ''}>\u00bb
+ `;
+}
+
+function setSessionPage(p) {
+ sessionPage = Math.max(1, p);
+ renderSessionsPage();
+}
+
+function toggleSessionRow(sessionId) {
+ if (!sessionId) return;
+ const row = document.querySelector(`tr.session-row[data-id="${sessionId}"]`);
+ if (!row) return;
+ const next = row.nextElementSibling;
+ if (next && next.classList.contains('session-detail-row') && next.dataset.id === sessionId) {
+ next.remove();
+ return;
+ }
+ // Collapse any other open detail row first — keeping multiple rows
+ // expanded clutters the table and the user can only read one at a time.
+ document.querySelectorAll('tr.session-detail-row').forEach(r => r.remove());
+ const detail = document.createElement('tr');
+ detail.className = 'session-detail-row';
+ detail.dataset.id = sessionId;
+ detail.innerHTML = `
Summarizing\u2026
`;
+ row.after(detail);
+ fetchSessionSummary(sessionId, detail);
+}
+
+async function fetchSessionSummary(sessionId, detailEl) {
+ if (sessionState.inFlight.has(sessionId)) return;
+ sessionState.inFlight.add(sessionId);
+ try {
+ const resp = await fetch('/api/session-summary?id=' + encodeURIComponent(sessionId));
+ const data = await resp.json();
+ renderSessionDetail(detailEl, sessionId, data);
+ sessionState.fetched.add(sessionId);
+ } catch (e) {
+ renderSessionDetail(detailEl, sessionId, { activities: null, error: e.message });
+ } finally {
+ sessionState.inFlight.delete(sessionId);
+ }
+}
+
+function renderSessionDetail(detailEl, sessionId, data) {
+ if (!detailEl || !detailEl.isConnected) return;
+ const td = detailEl.querySelector('td');
+ if (!td) return;
+ if (data.error === 'claude_not_installed') {
+ td.innerHTML = `
Session summaries require the claude CLI on PATH.
`;
+ return;
+ }
+ if (data.error === 'no_prompts') {
+ td.innerHTML = `
No user prompts found for this session.
`;
+ return;
+ }
+ if (data.error) {
+ td.innerHTML = `
Summary unavailable: ${esc(data.error)}
+ Retry
`;
+ return;
+ }
+ const acts = data.activities || [];
+ if (!acts.length) {
+ td.innerHTML = `
No activities inferred.
`;
+ return;
+ }
+ td.innerHTML = `
${acts.map(a => `${esc(a)} `).join('')} `;
+}
+
+function retrySessionSummary(sessionId) {
+ const detail = document.querySelector(`tr.session-detail-row[data-id="${sessionId}"]`);
+ if (!detail) return;
+ detail.querySelector('td').innerHTML = `
Summarizing\u2026
`;
+ sessionState.fetched.delete(sessionId);
+ fetchSessionSummary(sessionId, detail);
}
function setModelSort(col) {
@@ -1397,6 +1698,205 @@ def get_dashboard_data(db_path=DB_PATH):
loadData();
scheduleAutoRefresh();
+
+const dailyState = { fetchedDates: new Set(), inFlight: new Map() };
+
+function renderDailyList(data) {
+ const list = document.getElementById('daily-list');
+ if (!data.days.length) {
+ list.innerHTML = '
No activity in the selected range.
';
+ dailyState.fetchedDates.clear();
+ dailyState.inFlight.clear();
+ return;
+ }
+ // Auto-refresh fires every 30 s when the range includes today. Rebuilding
+ // the whole list would collapse any open day and abandon in-flight
+ // /api/cell-summary requests, which is exactly the "it closes itself
+ // after a while + No activities inferred" bug. Update metadata in place
+ // for existing days; only build fresh DOM for genuinely new dates.
+ const newDates = new Set(data.days.map(d => d.date));
+ // Drop fetch state and DOM for dates that fell out of the range.
+ list.querySelectorAll('details.day-row').forEach(d => {
+ if (!newDates.has(d.dataset.date)) {
+ dailyState.fetchedDates.delete(d.dataset.date);
+ dailyState.inFlight.delete(d.dataset.date);
+ d.remove();
+ }
+ });
+ data.days.forEach((day, idx) => {
+ const existing = list.querySelector(
+ `details.day-row[data-date="${day.date}"]`,
+ );
+ if (existing) {
+ // Update summary metadata in place; do NOT touch the open state or
+ // the body, so any expanded day stays expanded with its rendered
+ // (or in-progress) cell blocks.
+ const summary = existing.querySelector('summary');
+ if (summary) {
+ summary.innerHTML = `
+
${day.date}
+
${day.project_count} project${day.project_count === 1 ? '' : 's'}
+
$${day.cost.toFixed(2)}
+ `;
+ }
+ // Reposition to keep the new sort order.
+ const ref = list.children[idx];
+ if (ref && ref !== existing) list.insertBefore(existing, ref);
+ } else {
+ const tmp = document.createElement('div');
+ tmp.innerHTML = `
+
+
+ ${day.date}
+ ${day.project_count} project${day.project_count === 1 ? '' : 's'}
+ $${day.cost.toFixed(2)}
+
+
+
Click to load activities…
+
+
+ `;
+ const node = tmp.firstElementChild;
+ node.addEventListener('toggle', () => {
+ if (node.open) loadDayActivities(node);
+ });
+ const ref = list.children[idx];
+ if (ref) list.insertBefore(node, ref); else list.appendChild(node);
+ }
+ });
+}
+
+async function loadDayActivities(detailsEl) {
+ const date = detailsEl.dataset.date;
+ if (dailyState.fetchedDates.has(date)) return;
+ if (dailyState.inFlight.has(date)) return;
+ dailyState.inFlight.set(date, true);
+ const body = detailsEl.querySelector('.day-body');
+ body.innerHTML = '
Loading day…
';
+ try {
+ const resp = await fetch(`/api/daily-summaries?date=${encodeURIComponent(date)}`);
+ if (!resp.ok) throw new Error(`Server error ${resp.status}`);
+ const data = await resp.json();
+ data.cells.forEach(c => { c.__date = date; });
+ body.innerHTML = data.cells.map(c => renderProjectBlock(c)).join('');
+ dailyState.fetchedDates.add(date);
+ // Fire one /api/cell-summary per pending cell in parallel; replace the
+ // pending block as each one resolves so the user sees progress instead
+ // of one long blocking spinner.
+ data.cells.filter(c => c.pending).forEach(c => fetchCellSummary(detailsEl, date, c.project));
+ } catch (e) {
+ body.innerHTML = `
Failed to load: ${escapeHtml(e.message)}
`;
+ } finally {
+ dailyState.inFlight.delete(date);
+ }
+}
+
+async function fetchCellSummary(detailsEl, date, cwd) {
+ try {
+ const url = `/api/cell-summary?date=${encodeURIComponent(date)}&cwd=${encodeURIComponent(cwd)}`;
+ const resp = await fetch(url);
+ if (!resp.ok) throw new Error(`Server error ${resp.status}`);
+ const cell = await resp.json();
+ cell.__date = date;
+ cell.project = cell.project || cwd;
+ replaceCellBlock(detailsEl, cwd, cell);
+ } catch (e) {
+ replaceCellBlock(detailsEl, cwd, {
+ project: cwd, cost: 0, activities: null,
+ error: e.message, pending: false, __date: date,
+ });
+ }
+}
+
+function replaceCellBlock(detailsEl, cwd, cell) {
+ const body = detailsEl.querySelector('.day-body');
+ if (!body) return;
+ const blocks = body.querySelectorAll('.project-block');
+ for (const b of blocks) {
+ if (b.dataset.cwd === cwd) {
+ const tmp = document.createElement('div');
+ tmp.innerHTML = renderProjectBlock(cell);
+ b.replaceWith(tmp.firstElementChild);
+ return;
+ }
+ }
+}
+
+function renderProjectBlock(cell) {
+ const cwdAttr = `data-cwd="${escapeHtml(cell.project || '')}"`;
+ const head = `
${escapeHtml(cell.project)}$${(cell.cost || 0).toFixed(2)}
`;
+ if (cell.pending) {
+ return `
+ ${head}
+
Summarizing…
+
`;
+ }
+ if (cell.error === 'claude_not_installed') {
+ return `
+ ${head}
+
Daily Activities requires the claude CLI on PATH.
+
`;
+ }
+ if (cell.error) {
+ const date = cell.__date || '';
+ return `
+ ${head}
+
Summary unavailable: ${escapeHtml(cell.error)}
+ Retry
+
`;
+ }
+ if (!cell.activities || !cell.activities.length) {
+ return `
+ ${head}
+
No activities inferred.
+
`;
+ }
+ const bullets = cell.activities.map(a => `
${escapeHtml(a)} `).join('');
+ return `
`;
+}
+
+function escapeHtml(s) {
+ return String(s).replace(/[&<>"']/g, c => ({
+    '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;',
+ }[c]));
+}
+
+function retryDay(date) {
+ dailyState.fetchedDates.delete(date);
+ const detailsEl = document.querySelector(
+ `#daily-list details.day-row[data-date="${date}"]`,
+ );
+ if (detailsEl && detailsEl.open) loadDayActivities(detailsEl);
+}
+
+function buildDailyDataFromCharts(rangeData) {
+ // Cost must be turn-based so the day header equals the sum of per-cell
+ // costs the user sees on expand. Session-based attribution credits an
+ // entire session to its last_date, which over-counts days that absorb
+ // turns from earlier days of the same session.
+ const dayMap = new Map();
+ for (const r of rangeData.daily || []) {
+ if (!r.day) continue;
+ if (!dayMap.has(r.day)) dayMap.set(r.day, { date: r.day, projects: new Set(), cost: 0 });
+ dayMap.get(r.day).cost += calcCost(r.model, r.input, r.output, r.cache_read, r.cache_creation);
+ }
+ // Sessions still drive the project count — the day header just shows
+ // how many distinct projects worked that day, which sessions express
+ // directly without needing per-turn cwd grouping.
+ for (const s of rangeData.sessions || []) {
+ const d = s.last_date;
+ if (!d) continue;
+ if (!dayMap.has(d)) dayMap.set(d, { date: d, projects: new Set(), cost: 0 });
+ dayMap.get(d).projects.add(s.project);
+ }
+ const days = Array.from(dayMap.values())
+ .map(d => ({ date: d.date, project_count: d.projects.size, cost: d.cost }))
+ .sort((a, b) => b.date.localeCompare(a.date));
+ return { days };
+}
@@ -1424,6 +1924,43 @@ def do_GET(self):
self.end_headers()
self.wfile.write(body)
+ elif path == "/api/daily-summaries":
+ from urllib.parse import urlparse, parse_qs
+ qs = parse_qs(urlparse(self.path).query)
+ date = qs.get("date", [""])[0]
+ data = get_daily_summaries(date)
+ body = json.dumps(data).encode("utf-8")
+ self.send_response(200)
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(body)))
+ self.end_headers()
+ self.wfile.write(body)
+
+ elif path == "/api/cell-summary":
+ from urllib.parse import urlparse, parse_qs
+ qs = parse_qs(urlparse(self.path).query)
+ date = qs.get("date", [""])[0]
+ cwd = qs.get("cwd", [""])[0]
+ data = get_cell_summary(date, cwd)
+ body = json.dumps(data).encode("utf-8")
+ self.send_response(200)
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(body)))
+ self.end_headers()
+ self.wfile.write(body)
+
+ elif path == "/api/session-summary":
+ from urllib.parse import urlparse, parse_qs
+ qs = parse_qs(urlparse(self.path).query)
+ session_id = qs.get("id", [""])[0]
+ data = get_session_summary(session_id)
+ body = json.dumps(data).encode("utf-8")
+ self.send_response(200)
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(body)))
+ self.end_headers()
+ self.wfile.write(body)
+
else:
self.send_response(404)
self.end_headers()
diff --git a/docs/superpowers/plans/2026-04-27-daily-activity-summaries-plan.md b/docs/superpowers/plans/2026-04-27-daily-activity-summaries-plan.md
new file mode 100644
index 0000000..66cce5b
--- /dev/null
+++ b/docs/superpowers/plans/2026-04-27-daily-activity-summaries-plan.md
@@ -0,0 +1,1634 @@
+# Daily Activity Summaries — Implementation Plan
+
+> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
+
+**Goal:** Add a "Daily Activities" view that uses the local `claude` CLI to summarize each day's user prompts per project as 2–5 bulleted activities, cached in SQLite.
+
+**Architecture:** New `summarizer.py` module wraps a `claude -p` subprocess call. Eager pass at `cli.py dashboard` startup summarizes the top-20% (day, project) cells by cost (capped at 50). Lazy pass on `/api/daily-summaries` for cells the user expands. Cache invalidated by sha256 hash of input prompts.
+
+**Tech Stack:** Python 3.8 stdlib (sqlite3, hashlib, subprocess, http.server), embedded vanilla JavaScript, the existing `claude` CLI on PATH. No new pip dependencies.
+
+**Spec:** `docs/superpowers/specs/2026-04-27-daily-activity-summaries-design.md`
+
+---
+
+## File Structure
+
+| File | What changes |
+|------|--------------|
+| `scanner.py` | Add `daily_summaries` table to `init_db()` |
+| `summarizer.py` | **New module** — `prompt_hash`, `collect_prompts`, `rank_cells_by_cost`, `run_claude`, `summarize_cell` |
+| `cli.py` | `cmd_dashboard` runs eager summarizer pass after the scan with TTY progress |
+| `dashboard.py` | New `/api/daily-summaries` endpoint + new HTML section + new JS |
+| `tests/test_scanner.py` | New test for `daily_summaries` table |
+| `tests/test_summarizer.py` | **New file** — unit tests for every public function in `summarizer.py` |
+| `tests/test_cli.py` | New test for eager pass progress callback in `cmd_dashboard` |
+| `tests/test_dashboard.py` | New tests for `/api/daily-summaries` endpoint |
+| `CHANGELOG.md` | New section for v0.3.0-launchmetrics.1 |
+
+Each task lands as one commit. Order ensures every commit leaves the test suite green.
+
+---
+
+## Task 1: scanner.py — `daily_summaries` table
+
+**Files:**
+- Modify: `scanner.py` (`init_db` function)
+- Test: `tests/test_scanner.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_scanner.py`:
+
+```python
+def test_init_db_creates_daily_summaries_table(tmp_path):
+ db_path = tmp_path / "test.db"
+ conn = scanner.init_db(db_path)
+ cols = {row[1] for row in conn.execute("PRAGMA table_info(daily_summaries)")}
+ assert cols == {
+ "summary_date", "project_path", "prompt_hash",
+ "activities", "cost_usd", "created_at",
+ }
+ conn.close()
+
+
+def test_init_db_daily_summaries_idempotent(tmp_path):
+ db_path = tmp_path / "test.db"
+ scanner.init_db(db_path).close()
+ conn = scanner.init_db(db_path) # second call must not raise
+ conn.execute("SELECT 1 FROM daily_summaries").fetchall()
+ conn.close()
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_scanner.py::test_init_db_creates_daily_summaries_table -v`
+Expected: FAIL with `sqlite3.OperationalError: no such table: daily_summaries`
+
+- [ ] **Step 3: Add the table to `init_db`**
+
+In `scanner.py`, find the multi-statement `executescript` call inside `init_db()` (around line 44–86, the block that contains `CREATE TABLE IF NOT EXISTS scan_meta`). Add this CREATE TABLE statement at the end of that script (just before the closing `"""`):
+
+```sql
+ CREATE TABLE IF NOT EXISTS daily_summaries (
+ summary_date TEXT NOT NULL,
+ project_path TEXT NOT NULL,
+ prompt_hash TEXT NOT NULL,
+ activities TEXT NOT NULL,
+ cost_usd REAL NOT NULL,
+ created_at REAL NOT NULL,
+ PRIMARY KEY (summary_date, project_path)
+ );
+```
+
+- [ ] **Step 4: Run test to verify it passes**
+
+Run: `python3 -m pytest tests/test_scanner.py -k daily_summaries -v`
+Expected: 2 passed.
+
+- [ ] **Step 5: Run full suite to confirm no regressions**
+
+Run: `python3 -m pytest tests/ -q`
+Expected: all 103 prior tests still pass + 2 new tests = 105 passed.
+
+- [ ] **Step 6: Commit**
+
+```bash
+git add scanner.py tests/test_scanner.py
+git commit -m "feat(scanner): add daily_summaries table for activity summaries"
+```
+
+---
+
+## Task 2: summarizer.py — `prompt_hash` function
+
+**Files:**
+- Create: `summarizer.py`
+- Test: `tests/test_summarizer.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Create `tests/test_summarizer.py` with:
+
+```python
+import summarizer
+
+
+def test_prompt_hash_is_deterministic():
+ assert summarizer.prompt_hash("hello") == summarizer.prompt_hash("hello")
+
+
+def test_prompt_hash_differs_on_change():
+ assert summarizer.prompt_hash("hello") != summarizer.prompt_hash("hello!")
+
+
+def test_prompt_hash_returns_hex_string():
+ h = summarizer.prompt_hash("hello")
+ assert isinstance(h, str)
+ assert len(h) == 64 # sha256 hex digest length
+ int(h, 16) # valid hex
+
+
+def test_prompt_hash_handles_unicode():
+ summarizer.prompt_hash("hola — què tal?") # must not raise
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_summarizer.py -v`
+Expected: FAIL with `ModuleNotFoundError: No module named 'summarizer'`
+
+- [ ] **Step 3: Create the module skeleton + `prompt_hash`**
+
+Create `summarizer.py`:
+
+```python
+"""
+summarizer.py - Generate per-day activity summaries by calling the local
+`claude` CLI on the day's user prompts. Cached in usage.db.
+"""
+
+import hashlib
+import json
+import os
+import sqlite3
+import subprocess
+import time
+from pathlib import Path
+
+# ── Constants ────────────────────────────────────────────────────────────────
+
+NOISE_SKIPLIST = {
+ "yes", "no", "ok", "okay", "exit", "y", "n",
+ "continue", "thanks", "thank you", "great", "alright",
+}
+MIN_PROMPT_LENGTH = 5
+MAX_INPUT_BYTES = 4096
+DEFAULT_MAX_CELLS = 50
+DEFAULT_PERCENTILE = 80
+DEFAULT_MODEL = "haiku"
+SUBPROCESS_TIMEOUT = 60
+
+SYSTEM_PROMPT = (
+ "You analyze user prompts from one day's work in one project and infer "
+ "the main activities. Output 2 to 5 concrete activity bullets describing "
+ "features, topics, or goals — not file names or implementation minutiae. "
+ "No fluff, no greetings, no meta-commentary."
+)
+
+SUMMARY_SCHEMA = {
+ "type": "object",
+ "properties": {
+ "activities": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "maxItems": 5,
+ }
+ },
+ "required": ["activities"],
+}
+
+
+# ── Public functions ─────────────────────────────────────────────────────────
+
+def prompt_hash(text: str) -> str:
+ """Stable sha256 hex digest of the prompt text — cache invalidation key."""
+ return hashlib.sha256(text.encode("utf-8")).hexdigest()
+```
+
+- [ ] **Step 4: Run test to verify it passes**
+
+Run: `python3 -m pytest tests/test_summarizer.py -v`
+Expected: 4 passed.
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add summarizer.py tests/test_summarizer.py
+git commit -m "feat(summarizer): add module skeleton and prompt_hash"
+```
+
+---
+
+## Task 3: summarizer.py — `collect_prompts`
+
+**Files:**
+- Modify: `summarizer.py`
+- Modify: `tests/test_summarizer.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_summarizer.py`:
+
+```python
+import json
+
+
+def _write_jsonl(path, records):
+ path.write_text("\n".join(json.dumps(r) for r in records))
+
+
+def test_collect_prompts_filters_noise_and_dedupes(tmp_path):
+ proj_dir = tmp_path / "-Users-test-myproj"
+ proj_dir.mkdir()
+ _write_jsonl(proj_dir / "session.jsonl", [
+ {"type": "user", "timestamp": "2026-04-25T10:00:00Z",
+ "message": {"content": "refactor the epic correlation script"}},
+ {"type": "user", "timestamp": "2026-04-25T10:05:00Z",
+ "message": {"content": "yes"}}, # noise: skiplist
+ {"type": "user", "timestamp": "2026-04-25T10:10:00Z",
+ "message": {"content": "hi"}}, # noise: too short
+ {"type": "user", "timestamp": "2026-04-25T10:15:00Z",
+ "message": {"content": "refactor the epic correlation script"}}, # dup
+ {"type": "user", "timestamp": "2026-04-25T10:20:00Z",
+ "message": {"content": "add unit tests for the new endpoint"}},
+ {"type": "assistant", "timestamp": "2026-04-25T10:30:00Z",
+ "message": {"content": "should not be included"}}, # wrong type
+ ])
+ text = summarizer.collect_prompts(
+ date="2026-04-25", cwd="/Users/test/myproj", projects_dirs=[tmp_path],
+ )
+ lines = text.split("\n")
+ assert "refactor the epic correlation script" in lines
+ assert "add unit tests for the new endpoint" in lines
+ assert "yes" not in lines
+ assert "hi" not in lines
+ assert "should not be included" not in lines
+ # Dedup: each prompt appears exactly once
+ assert lines.count("refactor the epic correlation script") == 1
+
+
+def test_collect_prompts_extracts_from_content_list(tmp_path):
+ proj_dir = tmp_path / "-Users-test-myproj"
+ proj_dir.mkdir()
+ _write_jsonl(proj_dir / "session.jsonl", [
+ {"type": "user", "timestamp": "2026-04-25T10:00:00Z",
+ "message": {"content": [
+ {"type": "text", "text": "build a calendar picker for the dashboard"},
+ ]}},
+ ])
+ text = summarizer.collect_prompts(
+ date="2026-04-25", cwd="/Users/test/myproj", projects_dirs=[tmp_path],
+ )
+ assert text == "build a calendar picker for the dashboard"
+
+
+def test_collect_prompts_filters_by_date(tmp_path):
+ proj_dir = tmp_path / "-Users-test-myproj"
+ proj_dir.mkdir()
+ _write_jsonl(proj_dir / "session.jsonl", [
+ {"type": "user", "timestamp": "2026-04-24T23:59:59Z",
+ "message": {"content": "from yesterday morning"}},
+ {"type": "user", "timestamp": "2026-04-25T00:00:00Z",
+ "message": {"content": "from today midnight"}},
+ ])
+ text = summarizer.collect_prompts(
+ date="2026-04-25", cwd="/Users/test/myproj", projects_dirs=[tmp_path],
+ )
+ assert text == "from today midnight"
+
+
+def test_collect_prompts_caps_at_4kb(tmp_path):
+ proj_dir = tmp_path / "-Users-test-myproj"
+ proj_dir.mkdir()
+ long_prompt = "x" * 1000
+ records = [
+ {"type": "user", "timestamp": "2026-04-25T10:00:00Z",
+ "message": {"content": f"{long_prompt} {i}"}}
+ for i in range(10)
+ ]
+ _write_jsonl(proj_dir / "s.jsonl", records)
+ text = summarizer.collect_prompts(
+ date="2026-04-25", cwd="/Users/test/myproj", projects_dirs=[tmp_path],
+ )
+ assert len(text.encode("utf-8")) <= summarizer.MAX_INPUT_BYTES
+
+
+def test_collect_prompts_returns_empty_when_no_matches(tmp_path):
+ text = summarizer.collect_prompts(
+ date="2026-04-25", cwd="/Users/test/nonexistent",
+ projects_dirs=[tmp_path],
+ )
+ assert text == ""
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_summarizer.py::test_collect_prompts_filters_noise_and_dedupes -v`
+Expected: FAIL with `AttributeError: module 'summarizer' has no attribute 'collect_prompts'`
+
+- [ ] **Step 3: Implement `collect_prompts`**
+
+Append to `summarizer.py` (after `prompt_hash`):
+
+```python
+def _is_noise(text: str) -> bool:
+ t = text.strip().lower()
+ return len(t) < MIN_PROMPT_LENGTH or t in NOISE_SKIPLIST
+
+
+def _extract_prompt_text(rec: dict) -> str:
+ msg = rec.get("message")
+ if not isinstance(msg, dict):
+ return ""
+ content = msg.get("content")
+ if isinstance(content, str):
+ return content
+ if isinstance(content, list):
+ for part in content:
+ if isinstance(part, dict) and part.get("type") == "text":
+ return part.get("text", "")
+ return ""
+
+
+def _encoded_dirname(cwd: str) -> str:
+ """The convention Claude Code uses to name per-project subdirectories."""
+ return cwd.replace("/", "-")
+
+
+def collect_prompts(date: str, cwd: str, projects_dirs) -> str:
+ """
+    Walk JSONLs under each projects_dir/<encoded-dirname>/ and collect type=user
+ prompts whose timestamp starts with `date`. Filter noise, dedupe exact
+ matches, sort for determinism, concat with newlines, cap at MAX_INPUT_BYTES.
+ """
+ dirname = _encoded_dirname(cwd)
+ prompts = set()
+ for root in projects_dirs:
+ target = Path(root) / dirname
+ if not target.exists():
+ continue
+ for jsonl in sorted(target.glob("*.jsonl")):
+ try:
+ with jsonl.open() as f:
+ for line in f:
+ try:
+ rec = json.loads(line)
+ except json.JSONDecodeError:
+ continue
+ if rec.get("type") != "user":
+ continue
+ ts = rec.get("timestamp", "")
+ if not isinstance(ts, str) or not ts.startswith(date):
+ continue
+ text = _extract_prompt_text(rec)
+ if not text or _is_noise(text):
+ continue
+ prompts.add(text.strip())
+ except OSError:
+ continue
+ if not prompts:
+ return ""
+ sorted_prompts = sorted(prompts)
+ out, size = [], 0
+ for p in sorted_prompts:
+ encoded = p.encode("utf-8")
+ # +1 for newline separator (none for first item, but worst-case bound)
+ if size + len(encoded) + 1 > MAX_INPUT_BYTES:
+ break
+ out.append(p)
+ size += len(encoded) + 1
+ return "\n".join(out)
+```
+
+- [ ] **Step 4: Run tests to verify they pass**
+
+Run: `python3 -m pytest tests/test_summarizer.py -v`
+Expected: 4 prior + 5 new = 9 passed.
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add summarizer.py tests/test_summarizer.py
+git commit -m "feat(summarizer): implement collect_prompts with noise filtering"
+```
+
+---
+
+## Task 4: summarizer.py — `rank_cells_by_cost`
+
+**Files:**
+- Modify: `summarizer.py`
+- Modify: `tests/test_summarizer.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_summarizer.py`:
+
+```python
+def _seed_turns(db_path, rows):
+ """rows: list of (timestamp, cwd, model, input, output, cache_read, cache_write)"""
+ import scanner
+ scanner.init_db(db_path).close()
+ conn = sqlite3.connect(db_path)
+ for ts, cwd, model, inp, out, cr, cw in rows:
+ conn.execute("""
+ INSERT INTO turns
+ (session_id, timestamp, model, input_tokens, output_tokens,
+ cache_read_tokens, cache_creation_tokens, cwd)
+ VALUES ('s1', ?, ?, ?, ?, ?, ?, ?)
+ """, (ts, model, inp, out, cr, cw, cwd))
+ conn.commit()
+ conn.close()
+
+
+def test_rank_cells_groups_by_day_and_cwd(tmp_path):
+ db = tmp_path / "u.db"
+ _seed_turns(db, [
+ ("2026-04-25T10:00:00Z", "/proj/A", "claude-haiku-4-5", 1_000_000, 0, 0, 0),
+ ("2026-04-25T11:00:00Z", "/proj/A", "claude-haiku-4-5", 1_000_000, 0, 0, 0),
+ ("2026-04-25T12:00:00Z", "/proj/B", "claude-haiku-4-5", 500_000, 0, 0, 0),
+ ])
+ cells = summarizer.rank_cells_by_cost(db, max_cells=10, percentile=0)
+ by_key = {(d, c): cost for d, c, cost in cells}
+ assert by_key[("2026-04-25", "/proj/A")] == pytest.approx(2.0, rel=0.01)
+ assert by_key[("2026-04-25", "/proj/B")] == pytest.approx(0.5, rel=0.01)
+
+
+def test_rank_cells_applies_percentile_threshold(tmp_path):
+ db = tmp_path / "u.db"
+ rows = []
+ # 10 cells with linearly increasing cost
+ for i in range(10):
+ rows.append(
+ (f"2026-04-{i+1:02d}T10:00:00Z", f"/proj/{i}",
+ "claude-haiku-4-5", (i + 1) * 1_000_000, 0, 0, 0)
+ )
+ _seed_turns(db, rows)
+ cells = summarizer.rank_cells_by_cost(db, max_cells=100, percentile=80)
+ # 80th percentile of 10 items: top 20% = 2 items (indexes 8, 9)
+ assert len(cells) == 2
+ # sorted descending
+ assert cells[0][2] > cells[1][2]
+
+
+def test_rank_cells_caps_at_max_cells(tmp_path):
+ db = tmp_path / "u.db"
+ rows = [
+ (f"2026-04-{i+1:02d}T10:00:00Z", f"/proj/{i}",
+ "claude-haiku-4-5", 1_000_000, 0, 0, 0)
+ for i in range(20)
+ ]
+ _seed_turns(db, rows)
+ cells = summarizer.rank_cells_by_cost(db, max_cells=3, percentile=0)
+ assert len(cells) == 3
+
+
+def test_rank_cells_skips_zero_cost(tmp_path):
+ db = tmp_path / "u.db"
+ _seed_turns(db, [
+ ("2026-04-25T10:00:00Z", "/proj/A", "unknown-model", 1_000_000, 0, 0, 0),
+ ("2026-04-25T11:00:00Z", "/proj/B", "claude-haiku-4-5", 1_000_000, 0, 0, 0),
+ ])
+ cells = summarizer.rank_cells_by_cost(db, max_cells=10, percentile=0)
+ cwds = {c[1] for c in cells}
+ assert "/proj/A" not in cwds
+ assert "/proj/B" in cwds
+
+
+def test_rank_cells_empty_db(tmp_path):
+ import scanner
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ assert summarizer.rank_cells_by_cost(db, max_cells=10) == []
+```
+
+Add `import pytest` and `import sqlite3` at the top of `tests/test_summarizer.py` if not already present.
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_summarizer.py::test_rank_cells_groups_by_day_and_cwd -v`
+Expected: FAIL with `AttributeError`.
+
+- [ ] **Step 3: Implement `rank_cells_by_cost`**
+
+Append to `summarizer.py`:
+
+```python
+def rank_cells_by_cost(db_path, max_cells=None, percentile=None):
+ """
+ Returns a sorted list of (date, cwd, cost_usd) tuples for the eager set —
+ cells whose cost is at or above the given percentile, capped at max_cells,
+ sorted descending by cost. Skips cells with cost == 0 (unknown models).
+ """
+ if max_cells is None:
+ max_cells = int(os.environ.get("SUMMARY_MAX_CELLS", str(DEFAULT_MAX_CELLS)))
+ if percentile is None:
+ percentile = DEFAULT_PERCENTILE
+ from cli import calc_cost
+ conn = sqlite3.connect(db_path)
+ conn.row_factory = sqlite3.Row
+ rows = conn.execute("""
+ SELECT
+ substr(timestamp, 1, 10) AS day,
+ cwd,
+ model,
+ input_tokens, output_tokens,
+ cache_read_tokens, cache_creation_tokens
+ FROM turns
+ WHERE cwd IS NOT NULL AND cwd != ''
+ """).fetchall()
+ conn.close()
+ cells = {}
+ for r in rows:
+ cost = calc_cost(
+ r["model"],
+ r["input_tokens"] or 0,
+ r["output_tokens"] or 0,
+ r["cache_read_tokens"] or 0,
+ r["cache_creation_tokens"] or 0,
+ )
+ if cost <= 0:
+ continue
+ key = (r["day"], r["cwd"])
+ cells[key] = cells.get(key, 0.0) + cost
+ items = [(d, c, cost) for (d, c), cost in cells.items() if cost > 0]
+ if not items:
+ return []
+ costs = sorted(cost for _, _, cost in items)
+ pct_idx = min(int(len(costs) * (percentile / 100)), len(costs) - 1)
+ threshold = costs[pct_idx]
+ eager = [item for item in items if item[2] >= threshold]
+ eager.sort(key=lambda c: -c[2])
+ return eager[:max_cells]
+```
+
+- [ ] **Step 4: Run tests**
+
+Run: `python3 -m pytest tests/test_summarizer.py -v`
+Expected: 9 prior + 5 new = 14 passed.
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add summarizer.py tests/test_summarizer.py
+git commit -m "feat(summarizer): add rank_cells_by_cost with percentile + cap"
+```
+
+---
+
+## Task 5: summarizer.py — `run_claude` (subprocess + parsing)
+
+**Files:**
+- Modify: `summarizer.py`
+- Modify: `tests/test_summarizer.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_summarizer.py`:
+
+```python
+import subprocess
+from unittest.mock import patch, MagicMock
+
+
+def _mock_claude_response(stdout, returncode=0):
+ return MagicMock(returncode=returncode, stdout=stdout, stderr="")
+
+
+def test_run_claude_parses_successful_json(monkeypatch):
+ response = json.dumps({"result": json.dumps({
+ "activities": ["Refactored X", "Added tests for Y"],
+ })})
+ with patch("subprocess.run", return_value=_mock_claude_response(response)):
+ activities, err = summarizer.run_claude("some prompt", model="haiku")
+ assert err is None
+ assert activities == ["Refactored X", "Added tests for Y"]
+
+
+def test_run_claude_constructs_argv_correctly(monkeypatch):
+ response = json.dumps({"result": json.dumps({"activities": ["A"]})})
+ with patch("subprocess.run", return_value=_mock_claude_response(response)) as m:
+ summarizer.run_claude("hello", model="haiku")
+ argv = m.call_args[0][0]
+ assert argv[0] == "claude"
+ assert "-p" in argv
+ assert "hello" in argv
+ assert "--model" in argv and "haiku" in argv
+ assert "--no-session-persistence" in argv
+ assert "--disable-slash-commands" in argv
+ assert "--output-format" in argv and "json" in argv
+ assert "--system-prompt" in argv
+
+
+def test_run_claude_handles_file_not_found(monkeypatch):
+ with patch("subprocess.run", side_effect=FileNotFoundError):
+ activities, err = summarizer.run_claude("hi", model="haiku")
+ assert activities is None
+ assert err == "claude_not_installed"
+
+
+def test_run_claude_handles_timeout(monkeypatch):
+ with patch("subprocess.run",
+ side_effect=subprocess.TimeoutExpired(cmd="claude", timeout=60)):
+ activities, err = summarizer.run_claude("hi", model="haiku")
+ assert activities is None
+ assert err == "timeout"
+
+
+def test_run_claude_handles_nonzero_exit(monkeypatch):
+ bad = MagicMock(returncode=1, stdout="", stderr="auth failed")
+ with patch("subprocess.run", return_value=bad):
+ activities, err = summarizer.run_claude("hi", model="haiku")
+ assert activities is None
+ assert err.startswith("cli_error:")
+ assert "auth failed" in err
+
+
+def test_run_claude_handles_invalid_json(monkeypatch):
+ with patch("subprocess.run",
+ return_value=_mock_claude_response("not json at all")):
+ activities, err = summarizer.run_claude("hi", model="haiku")
+ assert activities is None
+ assert err == "parse_error"
+
+
+def test_run_claude_handles_missing_activities_key(monkeypatch):
+ response = json.dumps({"result": json.dumps({"unrelated": "field"})})
+ with patch("subprocess.run", return_value=_mock_claude_response(response)):
+ activities, err = summarizer.run_claude("hi", model="haiku")
+ assert activities is None
+ assert err == "parse_error"
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_summarizer.py::test_run_claude_parses_successful_json -v`
+Expected: FAIL with `AttributeError`.
+
+- [ ] **Step 3: Implement `run_claude`**
+
+Append to `summarizer.py`:
+
+```python
+def run_claude(prompt_text, model=None, timeout=SUBPROCESS_TIMEOUT):
+ """
+ Invoke `claude -p` with the given prompt text and structured-output schema.
+ Returns (activities_list, None) on success or (None, error_code) on failure.
+ Never raises.
+ """
+ if model is None:
+ model = os.environ.get("SUMMARY_MODEL", DEFAULT_MODEL)
+ argv = [
+ "claude", "-p", prompt_text,
+ "--model", model,
+ "--output-format", "json",
+ "--json-schema", json.dumps(SUMMARY_SCHEMA),
+ "--no-session-persistence",
+ "--disable-slash-commands",
+ "--system-prompt", SYSTEM_PROMPT,
+ ]
+ try:
+ proc = subprocess.run(
+ argv, capture_output=True, text=True, timeout=timeout,
+ )
+ except FileNotFoundError:
+ return None, "claude_not_installed"
+ except subprocess.TimeoutExpired:
+ return None, "timeout"
+ if proc.returncode != 0:
+ first_err_line = (proc.stderr or "").strip().splitlines()
+ msg = first_err_line[0] if first_err_line else f"exit {proc.returncode}"
+ return None, f"cli_error: {msg}"
+ try:
+ outer = json.loads(proc.stdout)
+        # `claude -p --output-format json` returns {"result": "<inner JSON string>"}
+ inner_raw = outer.get("result")
+ if not isinstance(inner_raw, str):
+ return None, "parse_error"
+ inner = json.loads(inner_raw)
+ activities = inner.get("activities")
+ if not isinstance(activities, list) or not activities:
+ return None, "parse_error"
+ return [str(a) for a in activities], None
+ except (json.JSONDecodeError, AttributeError):
+ return None, "parse_error"
+```
+
+- [ ] **Step 4: Run tests**
+
+Run: `python3 -m pytest tests/test_summarizer.py -v`
+Expected: 14 prior + 7 new = 21 passed.
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add summarizer.py tests/test_summarizer.py
+git commit -m "feat(summarizer): add run_claude with structured output parsing"
+```
+
+---
+
+## Task 6: summarizer.py — `summarize_cell` orchestrator
+
+**Files:**
+- Modify: `summarizer.py`
+- Modify: `tests/test_summarizer.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_summarizer.py`:
+
+```python
+def _seed_jsonl_for_cell(projects_dir, cwd, date, prompts):
+ proj_dir = projects_dir / cwd.replace("/", "-")
+ proj_dir.mkdir(parents=True, exist_ok=True)
+ records = [
+ {"type": "user",
+ "timestamp": f"{date}T10:0{i}:00Z",
+ "message": {"content": p}}
+ for i, p in enumerate(prompts)
+ ]
+ (proj_dir / "session.jsonl").write_text(
+ "\n".join(json.dumps(r) for r in records),
+ )
+
+
+def test_summarize_cell_calls_claude_and_writes_cache(tmp_path):
+ import scanner
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ _seed_jsonl_for_cell(proj, "/Users/x/myproj", "2026-04-25",
+ ["refactor the api", "add tests for the new endpoint"])
+ fake = json.dumps({"result": json.dumps({"activities": ["Refactored API"]})})
+ with patch("subprocess.run", return_value=_mock_claude_response(fake)):
+ result = summarizer.summarize_cell(
+ date="2026-04-25", cwd="/Users/x/myproj", cost_usd=1.23,
+ db_path=db, projects_dirs=[proj],
+ )
+ assert result["activities"] == ["Refactored API"]
+ assert result["cached"] is False
+ assert result["error"] is None
+ # Verify written to DB
+ conn = sqlite3.connect(db)
+ row = conn.execute(
+ "SELECT activities, cost_usd FROM daily_summaries WHERE summary_date=?",
+ ("2026-04-25",),
+ ).fetchone()
+ conn.close()
+ assert json.loads(row[0]) == ["Refactored API"]
+ assert row[1] == 1.23
+
+
+def test_summarize_cell_returns_cache_hit(tmp_path):
+ import scanner
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ _seed_jsonl_for_cell(proj, "/Users/x/myproj", "2026-04-25",
+ ["refactor the api"])
+ text = summarizer.collect_prompts("2026-04-25", "/Users/x/myproj", [proj])
+ h = summarizer.prompt_hash(text)
+ conn = sqlite3.connect(db)
+ conn.execute("""
+ INSERT INTO daily_summaries
+ (summary_date, project_path, prompt_hash, activities, cost_usd, created_at)
+ VALUES (?, ?, ?, ?, ?, ?)
+ """, ("2026-04-25", "/Users/x/myproj", h,
+ json.dumps(["Cached activity"]), 1.0, time.time()))
+ conn.commit()
+ conn.close()
+ with patch("subprocess.run") as m: # must not be called
+ result = summarizer.summarize_cell(
+ date="2026-04-25", cwd="/Users/x/myproj", cost_usd=1.0,
+ db_path=db, projects_dirs=[proj],
+ )
+ assert result["cached"] is True
+ assert result["activities"] == ["Cached activity"]
+ m.assert_not_called()
+
+
+def test_summarize_cell_invalidates_on_hash_mismatch(tmp_path):
+ import scanner
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ _seed_jsonl_for_cell(proj, "/Users/x/myproj", "2026-04-25",
+ ["original prompt"])
+ # Cache with stale hash
+ conn = sqlite3.connect(db)
+ conn.execute("""
+ INSERT INTO daily_summaries
+ (summary_date, project_path, prompt_hash, activities, cost_usd, created_at)
+ VALUES (?, ?, ?, ?, ?, ?)
+ """, ("2026-04-25", "/Users/x/myproj", "stale-hash",
+ json.dumps(["old"]), 1.0, time.time()))
+ conn.commit()
+ conn.close()
+ fake = json.dumps({"result": json.dumps({"activities": ["fresh"]})})
+ with patch("subprocess.run", return_value=_mock_claude_response(fake)):
+ result = summarizer.summarize_cell(
+ date="2026-04-25", cwd="/Users/x/myproj", cost_usd=1.0,
+ db_path=db, projects_dirs=[proj],
+ )
+ assert result["cached"] is False
+ assert result["activities"] == ["fresh"]
+
+
+def test_summarize_cell_does_not_cache_errors(tmp_path):
+ import scanner
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ _seed_jsonl_for_cell(proj, "/Users/x/myproj", "2026-04-25",
+ ["a real prompt"])
+ with patch("subprocess.run", side_effect=FileNotFoundError):
+ result = summarizer.summarize_cell(
+ date="2026-04-25", cwd="/Users/x/myproj", cost_usd=1.0,
+ db_path=db, projects_dirs=[proj],
+ )
+ assert result["error"] == "claude_not_installed"
+ assert result["activities"] is None
+ # Verify nothing was written
+ conn = sqlite3.connect(db)
+ rows = conn.execute("SELECT * FROM daily_summaries").fetchall()
+ conn.close()
+ assert rows == []
+
+
+def test_summarize_cell_skips_when_no_prompts(tmp_path):
+ import scanner
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ with patch("subprocess.run") as m:
+ result = summarizer.summarize_cell(
+ date="2026-04-25", cwd="/Users/x/empty", cost_usd=1.0,
+ db_path=db, projects_dirs=[proj],
+ )
+ assert result["error"] == "no_prompts"
+ assert result["activities"] is None
+ m.assert_not_called()
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_summarizer.py::test_summarize_cell_calls_claude_and_writes_cache -v`
+Expected: FAIL with `AttributeError`.
+
+- [ ] **Step 3: Implement `summarize_cell`**
+
+Append to `summarizer.py`:
+
+```python
+def summarize_cell(date, cwd, cost_usd, db_path, projects_dirs, model=None):
+ """
+ Orchestrate one (date, cwd) summary: collect prompts, check cache,
+ invoke claude if needed, persist result. Errors are returned, not raised.
+ """
+ text = collect_prompts(date, cwd, projects_dirs)
+ if not text:
+ return {"activities": None, "cached": False, "error": "no_prompts"}
+ h = prompt_hash(text)
+ conn = sqlite3.connect(db_path)
+ try:
+ row = conn.execute(
+ "SELECT prompt_hash, activities FROM daily_summaries "
+ "WHERE summary_date=? AND project_path=?",
+ (date, cwd),
+ ).fetchone()
+ if row is not None and row[0] == h:
+ return {
+ "activities": json.loads(row[1]),
+ "cached": True,
+ "error": None,
+ }
+ activities, err = run_claude(text, model=model)
+ if err is not None:
+ return {"activities": None, "cached": False, "error": err}
+ conn.execute("""
+ INSERT OR REPLACE INTO daily_summaries
+ (summary_date, project_path, prompt_hash,
+ activities, cost_usd, created_at)
+ VALUES (?, ?, ?, ?, ?, ?)
+ """, (date, cwd, h, json.dumps(activities), cost_usd, time.time()))
+ conn.commit()
+ return {"activities": activities, "cached": False, "error": None}
+ finally:
+ conn.close()
+```
+
+- [ ] **Step 4: Run tests**
+
+Run: `python3 -m pytest tests/test_summarizer.py -v`
+Expected: 21 prior + 5 new = 26 passed.
+
+- [ ] **Step 5: Run full suite**
+
+Run: `python3 -m pytest tests/ -q`
+Expected: 105 prior + 26 new (which includes the 2 from Task 1) = 131 passed. (Adjust this expectation if you've split tests differently — the count must equal previous total + new tests added.)
+
+- [ ] **Step 6: Commit**
+
+```bash
+git add summarizer.py tests/test_summarizer.py
+git commit -m "feat(summarizer): add summarize_cell orchestrator with cache"
+```
+
+---
+
+## Task 7: cli.py — eager pass after scan
+
+**Files:**
+- Modify: `cli.py` (`cmd_dashboard` function around line 390)
+- Test: `tests/test_cli.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_cli.py`:
+
+```python
+def test_cmd_dashboard_runs_eager_summarizer_pass(tmp_path, monkeypatch, capsys):
+ """cmd_dashboard should call summarizer.run_eager_pass after the scan."""
+ import cli, summarizer
+ db = tmp_path / "u.db"
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ monkeypatch.setattr(cli, "DB_PATH", db)
+
+ # Stub cmd_scan, serve, and webbrowser so we don't scan, start a server,
+ # or open a browser tab on the developer's machine
+ monkeypatch.setattr(cli, "cmd_scan", lambda **kw: None)
+ monkeypatch.setattr(
+ "dashboard.serve",
+ lambda host=None, port=None: None,
+ raising=False,
+ )
+ monkeypatch.setattr("webbrowser.open", lambda *a, **kw: None)
+
+ called = {"count": 0, "args": None}
+ def fake_eager(db_path, projects_dirs, progress_callback=None):
+ called["count"] += 1
+ called["args"] = (db_path, projects_dirs)
+ if progress_callback:
+ progress_callback(1, 1)
+ return {"summarized": 1, "skipped": 0, "errors": 0}
+ monkeypatch.setattr(summarizer, "run_eager_pass", fake_eager)
+
+ cli.cmd_dashboard(projects_dir=str(proj))
+ assert called["count"] == 1
+ assert called["args"][0] == db
+
+
+def test_cmd_dashboard_eager_pass_writes_progress_to_stderr(monkeypatch, capsys, tmp_path):
+ import cli, summarizer
+ db = tmp_path / "u.db"
+ proj = tmp_path / "projects"
+ proj.mkdir()
+ monkeypatch.setattr(cli, "DB_PATH", db)
+ monkeypatch.setattr(cli, "cmd_scan", lambda **kw: None)
+ monkeypatch.setattr(
+ "dashboard.serve",
+ lambda host=None, port=None: None,
+ raising=False,
+ )
+ monkeypatch.setattr("webbrowser.open", lambda *a, **kw: None)
+ def fake_eager(db_path, projects_dirs, progress_callback=None):
+ progress_callback(1, 3)
+ progress_callback(2, 3)
+ progress_callback(3, 3)
+ return {"summarized": 3, "skipped": 0, "errors": 0}
+ monkeypatch.setattr(summarizer, "run_eager_pass", fake_eager)
+
+ cli.cmd_dashboard(projects_dir=str(proj))
+ captured = capsys.readouterr()
+ assert "Summarizing" in captured.err
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_cli.py::test_cmd_dashboard_runs_eager_summarizer_pass -v`
+Expected: FAIL — `summarizer.run_eager_pass` doesn't exist yet.
+
+- [ ] **Step 3: Implement `run_eager_pass` in summarizer.py**
+
+Append to `summarizer.py`:
+
+```python
+def run_eager_pass(db_path, projects_dirs, progress_callback=None):
+ """
+ Summarize the eager set: top-20% (date, cwd) cells by cost, capped at
+ SUMMARY_MAX_CELLS. Returns a dict with summary counts.
+ """
+ cells = rank_cells_by_cost(db_path)
+ total = len(cells)
+ counts = {"summarized": 0, "skipped": 0, "errors": 0}
+ for i, (date, cwd, cost) in enumerate(cells, start=1):
+ result = summarize_cell(
+ date=date, cwd=cwd, cost_usd=cost,
+ db_path=db_path, projects_dirs=projects_dirs,
+ )
+ if result["error"]:
+ counts["errors"] += 1
+ elif result["cached"]:
+ counts["skipped"] += 1
+ else:
+ counts["summarized"] += 1
+ if progress_callback is not None:
+ progress_callback(i, total)
+ return counts
+```
+
+- [ ] **Step 4: Wire it into `cmd_dashboard`**
+
+In `cli.py`, replace the body of `cmd_dashboard` (around lines 390–410) with:
+
+```python
+def cmd_dashboard(projects_dir=None, host=None, port=None):
+ import webbrowser
+ import threading
+ import time as _time
+ import sys
+ import scanner, summarizer
+
+ print("Running scan first...")
+ cmd_scan(projects_dir=projects_dir)
+
+ print("\nGenerating activity summaries...")
+ is_tty = sys.stderr.isatty()
+ def progress(done, total):
+ if total == 0:
+ return
+ if is_tty:
+ pct = 100 * done // total
+ sys.stderr.write(f"\rSummarizing… {done} / {total} cells ({pct}%)")
+ sys.stderr.flush()
+ else:
+ if done == 1 or done == total or done % 5 == 0:
+ sys.stderr.write(f"Summarizing… {done} / {total} cells\n")
+ projects_dirs = (
+ [projects_dir] if projects_dir else scanner.DEFAULT_PROJECTS_DIRS
+ )
+ counts = summarizer.run_eager_pass(
+ db_path=DB_PATH,
+ projects_dirs=projects_dirs,
+ progress_callback=progress,
+ )
+ if is_tty:
+ sys.stderr.write("\n")
+ print(f" {counts['summarized']} summarized, "
+ f"{counts['skipped']} cached, {counts['errors']} errors")
+
+ print("\nStarting dashboard server...")
+ from dashboard import serve
+
+ host = host or os.environ.get("HOST", "localhost")
+ port = int(port or os.environ.get("PORT", "8080"))
+
+ def open_browser():
+ _time.sleep(1.0)
+ webbrowser.open(f"http://{host}:{port}")
+
+ t = threading.Thread(target=open_browser, daemon=True)
+ t.start()
+ serve(host=host, port=port)
+```
+
+- [ ] **Step 5: Run tests**
+
+Run: `python3 -m pytest tests/test_cli.py -v`
+Expected: prior tests + 2 new = all pass.
+
+- [ ] **Step 6: Run full suite**
+
+Run: `python3 -m pytest tests/ -q`
+Expected: all tests pass.
+
+- [ ] **Step 7: Commit**
+
+```bash
+git add cli.py summarizer.py tests/test_cli.py
+git commit -m "feat(cli): run eager summarizer pass after scan in cmd_dashboard"
+```
+
+---
+
+## Task 8: dashboard.py — `/api/daily-summaries` endpoint
+
+**Files:**
+- Modify: `dashboard.py` (`DashboardHandler.do_GET` around line 1410)
+- Test: `tests/test_dashboard.py`
+
+- [ ] **Step 1: Write the failing test**
+
+Add to `tests/test_dashboard.py`:
+
+```python
+def test_api_daily_summaries_returns_cached_cells(tmp_path, monkeypatch):
+ import dashboard, scanner, summarizer
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ monkeypatch.setattr(dashboard, "DB_PATH", db)
+
+ # Seed two turns and two cached summaries for 2026-04-25
+ conn = sqlite3.connect(db)
+    conn.execute("""
+        INSERT INTO turns (session_id, timestamp, model, input_tokens, cwd)
+        VALUES ('s1', '2026-04-25T10:00:00Z', 'claude-haiku-4-5', 1000000, '/p/A')
+    """)
+    conn.execute("""
+        INSERT INTO turns (session_id, timestamp, model, input_tokens, cwd)
+        VALUES ('s2', '2026-04-25T10:30:00Z', 'claude-haiku-4-5', 500000, '/p/B')
+    """)
+ for cwd, acts, cost in [
+ ("/p/A", ["Did A1", "Did A2"], 1.5),
+ ("/p/B", ["Did B"], 0.5),
+ ]:
+ conn.execute("""
+ INSERT INTO daily_summaries
+ (summary_date, project_path, prompt_hash, activities, cost_usd, created_at)
+ VALUES (?, ?, ?, ?, ?, ?)
+ """, ("2026-04-25", cwd, "h", json.dumps(acts), cost, 0.0))
+ conn.commit()
+ conn.close()
+
+ # Mock summarize_cell so the lazy path doesn't actually call claude
+ monkeypatch.setattr(
+ summarizer, "summarize_cell",
+ lambda **kw: {"activities": None, "cached": False, "error": "stub"},
+ )
+
+ response = dashboard.get_daily_summaries("2026-04-25", db_path=db,
+ projects_dirs=[tmp_path])
+ assert response["date"] == "2026-04-25"
+ cells_by_proj = {c["project"]: c for c in response["cells"]}
+ assert cells_by_proj["/p/A"]["activities"] == ["Did A1", "Did A2"]
+ assert cells_by_proj["/p/A"]["error"] is None
+ assert cells_by_proj["/p/B"]["activities"] == ["Did B"]
+
+
+def test_api_daily_summaries_triggers_lazy_summarization(tmp_path, monkeypatch):
+ import dashboard, scanner, summarizer
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ # One turn but no cached summary → triggers lazy path
+ conn = sqlite3.connect(db)
+ conn.execute("""
+ INSERT INTO turns (session_id, timestamp, model, input_tokens, cwd)
+ VALUES ('s1', '2026-04-25T10:00:00Z', 'claude-haiku-4-5', 1000000, '/p/A')
+ """)
+ conn.commit()
+ conn.close()
+
+ called = {"count": 0}
+ def fake_summarize(date, cwd, cost_usd, db_path, projects_dirs, model=None):
+ called["count"] += 1
+ return {"activities": ["lazy result"], "cached": False, "error": None}
+ monkeypatch.setattr(summarizer, "summarize_cell", fake_summarize)
+
+ response = dashboard.get_daily_summaries("2026-04-25", db_path=db,
+ projects_dirs=[tmp_path])
+ assert called["count"] == 1
+ cells_by_proj = {c["project"]: c for c in response["cells"]}
+ assert cells_by_proj["/p/A"]["activities"] == ["lazy result"]
+
+
+def test_api_daily_summaries_endpoint_serves_json(tmp_path, monkeypatch):
+ """Smoke test the actual HTTP route returns JSON."""
+ import dashboard, scanner, summarizer
+ from http.server import HTTPServer
+ import threading, urllib.request
+
+ db = tmp_path / "u.db"
+ scanner.init_db(db).close()
+ monkeypatch.setattr(dashboard, "DB_PATH", db)
+ monkeypatch.setattr(
+ summarizer, "summarize_cell",
+ lambda **kw: {"activities": None, "cached": False, "error": "stub"},
+ )
+
+ server = HTTPServer(("127.0.0.1", 0), dashboard.DashboardHandler)
+ port = server.server_address[1]
+ t = threading.Thread(target=server.serve_forever, daemon=True)
+ t.start()
+ try:
+ with urllib.request.urlopen(
+ f"http://127.0.0.1:{port}/api/daily-summaries?date=2026-04-25",
+ ) as r:
+ body = json.loads(r.read())
+ assert body["date"] == "2026-04-25"
+ assert body["cells"] == []
+ finally:
+ server.shutdown()
+```
+
+- [ ] **Step 2: Run test to verify it fails**
+
+Run: `python3 -m pytest tests/test_dashboard.py::test_api_daily_summaries_returns_cached_cells -v`
+Expected: FAIL — `dashboard.get_daily_summaries` doesn't exist yet.
+
+- [ ] **Step 3: Implement `get_daily_summaries`**
+
+In `dashboard.py`, near the existing `get_dashboard_data` function (around line 24), add:
+
+```python
+def get_daily_summaries(date, db_path=None, projects_dirs=None):
+ """
+ Return cached + lazily-summarized cells for a single date. Triggers
+ summarize_cell synchronously for any (date, cwd) with activity but no
+ cached summary. Relies on ThreadingHTTPServer so other requests aren't
+ blocked while a lazy summary runs.
+ """
+ import summarizer, scanner
+ if db_path is None:
+ db_path = DB_PATH
+ if projects_dirs is None:
+ projects_dirs = scanner.DEFAULT_PROJECTS_DIRS
+ if not _date_is_valid(date):
+ return {"date": date, "cells": [], "error": "invalid_date"}
+
+ conn = sqlite3.connect(db_path)
+ conn.row_factory = sqlite3.Row
+ try:
+ # All cells with activity that day
+ rows = conn.execute("""
+ SELECT cwd, model,
+ SUM(input_tokens) AS inp,
+ SUM(output_tokens) AS out,
+ SUM(cache_read_tokens) AS cr,
+ SUM(cache_creation_tokens) AS cw
+ FROM turns
+ WHERE substr(timestamp, 1, 10) = ?
+ AND cwd IS NOT NULL AND cwd != ''
+ GROUP BY cwd, model
+ """, (date,)).fetchall()
+ cell_costs = {}
+ from cli import calc_cost
+ for r in rows:
+ cost = calc_cost(r["model"], r["inp"] or 0, r["out"] or 0,
+ r["cr"] or 0, r["cw"] or 0)
+ cell_costs[r["cwd"]] = cell_costs.get(r["cwd"], 0.0) + cost
+
+ cached_rows = conn.execute("""
+ SELECT project_path, activities
+ FROM daily_summaries
+ WHERE summary_date = ?
+ """, (date,)).fetchall()
+ cached = {r["project_path"]: json.loads(r["activities"])
+ for r in cached_rows}
+
+ eager_set = {(d, c) for d, c, _ in summarizer.rank_cells_by_cost(db_path)}
+ finally:
+ conn.close()
+
+ cells = []
+ for cwd in sorted(cell_costs.keys()):
+ cost = cell_costs[cwd]
+ is_eager = (date, cwd) in eager_set
+ if cwd in cached:
+ cells.append({
+ "project": cwd, "cost": round(cost, 4),
+ "activities": cached[cwd], "error": None, "eager": is_eager,
+ })
+ else:
+ result = summarizer.summarize_cell(
+ date=date, cwd=cwd, cost_usd=cost,
+ db_path=db_path, projects_dirs=projects_dirs,
+ )
+ cells.append({
+ "project": cwd, "cost": round(cost, 4),
+ "activities": result["activities"],
+ "error": result["error"], "eager": is_eager,
+ })
+ return {"date": date, "cells": cells}
+
+
+def _date_is_valid(date):
+ if not isinstance(date, str) or len(date) != 10:
+ return False
+ try:
+ datetime.strptime(date, "%Y-%m-%d")
+ return True
+ except ValueError:
+ return False
+```
+
+Make sure `from datetime import datetime` is already imported at the top of `dashboard.py` (it is — but verify).
+
+- [ ] **Step 4: Add the route to `do_GET`**
+
+In `dashboard.py`, inside `DashboardHandler.do_GET` (around line 1410), add a new `elif` branch after the `/api/data` branch:
+
+```python
+ elif path == "/api/daily-summaries":
+ from urllib.parse import urlparse, parse_qs
+ qs = parse_qs(urlparse(self.path).query)
+ date = qs.get("date", [""])[0]
+ data = get_daily_summaries(date)
+ body = json.dumps(data).encode("utf-8")
+ self.send_response(200)
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(body)))
+ self.end_headers()
+ self.wfile.write(body)
+```
+
+- [ ] **Step 5: Run tests**
+
+Run: `python3 -m pytest tests/test_dashboard.py -v`
+Expected: prior tests + 3 new = all pass.
+
+- [ ] **Step 6: Run full suite**
+
+Run: `python3 -m pytest tests/ -q`
+Expected: all tests pass.
+
+- [ ] **Step 7: Commit**
+
+```bash
+git add dashboard.py tests/test_dashboard.py
+git commit -m "feat(dashboard): add /api/daily-summaries endpoint with lazy fetch"
+```
+
+---
+
+## Task 9: dashboard.py — UI section + JS
+
+**Files:**
+- Modify: `dashboard.py` (HTML_TEMPLATE)
+
+This task has no Python test (no JS test harness in this repo, matches existing convention). It ends with manual testing.
+
+- [ ] **Step 1: Add CSS for the new section**
+
+In `dashboard.py`, find the `<style>` block inside `HTML_TEMPLATE` and add:
+
+```css
+#daily-activities { margin-top: 32px; }
+#daily-activities h2 { margin-bottom: 12px; }
+#daily-activities .day-row { border: 1px solid #e0e0e0; border-radius: 4px; margin-bottom: 8px; padding: 0; background: #fff; }
+#daily-activities .day-row summary { padding: 10px 14px; cursor: pointer; font-weight: 500; display: flex; gap: 12px; align-items: center; }
+#daily-activities .day-row summary::-webkit-details-marker { display: none; }
+#daily-activities .day-row summary::before { content: "▶"; font-size: 0.7em; color: #888; transition: transform 0.15s; }
+#daily-activities .day-row[open] summary::before { transform: rotate(90deg); }
+#daily-activities .day-meta { color: #888; font-weight: normal; font-size: 0.9em; }
+#daily-activities .day-cost { margin-left: auto; font-variant-numeric: tabular-nums; }
+#daily-activities .project-block { padding: 8px 14px 8px 32px; border-top: 1px solid #f0f0f0; }
+#daily-activities .project-name { font-weight: 500; display: flex; align-items: center; gap: 6px; }
+#daily-activities .project-cost { color: #888; font-variant-numeric: tabular-nums; margin-left: auto; }
+#daily-activities .star { color: #f5a623; }
+#daily-activities ul.activities { margin: 6px 0 0 0; padding-left: 20px; }
+#daily-activities ul.activities li { margin: 2px 0; }
+#daily-activities .spinner { color: #888; font-style: italic; padding: 4px 0; }
+#daily-activities .err { color: #c0392b; padding: 4px 0; }
+#daily-activities .err button { margin-left: 8px; font-size: 0.85em; }
+#daily-activities .banner { padding: 10px 14px; background: #fff3cd; border: 1px solid #ffe599; border-radius: 4px; margin-bottom: 12px; }
+```
+
+- [ ] **Step 2: Add the HTML section**
+
+In `dashboard.py`, find the line containing `