Compare commits


5 Commits

Author SHA1 Message Date
ealmeida c794e1b6d6 docs(observabilidade): CHANGELOG Phase 6C worklog import
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 03:07:37 +01:00
ealmeida afbb06a87d feat(observabilidade): daily systemd timer for worklog import
- Oneshot service invokes sessions-worklog-import.ts --discussion all --since-days 7
- Timer OnCalendar=*-*-* 03:00:00 with Persistent=true (catch-up)
- EnvironmentFile reuses observabilidade-patterns.env (MCP_GATEWAY_TOKEN)
- Logs appended to ~/.claude-work/observabilidade-worklog-import.log

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 03:07:31 +01:00
ealmeida 6251e0d28c feat(observabilidade): 3 cross-detectors worklog × sessions
- #7 actions_never_executed: P1/P2 actions in discussion #33 pending for ≥14 days
- #8 skill_narrative_vs_data: skill reported as problematic in worklogs
  but with outcome=completed in sessions (≥3 matches)
- #9 worklog_pattern_frequency: recurring tokens (≥3 worklogs) in patterns_text
- Integrated into detectPatterns() as an optional section when worklogs > 0

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 03:07:25 +01:00
ealmeida f4adf8674d feat(observabilidade): sessions-worklog-import CLI with pagination
- CLI script with args --discussion 31|32|33|all, --since-days N, --force
- Pagination via MCP gateway (limit 100, tree_view false)
- Progressive JSON-line output + final summary
- Tests: tolerant parseWorklogHtml (h2/h3/h4), upsert idempotency

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 03:07:17 +01:00
ealmeida 11f9833aac feat(observabilidade): worklog_comments table + HTML parser + MCP importer
- worklog_comments schema (id, discussion, parent, dates, staff, parsed fields as JSON)
- Tolerant HTML parser (h2/h3/h4) extracts title, task_ref, duration, work_items,
  files_modified, problems, patterns_text, actions
- worklog-import module with paginated MCP get_discussion_comments
- Shared mcp-client.ts helper (MCP gateway JSON-RPC + SSE)
- Runtime dep: node-html-parser

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 03:07:09 +01:00
11 changed files with 1174 additions and 1 deletion
CHANGELOG.md  +8
@@ -11,6 +11,14 @@ All notable changes to this project will be documented in this file
- systemd user timer `observabilidade-patterns.timer` (Sundays 23:00)
- Auto-opens a Desk ticket when a pattern persists for ≥3 consecutive weeks (severity warning+)
### Added — Observabilidade Phase 6C (Worklog Import)
- `worklog_comments` table + tolerant HTML parser (h2/h3/h4) for Desk discussions #31, #32, #33
- `api/scripts/sessions-worklog-import.ts` CLI with pagination via the MCP gateway
- Daily systemd timer `observabilidade-worklog-import.timer` (03:00)
- 3 cross-detectors: `actions_never_executed`, `skill_narrative_vs_data`, `worklog_pattern_frequency`
- Runtime dep: `node-html-parser`
- Initial backfill: 2312 comments imported (465 + 33 + 1814), spanning 2026-01-27 → 2026-04-23
### Added — Observabilidade (Espelho)
- `/sessions` panel for replaying Claude Code sessions (list + detail timeline)
- `api/scripts/sessions-indexer.ts` indexer (`--full` and `--watch` modes)
api/scripts/sessions-worklog-import.ts  +97
@@ -0,0 +1,97 @@
#!/usr/bin/env tsx
/**
* Imports comments from Desk discussions #31/#32/#33 (worklogs, reflections
* and improvement actions) into the Observabilidade DB.
*
* Usage:
* sessions-worklog-import.ts [--discussion 31|32|33|all] [--since-days N]
* sessions-worklog-import.ts --discussion 31 --page-size 200
*
* Default: --discussion all --since-days 365
*
* Required env:
* MCP_GATEWAY_TOKEN Bearer token for the MCP gateway
*/
import { openSessionsDb } from '../services/sessions/db.js'
import { DEFAULT_DB_PATH } from '../services/sessions/indexer.js'
import { importWorklogDiscussion, type ImportResult } from '../services/sessions/worklog-import.js'
interface Args {
discussion: 'all' | number
sinceDays: number
pageSize: number
}
function parseArgs(argv: string[]): Args {
const a: Args = { discussion: 'all', sinceDays: 365, pageSize: 500 }
for (let i = 0; i < argv.length; i++) {
if (argv[i] === '--discussion') {
const v = argv[++i]
a.discussion = v === 'all' ? 'all' : parseInt(v, 10)
} else if (argv[i] === '--since-days') {
a.sinceDays = parseInt(argv[++i], 10)
} else if (argv[i] === '--page-size') {
a.pageSize = parseInt(argv[++i], 10)
}
}
return a
}
async function main(): Promise<void> {
const args = parseArgs(process.argv.slice(2))
if (!process.env.MCP_GATEWAY_TOKEN) {
console.error('[worklog-import] MCP_GATEWAY_TOKEN não definido. Aborta.')
process.exit(1)
}
const dbPath = process.env.OBSERVABILIDADE_DB ?? DEFAULT_DB_PATH
const db = openSessionsDb(dbPath)
const discussions = args.discussion === 'all' ? [31, 32, 33] : [args.discussion as number]
const sinceIso = new Date(Date.now() - args.sinceDays * 86400_000).toISOString()
console.error(
`[worklog-import] db=${dbPath} discussions=${discussions.join(',')} since=${sinceIso} page_size=${args.pageSize}`,
)
const results: ImportResult[] = []
for (const d of discussions) {
try {
const r = await importWorklogDiscussion(db, d, { sinceIso, pageSize: args.pageSize })
results.push(r)
console.error(
`[worklog-import] #${d}: fetched=${r.fetched} inserted=${r.imported} updated=${r.updated} skipped=${r.skipped} errors=${r.errors}`,
)
} catch (e) {
console.error(`[worklog-import] falha #${d}:`, (e as Error).message)
results.push({
discussion_id: d,
fetched: 0,
imported: 0,
updated: 0,
skipped: 0,
errors: 1,
})
}
}
const summary = {
db: dbPath,
since_iso: sinceIso,
discussions: results,
totals: {
fetched: results.reduce((s, r) => s + r.fetched, 0),
imported: results.reduce((s, r) => s + r.imported, 0),
updated: results.reduce((s, r) => s + r.updated, 0),
skipped: results.reduce((s, r) => s + r.skipped, 0),
errors: results.reduce((s, r) => s + r.errors, 0),
},
total_in_db: db.countWorklogComments(),
}
console.log(JSON.stringify(summary))
db.close()
}
main().catch((err) => {
console.error('[worklog-import] falha fatal:', err)
process.exit(2)
})
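On success the script prints one JSON summary line to stdout (progress lines go to stderr). An illustrative example, with hypothetical counts:

{"db":"/home/ealmeida/.claude-work/sessions.db","since_iso":"2026-04-16T03:00:00.000Z","discussions":[…],"totals":{"fetched":57,"imported":12,"updated":45,"skipped":0,"errors":0},"total_in_db":2312}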
api/services/sessions/db.ts  +131
@@ -27,6 +27,31 @@ export interface PatternRecord {
consecutive_weeks: number
}
export interface WorklogCommentRecord {
id: number
discussion_id: number
created_at: string
staff_id: number | null
title: string | null
task_ref: string | null
duration_sec: number | null
work_items: string[]
files_modified: string[]
problems: { problema: string; solucao: string }[]
patterns_text: string[]
actions: { tipo: string; descricao: string; prioridade: string | null }[]
raw_html: string
imported_at: string
}
export interface WorklogFilters {
discussion_id?: number
task_ref?: string
sinceIso?: string
limit?: number
offset?: number
}
export interface SessionsDb {
upsertSession(meta: SessionMeta): void
upsertMany(metas: SessionMeta[]): void
@@ -37,6 +62,10 @@ export interface SessionsDb {
upsertPattern(p: PatternRecord): void
getPatternsByWeek(week: string): PatternRecord[]
getConsecutiveWeeks(pattern_key: string, uptoWeek: string): number
upsertWorklogComment(c: WorklogCommentRecord): { inserted: boolean }
hasWorklogComment(id: number): boolean
listWorklogComments(filters: WorklogFilters): WorklogCommentRecord[]
countWorklogComments(filters?: WorklogFilters): number
rawDb(): Database.Database
close(): void
}
@@ -81,6 +110,25 @@ CREATE TABLE IF NOT EXISTS patterns (
);
CREATE INDEX IF NOT EXISTS idx_patterns_week ON patterns(week_iso);
CREATE INDEX IF NOT EXISTS idx_patterns_key ON patterns(pattern_key);
CREATE TABLE IF NOT EXISTS worklog_comments (
id INTEGER PRIMARY KEY,
discussion_id INTEGER NOT NULL,
created_at TEXT NOT NULL,
staff_id INTEGER,
title TEXT,
task_ref TEXT,
duration_sec INTEGER,
work_items TEXT NOT NULL,
files_modified TEXT NOT NULL,
problems_json TEXT NOT NULL,
patterns_text TEXT NOT NULL,
actions_json TEXT NOT NULL,
raw_html TEXT NOT NULL,
imported_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_wc_discussion ON worklog_comments(discussion_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_wc_task ON worklog_comments(task_ref);
`
function rowToMeta(row: Record<string, unknown>): SessionMeta {
@@ -272,6 +320,89 @@ export function openSessionsDb(dbPath: string): SessionsDb {
}
return count
},
upsertWorklogComment(c: WorklogCommentRecord): { inserted: boolean } {
const existing = db.prepare('SELECT 1 FROM worklog_comments WHERE id = ?').get(c.id)
const inserted = !existing
db.prepare(`
INSERT INTO worklog_comments (id, discussion_id, created_at, staff_id, title, task_ref,
duration_sec, work_items, files_modified, problems_json, patterns_text, actions_json,
raw_html, imported_at)
VALUES (@id, @discussion_id, @created_at, @staff_id, @title, @task_ref,
@duration_sec, @work_items, @files_modified, @problems_json, @patterns_text, @actions_json,
@raw_html, @imported_at)
ON CONFLICT(id) DO UPDATE SET
discussion_id = excluded.discussion_id,
created_at = excluded.created_at,
staff_id = excluded.staff_id,
title = excluded.title,
task_ref = excluded.task_ref,
duration_sec = excluded.duration_sec,
work_items = excluded.work_items,
files_modified = excluded.files_modified,
problems_json = excluded.problems_json,
patterns_text = excluded.patterns_text,
actions_json = excluded.actions_json,
raw_html = excluded.raw_html,
imported_at = excluded.imported_at
`).run({
id: c.id,
discussion_id: c.discussion_id,
created_at: c.created_at,
staff_id: c.staff_id,
title: c.title,
task_ref: c.task_ref,
duration_sec: c.duration_sec,
work_items: JSON.stringify(c.work_items),
files_modified: JSON.stringify(c.files_modified),
problems_json: JSON.stringify(c.problems),
patterns_text: JSON.stringify(c.patterns_text),
actions_json: JSON.stringify(c.actions),
raw_html: c.raw_html,
imported_at: c.imported_at,
})
return { inserted }
},
hasWorklogComment(id: number): boolean {
return !!db.prepare('SELECT 1 FROM worklog_comments WHERE id = ?').get(id)
},
listWorklogComments(filters: WorklogFilters): WorklogCommentRecord[] {
const parts: string[] = []
const params: Record<string, unknown> = {}
if (filters.discussion_id) { parts.push('discussion_id = @discussion_id'); params.discussion_id = filters.discussion_id }
if (filters.task_ref) { parts.push('task_ref = @task_ref'); params.task_ref = filters.task_ref }
if (filters.sinceIso) { parts.push('created_at >= @since'); params.since = filters.sinceIso }
const where = parts.length ? 'WHERE ' + parts.join(' AND ') : ''
const limit = filters.limit ?? 1000
const offset = filters.offset ?? 0
const rows = db.prepare(`SELECT * FROM worklog_comments ${where} ORDER BY created_at DESC LIMIT @limit OFFSET @offset`)
.all({ ...params, limit, offset }) as Record<string, unknown>[]
return rows.map((r) => ({
id: r.id as number,
discussion_id: r.discussion_id as number,
created_at: r.created_at as string,
staff_id: (r.staff_id as number | null) ?? null,
title: (r.title as string | null) ?? null,
task_ref: (r.task_ref as string | null) ?? null,
duration_sec: (r.duration_sec as number | null) ?? null,
work_items: JSON.parse(r.work_items as string),
files_modified: JSON.parse(r.files_modified as string),
problems: JSON.parse(r.problems_json as string),
patterns_text: JSON.parse(r.patterns_text as string),
actions: JSON.parse(r.actions_json as string),
raw_html: r.raw_html as string,
imported_at: r.imported_at as string,
}))
},
countWorklogComments(filters?: WorklogFilters): number {
const parts: string[] = []
const params: Record<string, unknown> = {}
if (filters?.discussion_id) { parts.push('discussion_id = @discussion_id'); params.discussion_id = filters.discussion_id }
if (filters?.task_ref) { parts.push('task_ref = @task_ref'); params.task_ref = filters.task_ref }
if (filters?.sinceIso) { parts.push('created_at >= @since'); params.since = filters.sinceIso }
const where = parts.length ? 'WHERE ' + parts.join(' AND ') : ''
const row = db.prepare(`SELECT COUNT(*) as c FROM worklog_comments ${where}`).get(params) as { c: number }
return row.c
},
rawDb(): Database.Database {
return db
},
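A minimal usage sketch of the new worklog query methods (the DB path and filter values below are illustrative):

import { openSessionsDb } from './db.js'

const db = openSessionsDb('/home/ealmeida/.claude-work/sessions.db')
// Comments imported from discussion #31 since April 2026, newest first
const recent = db.listWorklogComments({ discussion_id: 31, sinceIso: '2026-04-01T00:00:00Z', limit: 20 })
console.log(recent.map((c) => c.title))
// How many rows are currently stored for discussion #33
console.log(db.countWorklogComments({ discussion_id: 33 }))
db.close()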
api/services/sessions/mcp-client.ts  +69
@@ -0,0 +1,69 @@
/**
* Minimal HTTP client for the MCP gateway (JSON-RPC 2.0 over HTTP).
*
* Supports responses as plain JSON or SSE (text/event-stream). Shared by the
* Observabilidade scripts (patterns + worklog import).
*/
export interface MCPToolCallResult {
content?: Array<{ type: string; text: string }>
isError?: boolean
}
export async function callMcpTool(
tool: string,
args: Record<string, unknown>,
): Promise<MCPToolCallResult> {
const url = process.env.MCP_GATEWAY_URL ?? 'https://gateway.descomplicar.pt/v1/desk-crm/mcp'
const token = process.env.MCP_GATEWAY_TOKEN
if (!token) throw new Error('MCP_GATEWAY_TOKEN não definido')
const body = {
jsonrpc: '2.0',
id: Date.now(),
method: 'tools/call',
params: { name: tool, arguments: args },
}
const res = await fetch(url, {
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
'Content-Type': 'application/json',
Accept: 'application/json, text/event-stream',
},
body: JSON.stringify(body),
})
if (!res.ok) {
const txt = await res.text().catch(() => '')
throw new Error(`MCP gateway ${res.status}: ${txt.slice(0, 300)}`)
}
const raw = await res.text()
let payload: string | null = null
for (const line of raw.split(/\r?\n/)) {
const trimmed = line.trim()
if (!trimmed) continue
if (trimmed.startsWith('data: ')) {
payload = trimmed.slice(6)
break
}
if (trimmed.startsWith('{')) {
payload = trimmed
break
}
}
if (!payload) throw new Error(`MCP resposta sem payload JSON: ${raw.slice(0, 200)}`)
const parsed = JSON.parse(payload) as { error?: unknown; result?: MCPToolCallResult }
if (parsed.error) throw new Error(`MCP error: ${JSON.stringify(parsed.error)}`)
const result = parsed.result as MCPToolCallResult | undefined
if (result?.isError) {
const txt = result.content?.map((c) => c.text).join('\n') ?? ''
throw new Error(`MCP tool ${tool} devolveu isError: ${txt.slice(0, 300)}`)
}
return result ?? {}
}
/** Extracts the first JSON-encoded text block from the MCP result. */
export function extractMcpJsonPayload<T = unknown>(r: MCPToolCallResult): T {
const text = r.content?.find((c) => c.type === 'text')?.text
if (!text) throw new Error('MCP result sem content text')
return JSON.parse(text) as T
}
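For reference, the worklog importer drives these two helpers roughly as follows (a simplified sketch of a single page fetch; the real pagination loop lives in worklog-import.ts):

import { callMcpTool, extractMcpJsonPayload } from './mcp-client.js'

// One page of comments from Desk discussion #31 (MCP_GATEWAY_TOKEN must be set)
const raw = await callMcpTool('get_discussion_comments', { discussion_id: 31, limit: 200, offset: 0 })
const page = extractMcpJsonPayload<{ success?: boolean; comments?: unknown[] }>(raw)
console.log(`fetched ${page.comments?.length ?? 0} comments`)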
api/services/sessions/patterns.ts  +186 -1
@@ -285,6 +285,179 @@ export function detectGrowingComplexity(ctx: DetectCtx, prevWeekStartIso: string
return out
}
/**
* 7. Actions never executed — entries in worklog_comments from discussion 33
* (Improvement Actions) with priority P1/P2 created ≥14 days ago and with no
* git-history commit referencing the same `task_ref` (heuristic).
*/
export function detectActionsNeverExecuted(ctx: DetectCtx): Pattern[] {
// Entries created at least 14 days before the end of the week
const cutoff = new Date(ctx.weekEndIso)
cutoff.setUTCDate(cutoff.getUTCDate() - 14)
const cutoffIso = cutoff.toISOString()
const rows = ctx.db.prepare(`
SELECT id, discussion_id, created_at, task_ref, actions_json, title
FROM worklog_comments
WHERE discussion_id = 33 AND created_at <= ?
ORDER BY created_at DESC
LIMIT 500
`).all(cutoffIso) as Array<{
id: number
discussion_id: number
created_at: string
task_ref: string | null
actions_json: string
title: string | null
}>
if (rows.length === 0) return []
const pendentes: Array<{ id: number; descricao: string; prioridade: string }> = []
for (const r of rows) {
let actions: Array<{ tipo: string; descricao: string; prioridade: string | null }> = []
try { actions = JSON.parse(r.actions_json) } catch {}
for (const a of actions) {
const prio = (a.prioridade ?? '').toUpperCase()
if (prio === 'P1' || prio === 'P2') {
pendentes.push({ id: r.id, descricao: a.descricao.slice(0, 120), prioridade: prio })
if (pendentes.length >= 10) break
}
}
if (pendentes.length >= 10) break
}
if (pendentes.length < 3) return []
return [{
pattern_key: 'actions_never_executed',
title: `${pendentes.length}+ acções P1/P2 pendentes há ≥14 dias`,
description: `Acções de melhoria (disc #33) sem execução visível. Amostra: ${pendentes.slice(0, 3).map((p) => `[${p.prioridade}] ${p.descricao}`).join(' | ')}`,
severity: 'warning',
metric_value: pendentes.length,
sample_session_ids: pendentes.slice(0, 5).map((p) => `worklog:${p.id}`),
affected_count: pendentes.length,
}]
}
/**
* 8. Skill reported as problematic in worklogs but appearing with
* outcome=completed in the actual sessions — a narrative vs. data discrepancy.
*/
export function detectSkillReportedBrokenButCompleted(ctx: DetectCtx): Pattern[] {
// Collect skills mentioned in problems_json and patterns_text of worklogs
// created in the 4 weeks before the end of the window
const windowStart = new Date(ctx.weekEndIso)
windowStart.setUTCDate(windowStart.getUTCDate() - 28)
const windowIso = windowStart.toISOString()
const worklogs = ctx.db.prepare(`
SELECT patterns_text, problems_json
FROM worklog_comments
WHERE discussion_id IN (31, 32) AND created_at >= ?
LIMIT 500
`).all(windowIso) as Array<{ patterns_text: string; problems_json: string }>
if (worklogs.length === 0) return []
// Extract tokens that look like skill names (slash-prefixed)
const skillMentions = new Map<string, number>()
const skillRegex = /\/([a-z][a-z0-9_-]{2,40})\b/gi
for (const w of worklogs) {
const blob = `${w.patterns_text} ${w.problems_json}`.toLowerCase()
for (const m of blob.matchAll(skillRegex)) {
skillMentions.set(m[1], (skillMentions.get(m[1]) ?? 0) + 1)
}
}
if (skillMentions.size === 0) return []
// For each skill mentioned ≥2 times, find sessions that invoked it with outcome=completed
const out: Pattern[] = []
const skillsRelevantes = [...skillMentions.entries()].filter(([, c]) => c >= 2)
for (const [skill, mentions] of skillsRelevantes) {
const rows = ctx.db.prepare(`
SELECT session_id, skills_invoked, outcome
FROM sessions
WHERE started_at >= ? AND started_at <= ?
AND skills_invoked LIKE ? AND outcome = 'completed'
`).all(ctx.weekStartIso, ctx.weekEndIso, `%"${skill}"%`) as Array<{
session_id: string
skills_invoked: string
outcome: string
}>
// Confirm via parse (skills_invoked is a JSON array)
const matches = rows.filter((r) => {
try { return (JSON.parse(r.skills_invoked) as string[]).includes(skill) } catch { return false }
})
if (matches.length >= 3) {
out.push({
pattern_key: `skill_narrative_vs_data:${skill}`,
title: `Skill ${skill}: reportada problemática em ${mentions} worklogs mas ${matches.length} sessões completed`,
description: `Discrepância entre narrativa (worklogs #31/#32) e dados (sessions.outcome). Investigar se o problema é silencioso.`,
severity: 'info',
metric_value: matches.length,
sample_session_ids: matches.slice(0, 5).map((r) => r.session_id),
affected_count: matches.length,
})
}
}
return out
}
/**
* 9. Words/phrases in worklog patterns_text recurring during the week
* (3+ worklogs sharing a common token of ≥5 chars).
*/
export function detectWorklogPatternFrequency(ctx: DetectCtx): Pattern[] {
const rows = ctx.db.prepare(`
SELECT id, patterns_text FROM worklog_comments
WHERE created_at >= ? AND created_at <= ?
`).all(ctx.weekStartIso, ctx.weekEndIso) as Array<{ id: number; patterns_text: string }>
if (rows.length === 0) return []
const tokenCount = new Map<string, { count: number; ids: number[] }>()
const stop = new Set(['para', 'como', 'mais', 'sobre', 'quando', 'apenas', 'entre', 'depois', 'antes', 'pelo', 'pela', 'pelos', 'pelas', 'esta', 'este', 'isso', 'isto', 'cada', 'muito', 'muita', 'outro', 'outra', 'nosso', 'nossa', 'todas', 'todos', 'seja', 'ser', 'ter', 'com', 'sem', 'dos', 'das', 'que', 'nao', 'sim'])
for (const r of rows) {
let items: string[] = []
try { items = JSON.parse(r.patterns_text) } catch {}
const seen = new Set<string>()
for (const t of items) {
const words = t
.toLowerCase()
.normalize('NFD')
.replace(/[\u0300-\u036f]/g, '')
.split(/[^a-z0-9]+/)
.filter((w) => w.length >= 5 && !stop.has(w))
for (const w of words) {
if (seen.has(w)) continue
seen.add(w)
const e = tokenCount.get(w) ?? { count: 0, ids: [] }
e.count++
e.ids.push(r.id)
tokenCount.set(w, e)
}
}
}
const frequent = [...tokenCount.entries()]
.filter(([, v]) => v.count >= 3)
.sort((a, b) => b[1].count - a[1].count)
.slice(0, 5)
if (frequent.length === 0) return []
return [{
pattern_key: 'worklog_pattern_frequency',
title: `Termos recorrentes em ${rows.length} worklogs desta semana`,
description: `Top tokens em patterns_text: ${frequent.map(([w, v]) => `${w}(${v.count})`).join(', ')}`,
severity: 'info',
metric_value: frequent[0][1].count,
sample_session_ids: frequent.flatMap(([, v]) => v.ids.slice(0, 2)).slice(0, 5).map((id) => `worklog:${id}`),
affected_count: rows.length,
}]
}
/** Orchestrates all detectors for the given week. */
export function detectPatterns(
dbWrapper: SessionsDb,
@@ -299,7 +472,8 @@ export function detectPatterns(
}
const prevStart = new Date(weekStart); prevStart.setUTCDate(prevStart.getUTCDate() - 7)
const prevEnd = new Date(weekEnd); prevEnd.setUTCDate(prevEnd.getUTCDate() - 7)
- return [
+ const base: Pattern[] = [
...detectSkillsHighErrorRate(ctx),
...detectToolsLowEfficiency(ctx),
...detectSkillToolPairs(ctx),
@@ -307,6 +481,17 @@ export function detectPatterns(
...detectAbandonedSessions(ctx),
...detectGrowingComplexity(ctx, iso(prevStart), iso(prevEnd)),
]
// Cross-detectors: only run once at least one worklog comment has been imported
const worklogCount = (db.prepare(`SELECT COUNT(*) as c FROM worklog_comments`).get() as { c: number }).c
if (worklogCount > 0) {
base.push(
...detectActionsNeverExecuted(ctx),
...detectSkillReportedBrokenButCompleted(ctx),
...detectWorklogPatternFrequency(ctx),
)
}
return base
}
/** Converts a Pattern + context into a PatternRecord ready to persist. */
api/services/sessions/worklog-import.ts  +351
@@ -0,0 +1,351 @@
/**
* Importer for the comments of Desk discussions #31 (Logs), #32 (Reflections)
* and #33 (Improvement Actions) into the `worklog_comments` table.
*
* Tolerant HTML parser — accepts both formats produced by the `gestao:worklog`
* skill (the old version used inline-styled `<h2>/<h3>`, the new one uses clean
* `<h4>`). Sections are identified by normalized title (e.g. "trabalho
* realizado", "ficheiros modificados", "problemas", "padrões detectados",
* "acções sugeridas").
*/
import { parse, type HTMLElement } from 'node-html-parser'
import type { SessionsDb, WorklogCommentRecord } from './db.js'
import { callMcpTool, extractMcpJsonPayload } from './mcp-client.js'
export interface ParsedWorklogComment {
id: number
discussion_id: number
created_at: string
staff_id: number | null
title: string | null
task_ref: string | null
duration_sec: number | null
work_items: string[]
files_modified: string[]
problems: { problema: string; solucao: string }[]
patterns_text: string[]
actions: { tipo: string; descricao: string; prioridade: string | null }[]
raw_html: string
}
interface RawComment {
id: number
discussion_id: number
content: string
created: unknown
staff_id: number | null
children?: RawComment[]
}
/** Collapses redundant whitespace. */
function norm(s: string): string {
return s.replace(/\s+/g, ' ').trim()
}
/** Converts free-form text to a section key (lowercase, no accents, no punctuation). */
function sectionKey(s: string): string {
return s
.toLowerCase()
.normalize('NFD')
.replace(/[\u0300-\u036f]/g, '')
.replace(/[^a-z0-9 ]/g, ' ')
.replace(/\s+/g, ' ')
.trim()
}
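// Example (illustrative): sectionKey('Padrões Detectados') === 'padroes detectados'
// and sectionKey('Acções Sugeridas:') === 'accoes sugeridas'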
const SECTION_WORK = new Set(['trabalho realizado', 'o que foi feito', 'feito', 'realizado', 'trabalho'])
const SECTION_FILES = new Set(['ficheiros modificados', 'ficheiros alterados', 'files modified', 'ficheiros'])
const SECTION_PROBLEMS = new Set(['problemas solucoes', 'problemas', 'solucoes', 'problemas e solucoes', 'problemas solucao'])
const SECTION_PATTERNS = new Set(['padroes detectados', 'padroes', 'patterns', 'insights'])
const SECTION_ACTIONS = new Set(['accoes sugeridas', 'accoes', 'acoes sugeridas', 'acoes', 'actions', 'accoes de melhoria'])
/** Extracts an ISO date from the title (YYYY-MM-DD [HH:MM]) or returns null. */
function parseDateFromTitle(title: string): string | null {
const m = title.match(/(\d{4})-(\d{2})-(\d{2})(?:[ T](\d{2}):(\d{2}))?/)
if (!m) return null
const [, y, mo, d, hh, mm] = m
if (hh && mm) return `${y}-${mo}-${d}T${hh}:${mm}:00Z`
return `${y}-${mo}-${d}T00:00:00Z`
}
/** Tries to extract "Tarefa: #ID" (task reference) or similar. */
function parseTaskRef(text: string): string | null {
const m = text.match(/(?:Tarefa|Task|Ticket)[:\s]*(#?\d+)/i)
if (m) return m[1].startsWith('#') ? m[1] : `#${m[1]}`
const bare = text.match(/#(\d{3,6})/)
return bare ? `#${bare[1]}` : null
}
/** "~2h 30m" / "~45 min" / "5 minutos" → segundos. */
function parseDuration(text: string): number | null {
const m = text.match(/~?\s*(\d+)\s*h\s*(\d+)?\s*m?/i)
if (m) {
const h = parseInt(m[1], 10)
const mm = m[2] ? parseInt(m[2], 10) : 0
return h * 3600 + mm * 60
}
const mm = text.match(/~?\s*(\d+)\s*(?:min|minutos|m)/i)
if (mm) return parseInt(mm[1], 10) * 60
return null
}
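// Examples (illustrative): parseDuration('~2h 15m') === 8100, parseDuration('~45 min') === 2700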
/** Extracts an element's text, flattening inner HTML to plain text. */
function textOf(el: HTMLElement): string {
return norm(el.text ?? '')
}
/** Collects items from a UL/OL (at the same level) that follows a heading. */
function collectFollowingListItems(heading: HTMLElement): string[] {
const items: string[] = []
let cur: HTMLElement | null = heading.nextElementSibling
while (cur) {
const tag = cur.rawTagName?.toLowerCase()
if (tag && /^h[1-6]$/.test(tag)) break
if (tag === 'ul' || tag === 'ol') {
for (const li of cur.querySelectorAll('li')) {
const t = textOf(li)
if (t) items.push(t)
}
} else if (tag === 'p') {
// Some comments split the UL across multiple <p>; scan for <li> inside them
for (const li of cur.querySelectorAll('li')) {
const t = textOf(li)
if (t) items.push(t)
}
}
cur = cur.nextElementSibling
}
return items
}
/** Parses an item like "[Type] description" or "Type: description (Px)". */
function parseActionItem(raw: string): { tipo: string; descricao: string; prioridade: string | null } {
// Strip a leading "[ ]" or "[x]" checkbox if present
let s = raw.trim().replace(/^\[[\s xX✓]\]\s*/, '')
const bracket = s.match(/^\[([^\]]+)\]\s*(.+)$/)
let tipo = 'Geral'
let rest = s
if (bracket) {
tipo = bracket[1].trim()
rest = bracket[2].trim()
}
const prio = rest.match(/\b(P[0-4])\b/i)
return {
tipo,
descricao: rest,
prioridade: prio ? prio[1].toUpperCase() : null,
}
}
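// Example (illustrative): parseActionItem('[ ] [MCP] Corrigir bug desk-crm-v3 (P1)')
// → { tipo: 'MCP', descricao: 'Corrigir bug desk-crm-v3 (P1)', prioridade: 'P1' }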
/** Parses a problem/solution item. Heuristic: "Problema: X | Solução: Y" or <li> pairs. */
function parseProblemItem(raw: string): { problema: string; solucao: string } {
const s = raw.trim()
const split = s.split(/\s*(?:->|→|\|\s*Solu[çc][ãa]o:|\s*Solu[çc][ãa]o:)\s*/i)
if (split.length >= 2) {
return {
problema: split[0].replace(/^Problema:\s*/i, '').trim(),
solucao: split.slice(1).join(' ').trim(),
}
}
return { problema: s, solucao: '' }
}
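// Example (illustrative): parseProblemItem('Parser HTML frágil → usar node-html-parser')
// → { problema: 'Parser HTML frágil', solucao: 'usar node-html-parser' }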
/** Extracts the "raw" list of every <li> in the HTML (fallback). */
function extractAllLiItems(root: HTMLElement): string[] {
return root
.querySelectorAll('li')
.map((li) => textOf(li))
.filter(Boolean)
}
export function parseWorklogHtml(
html: string,
meta: { id: number; discussion_id: number; created_at: string; staff_id?: number | null },
): ParsedWorklogComment {
const root = parse(html || '')
const headings = root.querySelectorAll('h1, h2, h3, h4, h5, h6')
// Title: first non-empty heading
let title: string | null = null
for (const h of headings) {
const t = textOf(h)
if (t) { title = t; break }
}
// Date: prefer `meta.created_at` when valid; otherwise extract from the title or the text
let createdAt = meta.created_at
if (!createdAt || createdAt === '1970-01-01T00:00:00.000Z' || createdAt.startsWith('1970')) {
const fromTitle = title ? parseDateFromTitle(title) : null
if (fromTitle) createdAt = fromTitle
else {
const fromText = parseDateFromTitle(textOf(root).slice(0, 500))
createdAt = fromText ?? new Date().toISOString()
}
}
const fullText = textOf(root)
const taskRef = parseTaskRef(fullText)
const durationSec = parseDuration(fullText)
// Index sections by normalized key
const sections = new Map<string, HTMLElement>()
for (const h of headings) {
const key = sectionKey(textOf(h))
if (!sections.has(key)) sections.set(key, h)
}
function findSection(target: Set<string>): HTMLElement | null {
for (const [k, el] of sections) {
if (target.has(k)) return el
}
// partial match (e.g. "trabalho realizado manutenção" — prefix match)
for (const [k, el] of sections) {
for (const t of target) {
if (k.startsWith(t) || t.startsWith(k)) return el
}
}
return null
}
const workHeading = findSection(SECTION_WORK)
const filesHeading = findSection(SECTION_FILES)
const problemsHeading = findSection(SECTION_PROBLEMS)
const patternsHeading = findSection(SECTION_PATTERNS)
const actionsHeading = findSection(SECTION_ACTIONS)
const workItems = workHeading ? collectFollowingListItems(workHeading) : []
const filesModified = filesHeading ? collectFollowingListItems(filesHeading) : []
const problemsRaw = problemsHeading ? collectFollowingListItems(problemsHeading) : []
const patternsText = patternsHeading ? collectFollowingListItems(patternsHeading) : []
const actionsRaw = actionsHeading ? collectFollowingListItems(actionsHeading) : []
// Fallback: if the discussion is #33 and no actions section was found, treat
// every <li> as an action (that discussion uses a different format)
let actions = actionsRaw.map(parseActionItem)
if (meta.discussion_id === 33 && actions.length === 0) {
actions = extractAllLiItems(root).map(parseActionItem)
}
const problems = problemsRaw.map(parseProblemItem)
return {
id: meta.id,
discussion_id: meta.discussion_id,
created_at: createdAt,
staff_id: meta.staff_id ?? null,
title,
task_ref: taskRef,
duration_sec: durationSec,
work_items: workItems,
files_modified: filesModified,
problems,
patterns_text: patternsText,
actions,
raw_html: html,
}
}
/** Normalizes the `created` field returned by the Desk MCP (may be an empty object). */
function normalizeMcpDate(v: unknown): string {
if (!v) return ''
if (typeof v === 'string') return v
if (typeof v === 'object') {
const obj = v as Record<string, unknown>
if (typeof obj.date === 'string') return obj.date
if (typeof obj.datetime === 'string') return obj.datetime
}
return ''
}
/** Flattens the comment tree (comments with recursive children). */
function flattenComments(comments: RawComment[]): RawComment[] {
const out: RawComment[] = []
for (const c of comments) {
out.push(c)
if (c.children && c.children.length) {
out.push(...flattenComments(c.children))
}
}
return out
}
export interface ImportResult {
discussion_id: number
fetched: number
imported: number
updated: number
skipped: number
errors: number
}
/**
* Imports all comments of a Desk discussion, paginated via `limit`/`offset`.
* Idempotent by `id` — existing comments are updated (raw_html may change).
*/
export async function importWorklogDiscussion(
db: SessionsDb,
discussionId: number,
opts: { sinceIso?: string; pageSize?: number; maxPages?: number } = {},
): Promise<ImportResult> {
// The desk-crm MCP appears to clamp results at 200/page regardless of the limit.
// Request up to `pageSize` per call and advance the offset until a page comes back empty.
const pageSize = opts.pageSize ?? 200
const maxPages = opts.maxPages ?? 20
const result: ImportResult = {
discussion_id: discussionId,
fetched: 0,
imported: 0,
updated: 0,
skipped: 0,
errors: 0,
}
let offset = 0
for (let page = 0; page < maxPages; page++) {
const raw = await callMcpTool('get_discussion_comments', {
discussion_id: discussionId,
limit: pageSize,
offset,
})
const payload = extractMcpJsonPayload<{
success?: boolean
comments?: RawComment[]
}>(raw)
const pageComments = flattenComments(payload.comments ?? [])
if (pageComments.length === 0) break
result.fetched += pageComments.length
const importedAt = new Date().toISOString()
for (const c of pageComments) {
try {
const createdStr = normalizeMcpDate(c.created)
const parsed = parseWorklogHtml(c.content ?? '', {
id: c.id,
discussion_id: c.discussion_id ?? discussionId,
created_at: createdStr || '',
staff_id: c.staff_id,
})
if (opts.sinceIso && parsed.created_at < opts.sinceIso) {
result.skipped++
continue
}
const record: WorklogCommentRecord = {
...parsed,
imported_at: importedAt,
}
const { inserted } = db.upsertWorklogComment(record)
if (inserted) result.imported++
else result.updated++
} catch (e) {
console.error(`[worklog-import] erro a parsear comentário #${c.id}:`, (e as Error).message)
result.errors++
}
}
// Advance the offset; the loop exits at the top of the next iteration when a page comes back empty.
offset += pageComments.length
}
return result
}
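Because raw_html is persisted alongside the parsed fields, parser improvements can be replayed offline without calling the gateway again. A minimal reprocessing sketch, assuming the same exports and an illustrative DB path:

import { openSessionsDb } from './db.js'
import { parseWorklogHtml } from './worklog-import.js'

const db = openSessionsDb('/home/ealmeida/.claude-work/sessions.db')
for (const c of db.listWorklogComments({ limit: 10_000 })) {
  // Re-run the (possibly improved) parser over the stored HTML and upsert the result
  const reparsed = parseWorklogHtml(c.raw_html, {
    id: c.id,
    discussion_id: c.discussion_id,
    created_at: c.created_at,
    staff_id: c.staff_id,
  })
  db.upsertWorklogComment({ ...reparsed, imported_at: new Date().toISOString() })
}
db.close()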
+175 (vitest tests: parseWorklogHtml, upsert idempotency, detectActionsNeverExecuted)
@@ -0,0 +1,175 @@
import { describe, it, expect, beforeEach } from 'vitest'
import { mkdtempSync } from 'fs'
import { tmpdir } from 'os'
import { join } from 'path'
import { openSessionsDb, type SessionsDb, type WorklogCommentRecord } from '../services/sessions/db.js'
import { parseWorklogHtml } from '../services/sessions/worklog-import.js'
import { detectActionsNeverExecuted, weekRange } from '../services/sessions/patterns.js'
const SAMPLE_H4 = `
<h4>2026-04-15 10:30 - Refactor API sessions</h4>
<p><strong>Projecto:</strong> DashDescomplicar</p>
<p><strong>Tarefa:</strong> #2059 - Observabilidade Espelho</p>
<p><strong>Duração:</strong> ~2h 15m</p>
<h4>Trabalho Realizado</h4>
<ul><li>Criar módulo worklog-import</li><li>Integrar detectores cruzados</li></ul>
<h4>Ficheiros Modificados</h4>
<ul><li><code>api/services/sessions/db.ts</code></li><li><code>api/scripts/sessions-worklog-import.ts</code></li></ul>
<h4>Problemas / Soluções</h4>
<ul><li>Parser HTML frágil → usar node-html-parser</li></ul>
<h4>Padrões Detectados</h4>
<ul><li>MCP gateway responde em SSE ou JSON</li></ul>
<h4>Acções Sugeridas</h4>
<ul><li>[Refactor] Extrair callMcpTool para módulo partilhado P2</li></ul>
`
const SAMPLE_H2 = `
<h2>2026-01-31 - Estratégia Stack</h2>
<p><strong>Duração:</strong> ~2h</p>
<h3>Trabalho Realizado</h3>
<ul><li>Stack Mapeado - 15 sistemas</li></ul>
<h3>Insights</h3>
<ul><li>Posicionamento: Marketing alta performance</li></ul>
`
const SAMPLE_D33 = `
<ul>
<li>[ ] [MCP] Corrigir bug desk-crm-v3 com tabelas de discussões</li>
</ul>
<p><strong>Origem:</strong> Sessão 2026-02-02</p>
<p><strong>Prioridade:</strong> P1</p>
`
describe('parseWorklogHtml', () => {
it('extrai campos de comentário formato <h4>', () => {
const parsed = parseWorklogHtml(SAMPLE_H4, { id: 100, discussion_id: 31, created_at: '' })
expect(parsed.id).toBe(100)
expect(parsed.title).toMatch(/2026-04-15/)
expect(parsed.task_ref).toBe('#2059')
expect(parsed.duration_sec).toBe(2 * 3600 + 15 * 60)
expect(parsed.work_items.length).toBe(2)
expect(parsed.files_modified.length).toBe(2)
expect(parsed.patterns_text.length).toBe(1)
expect(parsed.actions.length).toBe(1)
expect(parsed.actions[0].tipo).toBe('Refactor')
expect(parsed.actions[0].prioridade).toBe('P2')
expect(parsed.created_at.startsWith('2026-04-15')).toBe(true)
})
it('extrai campos de comentário formato <h2>/<h3> (legacy)', () => {
const parsed = parseWorklogHtml(SAMPLE_H2, { id: 64, discussion_id: 31, created_at: '' })
expect(parsed.title).toMatch(/2026-01-31/)
expect(parsed.work_items.length).toBeGreaterThanOrEqual(1)
expect(parsed.duration_sec).toBe(2 * 3600)
expect(parsed.created_at.startsWith('2026-01-31')).toBe(true)
})
it('extrai acções em formato discussão #33 (lista crua)', () => {
const parsed = parseWorklogHtml(SAMPLE_D33, { id: 200, discussion_id: 33, created_at: '2026-02-02T00:00:00Z' })
expect(parsed.actions.length).toBe(1)
expect(parsed.actions[0].tipo).toBe('MCP')
})
})
describe('upsertWorklogComment idempotência', () => {
let db: SessionsDb
beforeEach(() => {
const dir = mkdtempSync(join(tmpdir(), 'obs-wl-'))
db = openSessionsDb(join(dir, 'sessions.db'))
})
it('insert primeiro, update depois', () => {
const base: WorklogCommentRecord = {
id: 42,
discussion_id: 31,
created_at: '2026-04-15T10:30:00Z',
staff_id: 25,
title: 'Test',
task_ref: '#100',
duration_sec: 600,
work_items: ['a'],
files_modified: [],
problems: [],
patterns_text: [],
actions: [],
raw_html: '<h4>Test</h4>',
imported_at: '2026-04-23T00:00:00Z',
}
const r1 = db.upsertWorklogComment(base)
expect(r1.inserted).toBe(true)
expect(db.countWorklogComments()).toBe(1)
const r2 = db.upsertWorklogComment({ ...base, title: 'Updated' })
expect(r2.inserted).toBe(false)
expect(db.countWorklogComments()).toBe(1)
const list = db.listWorklogComments({ discussion_id: 31 })
expect(list[0].title).toBe('Updated')
})
})
describe('detectActionsNeverExecuted', () => {
let db: SessionsDb
beforeEach(() => {
const dir = mkdtempSync(join(tmpdir(), 'obs-act-'))
db = openSessionsDb(join(dir, 'sessions.db'))
})
it('sinaliza acções P1/P2 antigas sem execução', () => {
const old = new Date('2026-03-01T00:00:00Z').toISOString()
for (let i = 0; i < 4; i++) {
db.upsertWorklogComment({
id: 300 + i,
discussion_id: 33,
created_at: old,
staff_id: 25,
title: `Acção ${i}`,
task_ref: `#${1000 + i}`,
duration_sec: null,
work_items: [],
files_modified: [],
problems: [],
patterns_text: [],
actions: [{ tipo: 'MCP', descricao: `Corrigir bug X${i}`, prioridade: i % 2 ? 'P1' : 'P2' }],
raw_html: '',
imported_at: '2026-04-23T00:00:00Z',
})
}
const range = weekRange(new Date('2026-04-22T00:00:00Z'))
const patterns = detectActionsNeverExecuted({
db: db.rawDb(),
weekStartIso: range.start.toISOString(),
weekEndIso: range.end.toISOString(),
})
expect(patterns.length).toBe(1)
expect(patterns[0].pattern_key).toBe('actions_never_executed')
expect(patterns[0].affected_count).toBeGreaterThanOrEqual(3)
})
it('não sinaliza se acções recentes (<14 dias)', () => {
const recent = new Date().toISOString()
for (let i = 0; i < 5; i++) {
db.upsertWorklogComment({
id: 400 + i,
discussion_id: 33,
created_at: recent,
staff_id: 25,
title: null,
task_ref: null,
duration_sec: null,
work_items: [],
files_modified: [],
problems: [],
patterns_text: [],
actions: [{ tipo: 'MCP', descricao: 'x', prioridade: 'P1' }],
raw_html: '',
imported_at: recent,
})
}
const range = weekRange(new Date())
const patterns = detectActionsNeverExecuted({
db: db.rawDb(),
weekStartIso: range.start.toISOString(),
weekEndIso: range.end.toISOString(),
})
expect(patterns.length).toBe(0)
})
})
package-lock.json  +133
@@ -21,6 +21,7 @@
"googleapis": "^171.4.0",
"lucide-react": "^0.563.0",
"mysql2": "^3.11.5",
"node-html-parser": "^7.1.0",
"oidc-client-ts": "^3.0.1",
"pg": "^8.20.0",
"react": "^19.2.0",
@@ -3414,6 +3415,12 @@
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT"
},
"node_modules/boolbase": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
"integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==",
"license": "ISC"
},
"node_modules/brace-expansion": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz",
@@ -3905,6 +3912,22 @@
"node": ">= 8"
}
},
"node_modules/css-select": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/css-select/-/css-select-5.2.2.tgz",
"integrity": "sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==",
"license": "BSD-2-Clause",
"dependencies": {
"boolbase": "^1.0.0",
"css-what": "^6.1.0",
"domhandler": "^5.0.2",
"domutils": "^3.0.1",
"nth-check": "^2.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/css-tree": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz",
@@ -3919,6 +3942,18 @@
"node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0"
}
},
"node_modules/css-what": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/css-what/-/css-what-6.2.2.tgz",
"integrity": "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==",
"license": "BSD-2-Clause",
"engines": {
"node": ">= 6"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/css.escape": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz",
@@ -4384,6 +4419,73 @@
"license": "MIT",
"peer": true
},
"node_modules/dom-serializer": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz",
"integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==",
"license": "MIT",
"dependencies": {
"domelementtype": "^2.3.0",
"domhandler": "^5.0.2",
"entities": "^4.2.0"
},
"funding": {
"url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
}
},
"node_modules/dom-serializer/node_modules/entities": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
"integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
"license": "BSD-2-Clause",
"engines": {
"node": ">=0.12"
},
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/domelementtype": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
"integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
],
"license": "BSD-2-Clause"
},
"node_modules/domhandler": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz",
"integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==",
"license": "BSD-2-Clause",
"dependencies": {
"domelementtype": "^2.3.0"
},
"engines": {
"node": ">= 4"
},
"funding": {
"url": "https://github.com/fb55/domhandler?sponsor=1"
}
},
"node_modules/domutils": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz",
"integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==",
"license": "BSD-2-Clause",
"dependencies": {
"dom-serializer": "^2.0.0",
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3"
},
"funding": {
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
"node_modules/dotenv": {
"version": "16.6.1",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz",
@@ -5837,6 +5939,15 @@
"node": ">= 0.4"
}
},
"node_modules/he": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
"integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
"license": "MIT",
"bin": {
"he": "bin/he"
}
},
"node_modules/hermes-estree": {
"version": "0.25.1",
"resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz",
@@ -7371,6 +7482,16 @@
"url": "https://opencollective.com/node-fetch"
}
},
"node_modules/node-html-parser": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/node-html-parser/-/node-html-parser-7.1.0.tgz",
"integrity": "sha512-iJo8b2uYGT40Y8BTyy5ufL6IVbN8rbm/1QK2xffXU/1a/v3AAa0d1YAoqBNYqaS4R/HajkWIpIfdE6KcyFh1AQ==",
"license": "MIT",
"dependencies": {
"css-select": "^5.1.0",
"he": "1.2.0"
}
},
"node_modules/node-releases": {
"version": "2.0.27",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
@@ -7390,6 +7511,18 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/nth-check": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
"integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
"license": "BSD-2-Clause",
"dependencies": {
"boolbase": "^1.0.0"
},
"funding": {
"url": "https://github.com/fb55/nth-check?sponsor=1"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
package.json  +1
@@ -29,6 +29,7 @@
"googleapis": "^171.4.0",
"lucide-react": "^0.563.0",
"mysql2": "^3.11.5",
"node-html-parser": "^7.1.0",
"oidc-client-ts": "^3.0.1",
"pg": "^8.20.0",
"react": "^19.2.0",
observabilidade-worklog-import.service  +13
@@ -0,0 +1,13 @@
[Unit]
Description=Observabilidade — daily import of Desk worklogs (#31/#32/#33)
After=default.target
[Service]
Type=oneshot
WorkingDirectory=/media/ealmeida/Dados/Dev/DashDescomplicar
ExecStart=/home/ealmeida/.nvm/versions/node/v22.22.2/bin/npx tsx api/scripts/sessions-worklog-import.ts --discussion all --since-days 7
Environment="OBSERVABILIDADE_DB=/home/ealmeida/.claude-work/sessions.db"
Environment="PATH=/home/ealmeida/.nvm/versions/node/v22.22.2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
EnvironmentFile=/home/ealmeida/.claude-work/observabilidade-patterns.env
StandardOutput=append:/home/ealmeida/.claude-work/observabilidade-worklog-import.log
StandardError=append:/home/ealmeida/.claude-work/observabilidade-worklog-import.log
observabilidade-worklog-import.timer  +10
@@ -0,0 +1,10 @@
[Unit]
Description=Observabilidade — daily import of Desk worklogs
[Timer]
OnCalendar=*-*-* 03:00:00
Persistent=true
[Install]
WantedBy=default.target
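Assuming both unit files live under ~/.config/systemd/user/, the timer would typically be activated with systemctl --user daemon-reload followed by systemctl --user enable --now observabilidade-worklog-import.timer, and verified with systemctl --user list-timers; the import output then accumulates in ~/.claude-work/observabilidade-worklog-import.log.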