- Updated `inspectChains` to accept an optional `editorContent` parameter, enabling real-time inspection without relying on potentially stale vault data.
- Introduced `buildNoteIndexFromContent` to build the graph index directly from provided content.
- Improved handling of template-matching profiles in `ChainWorkbenchModal`, ensuring accurate context during chain inspections.
- Added debug logging for better traceability of the chain inspection process.

1193 lines · 44 KiB · TypeScript
/**
|
|
* Chain Inspector v0: analyzes relationships, chains, gaps, and backward paths.
|
|
*/
|
|
|
|
import type { App } from "obsidian";
|
|
import { TFile } from "obsidian";
|
|
import type { SectionContext } from "./sectionContext";
|
|
import type { IndexedEdge, SectionNode } from "./graphIndex";
|
|
import { buildNoteIndex, loadNeighborNote } from "./graphIndex";
|
|
import type { ChainRolesConfig, ChainTemplatesConfig } from "../dictionary/types";
|
|
import { splitIntoSections } from "../mapping/sectionParser";
|
|
import { normalizeLinkTarget, headingsMatch } from "../unresolvedLink/linkHelpers";
|
|
import type { EdgeVocabulary } from "../vocab/types";
|
|
import { parseEdgeVocabulary } from "../vocab/parseEdgeVocabulary";
|
|
import { VocabularyLoader } from "../vocab/VocabularyLoader";
|
|
import { applySeverityPolicy } from "./severityPolicy";
|
|
import { getLogger, loggerRegistry } from "../utils/logger";
|
|
|
|
// Don't create logger instance at import time - create it when needed
|
|
// This ensures it always reads current log levels from registry
|
|
function getChainInspectorLogger() {
|
|
return getLogger("chainInspector");
|
|
}
|
|
|
|
/** Options controlling which edges are considered and how far paths are traversed. */
export interface InspectorOptions {
  /** Include note-level (non-section) links in the analysis. */
  includeNoteLinks: boolean;
  /** Include candidate (non-explicit) edges in the analysis. */
  includeCandidates: boolean;
  /** Maximum traversal depth for forward/backward path exploration. */
  maxDepth: number;
  /** Which direction(s) to traverse paths from the current section. */
  direction: "forward" | "backward" | "both";
  maxTemplateMatches?: number; // Optional: limit per template; undefined = no limit (e.g. Chain Workbench)
  /** Default max distinct matches per template. Overridable by chain_templates.yaml defaults.matching.max_matches_per_template. */
  maxMatchesPerTemplateDefault?: number; // default: 2
  /** Loop protection: max collected assignments per template. Overridable via chain_templates.yaml defaults.matching.max_assignments_collected. */
  maxAssignmentsCollectedDefault?: number; // default: 1000
  /** When true, log template-matching details (candidates per slot, collected/complete counts, returned matches) to console. */
  debugLogging?: boolean;
}
|
|
|
|
/** A single gap/consistency finding produced by the inspector. */
export interface Finding {
  /** Machine-readable finding code, e.g. "missing_edges", "dangling_target". */
  code: string;
  /** Severity level used for sorting (error > warn > info). */
  severity: "info" | "warn" | "error";
  /** Human-readable description of the finding. */
  message: string;
  /** Location the finding refers to (file plus optional section heading). */
  evidence?: {
    file: string;
    sectionHeading: string | null;
  };
}
|
|
|
|
/** A directed edge adjacent to the current section (incoming or outgoing). */
export interface NeighborEdge {
  /** Edge type exactly as written in the note (not canonicalized). */
  rawEdgeType: string;
  /** The node on the far side of the edge (for incoming edges this holds the source). */
  target: { file: string; heading: string | null };
  /** Whether the edge is section-scoped, note-scoped, or only a candidate. */
  scope: "section" | "note" | "candidate";
  /** Where the edge was found in its source note. */
  evidence: {
    file: string;
    sectionHeading: string | null;
    lineRange?: { start: number; end: number };
  };
}
|
|
|
|
/** A traversal path: an ordered list of nodes and the edges connecting them. */
export interface Path {
  nodes: Array<{ file: string; heading: string | null }>;
  /** Edges between consecutive nodes; `from`/`to` are "file:heading" node keys. */
  edges: Array<{ rawEdgeType: string; from: string; to: string }>;
}
|
|
|
|
/** Result of matching a chain template against the local graph neighborhood. */
export interface TemplateMatch {
  templateName: string;
  /** Match quality score. */
  score: number;
  /** Mapping of template slot ids to the concrete nodes assigned to them. */
  slotAssignments: {
    [slotId: string]: {
      nodeKey: string;
      file: string;
      heading?: string | null;
      noteType: string;
    };
  };
  /** Slot ids the matcher could not fill. */
  missingSlots: string[];
  /** Number of required template links satisfied by actual edges. */
  satisfiedLinks: number;
  /** Total number of links the template requires. */
  requiredLinks: number;
  /** Concrete edges that satisfied template roles (for display/debugging). */
  roleEvidence?: Array<{
    from: string;
    to: string;
    edgeRole: string;
    rawEdgeType: string;
  }>;
  /** True when every slot has an assignment. */
  slotsComplete: boolean;
  /** True when every required link is satisfied. */
  linksComplete: boolean;
  /** Overall confidence classification of the match. */
  confidence: "confirmed" | "plausible" | "weak";
}
|
|
|
|
/** Full report produced by `inspectChains` for one section context. */
export interface ChainInspectorReport {
  /** The section the inspection was run for. */
  context: {
    file: string;
    heading: string | null;
    zoneKind: string;
  };
  /** The options the inspection was run with. */
  settings: InspectorOptions;
  /** Edges adjacent to the current section. */
  neighbors: {
    incoming: NeighborEdge[];
    outgoing: NeighborEdge[];
  };
  /** Traversal paths starting from (forward) or ending at (backward) the section. */
  paths: {
    forward: Path[];
    backward: Path[];
  };
  /** Gap/consistency findings for the section. */
  findings: Finding[];
  /** Aggregate statistics about edge-to-role resolution. */
  analysisMeta?: {
    edgesTotal: number;
    edgesWithCanonical: number;
    edgesUnmapped: number;
    roleMatches: { [roleName: string]: number };
    topNUsed?: number;
  };
  /** Template matches, when chain templates were supplied. */
  templateMatches?: TemplateMatch[];
  /** Provenance of the templates configuration used. */
  templatesSource?: {
    path: string;
    status: "loaded" | "error" | "using-last-known-good";
    loadedAt: number | null;
    templateCount: number;
  };
  /** Which template-matching profile was applied and where it came from. */
  templateMatchingProfileUsed?: {
    name: string;
    resolvedFrom: "settings" | "default";
    profileConfig?: {
      required_links?: boolean;
      min_slots_filled_for_gap_findings?: number;
      min_score_for_gap_findings?: number;
    };
  };
}
|
|
|
|
// Sections shorter than this (after stripping headings and edge blocks) are
// not flagged by the missing_edges check.
const MIN_TEXT_LENGTH_FOR_EDGE_CHECK = 200;
// Role names considered "causal" for the no_causal_roles gap check.
const CAUSAL_ROLE_NAMES = ["causal", "influences", "enables_constraints"];
|
|
|
|
/**
|
|
* Resolve canonical edge type from raw edge type using edge vocabulary.
|
|
*/
|
|
export function resolveCanonicalEdgeType(
|
|
rawEdgeType: string,
|
|
edgeVocabulary: EdgeVocabulary | null
|
|
): { canonical?: string; matchedBy: "canonical" | "alias" | "none" } {
|
|
if (!edgeVocabulary) {
|
|
return { matchedBy: "none" };
|
|
}
|
|
|
|
// Check if raw type is already canonical
|
|
if (edgeVocabulary?.byCanonical.has(rawEdgeType)) {
|
|
return { canonical: rawEdgeType, matchedBy: "canonical" };
|
|
}
|
|
|
|
// Check if raw type is an alias (case-insensitive lookup)
|
|
const lowerRaw = rawEdgeType.toLowerCase();
|
|
const canonical = edgeVocabulary.aliasToCanonical.get(lowerRaw);
|
|
if (canonical) {
|
|
return { canonical, matchedBy: "alias" };
|
|
}
|
|
|
|
return { matchedBy: "none" };
|
|
}
|
|
|
|
/**
|
|
* Filter edges based on options.
|
|
*/
|
|
function filterEdges(
|
|
edges: IndexedEdge[],
|
|
options: InspectorOptions
|
|
): IndexedEdge[] {
|
|
return edges.filter((edge) => {
|
|
if (edge.scope === "candidate" && !options.includeCandidates) {
|
|
return false;
|
|
}
|
|
if (edge.scope === "note" && !options.includeNoteLinks) {
|
|
return false;
|
|
}
|
|
return true;
|
|
});
|
|
}
|
|
|
|
/**
|
|
* Get neighbors (incoming/outgoing) for current section context.
|
|
*/
|
|
function getNeighbors(
|
|
edges: IndexedEdge[],
|
|
context: SectionContext,
|
|
options: InspectorOptions
|
|
): { incoming: NeighborEdge[]; outgoing: NeighborEdge[] } {
|
|
const filtered = filterEdges(edges, options);
|
|
|
|
const currentSection: { file: string; heading: string | null } = {
|
|
file: context.file,
|
|
heading: context.heading,
|
|
};
|
|
|
|
const incoming: NeighborEdge[] = [];
|
|
const outgoing: NeighborEdge[] = [];
|
|
|
|
// Helper: check if edge target matches current file (by path or basename)
|
|
const currentFileBasename = context.file.split("/").pop()?.replace(/\.md$/, "") || "";
|
|
const matchesCurrentFile = (targetFile: string): boolean => {
|
|
if (targetFile === currentSection.file) return true;
|
|
if (targetFile === currentFileBasename) return true;
|
|
if (targetFile === `${currentFileBasename}.md`) return true;
|
|
// Check if targetFile is basename of currentSection.file
|
|
const currentBasename = currentSection.file.split("/").pop()?.replace(/\.md$/, "") || "";
|
|
return targetFile === currentBasename;
|
|
};
|
|
|
|
for (const edge of filtered) {
|
|
// Check if edge targets current section
|
|
// Match exact section (file + heading) OR note-level link (file only, heading null)
|
|
const targetsCurrentSection =
|
|
matchesCurrentFile(edge.target.file) &&
|
|
(headingsMatch(edge.target.heading, currentSection.heading) ||
|
|
(edge.target.heading === null && currentSection.heading !== null));
|
|
|
|
if (targetsCurrentSection) {
|
|
// Incoming edge
|
|
incoming.push({
|
|
rawEdgeType: edge.rawEdgeType,
|
|
target: {
|
|
file:
|
|
"sectionHeading" in edge.source
|
|
? edge.source.file
|
|
: edge.source.file,
|
|
heading:
|
|
"sectionHeading" in edge.source
|
|
? edge.source.sectionHeading
|
|
: null,
|
|
},
|
|
scope: edge.scope,
|
|
evidence: edge.evidence,
|
|
});
|
|
}
|
|
|
|
// Check if edge originates from current section
|
|
const sourceMatches =
|
|
("sectionHeading" in edge.source
|
|
? headingsMatch(edge.source.sectionHeading, currentSection.heading) &&
|
|
edge.source.file === currentSection.file
|
|
: edge.scope === "note" && edge.source.file === currentSection.file) &&
|
|
edge.source.file === currentSection.file;
|
|
|
|
if (sourceMatches) {
|
|
// Outgoing edge
|
|
outgoing.push({
|
|
rawEdgeType: edge.rawEdgeType,
|
|
target: edge.target,
|
|
scope: edge.scope,
|
|
evidence: edge.evidence,
|
|
});
|
|
}
|
|
}
|
|
|
|
// Sort for deterministic output
|
|
const sortEdges = (a: NeighborEdge, b: NeighborEdge) => {
|
|
if (a.rawEdgeType !== b.rawEdgeType) {
|
|
return a.rawEdgeType.localeCompare(b.rawEdgeType);
|
|
}
|
|
if (a.target.file !== b.target.file) {
|
|
return a.target.file.localeCompare(b.target.file);
|
|
}
|
|
const aHeading = a.target.heading || "";
|
|
const bHeading = b.target.heading || "";
|
|
return aHeading.localeCompare(bHeading);
|
|
};
|
|
|
|
incoming.sort(sortEdges);
|
|
outgoing.sort(sortEdges);
|
|
|
|
return { incoming, outgoing };
|
|
}
|
|
|
|
/**
|
|
* Traverse paths from current node.
|
|
*/
|
|
function traversePaths(
|
|
edges: IndexedEdge[],
|
|
context: SectionContext,
|
|
options: InspectorOptions
|
|
): { forward: Path[]; backward: Path[] } {
|
|
const filtered = filterEdges(edges, options);
|
|
const currentSection: { file: string; heading: string | null } = {
|
|
file: context.file,
|
|
heading: context.heading,
|
|
};
|
|
|
|
const forward: Path[] = [];
|
|
const backward: Path[] = [];
|
|
|
|
if (options.direction === "forward" || options.direction === "both") {
|
|
forward.push(...traverseForward(filtered, currentSection, options.maxDepth));
|
|
}
|
|
|
|
if (options.direction === "backward" || options.direction === "both") {
|
|
backward.push(...traverseBackward(filtered, currentSection, options.maxDepth));
|
|
}
|
|
|
|
return { forward, backward };
|
|
}
|
|
|
|
/**
 * Depth-first forward traversal from `start`, following outgoing edges up to
 * `maxDepth` hops. Returns the accumulated paths (each with at least one hop).
 *
 * NOTE(review): `visited` is global across all branches — each node is
 * expanded at most once, so alternative routes through an already-visited
 * node are dropped rather than recorded. This prevents cycles and bounds
 * work, at the cost of path completeness.
 */
function traverseForward(
  edges: IndexedEdge[],
  start: { file: string; heading: string | null },
  maxDepth: number
): Path[] {
  const paths: Path[] = [];
  const visited = new Set<string>();

  function visit(
    current: { file: string; heading: string | null },
    path: Path,
    depth: number
  ) {
    if (depth > maxDepth) return;

    // Node identity is "file:heading" (empty heading for note-level nodes).
    const nodeKey = `${current.file}:${current.heading || ""}`;
    if (visited.has(nodeKey)) return;
    visited.add(nodeKey);

    // Find outgoing edges
    for (const edge of edges) {
      // Source matches when it is a section-scoped edge from this exact
      // section, or a note-level edge from this file.
      const sourceMatches =
        ("sectionHeading" in edge.source
          ? headingsMatch(edge.source.sectionHeading, current.heading) &&
            edge.source.file === current.file
          : edge.scope === "note" && edge.source.file === current.file) &&
        edge.source.file === current.file;

      if (sourceMatches) {
        // Extend the current path by this edge (append target node + edge).
        const newPath: Path = {
          nodes: [...path.nodes, edge.target],
          edges: [
            ...path.edges,
            {
              rawEdgeType: edge.rawEdgeType,
              from: nodeKey,
              to: `${edge.target.file}:${edge.target.heading || ""}`,
            },
          ],
        };

        if (depth < maxDepth) {
          // Recurse deeper; the extended path is recorded by the callee.
          visit(edge.target, newPath, depth + 1);
        } else {
          // Depth limit reached: record the path as-is.
          paths.push(newPath);
        }
      }
    }

    // Record the path accumulated up to this node (only if it extends beyond
    // the start node, i.e. contains at least one hop).
    if (path.nodes.length > 1) {
      paths.push(path);
    }
  }

  visit(start, { nodes: [start], edges: [] }, 0);
  return paths;
}
|
|
|
|
/**
 * Depth-first backward traversal from `start`, following incoming edges
 * (edges whose target is the current node) up to `maxDepth` hops. Paths are
 * built source-first, so the start node ends up at the tail of each path.
 *
 * NOTE(review): like traverseForward, `visited` is global across branches,
 * so each node is expanded at most once and alternative routes through an
 * already-visited node are dropped.
 */
function traverseBackward(
  edges: IndexedEdge[],
  start: { file: string; heading: string | null },
  maxDepth: number
): Path[] {
  const paths: Path[] = [];
  const visited = new Set<string>();

  function visit(
    current: { file: string; heading: string | null },
    path: Path,
    depth: number
  ) {
    if (depth > maxDepth) return;

    // Node identity is "file:heading" (empty heading for note-level nodes).
    const nodeKey = `${current.file}:${current.heading || ""}`;
    if (visited.has(nodeKey)) return;
    visited.add(nodeKey);

    // Find incoming edges (edges that target current node)
    // Match exact section OR note-level link (heading null matches any section in that file)
    // Also match by basename (since edges might use basename instead of full path)
    const currentFileBasename = current.file.split("/").pop()?.replace(/\.md$/, "") || "";
    const matchesCurrentFile = (targetFile: string): boolean => {
      if (targetFile === current.file) return true;
      if (targetFile === currentFileBasename) return true;
      if (targetFile === `${currentFileBasename}.md`) return true;
      const currentBasename = current.file.split("/").pop()?.replace(/\.md$/, "") || "";
      return targetFile === currentBasename;
    };

    for (const edge of edges) {
      const targetsCurrentNode =
        matchesCurrentFile(edge.target.file) &&
        (headingsMatch(edge.target.heading, current.heading) ||
          (edge.target.heading === null && current.heading !== null));

      if (targetsCurrentNode) {
        // Normalize the edge source into a node (note-level sources have no heading).
        const sourceNode: { file: string; heading: string | null } =
          "sectionHeading" in edge.source
            ? {
                file: edge.source.file,
                heading: edge.source.sectionHeading,
              }
            : { file: edge.source.file, heading: null };

        const sourceKey = `${sourceNode.file}:${sourceNode.heading || ""}`;
        // Prepend the source node/edge so the path reads source -> ... -> start.
        const newPath: Path = {
          nodes: [sourceNode, ...path.nodes],
          edges: [
            {
              rawEdgeType: edge.rawEdgeType,
              from: sourceKey,
              to: nodeKey,
            },
            ...path.edges,
          ],
        };

        if (depth < maxDepth) {
          // Recurse toward the source; the extended path is recorded by the callee.
          visit(sourceNode, newPath, depth + 1);
        } else {
          // Depth limit reached: record the path as-is.
          paths.push(newPath);
        }
      }
    }

    // Record the path accumulated up to this node (only if it contains at
    // least one hop beyond the start node).
    if (path.nodes.length > 1) {
      paths.push(path);
    }
  }

  visit(start, { nodes: [start], edges: [] }, 0);
  return paths;
}
|
|
|
|
/**
 * Compute gap-heuristic findings for the current section.
 *
 * Checks performed, in order:
 *  - missing_edges: substantial section text but no explicit section-scoped edges
 *  - one_sided_connectivity: only incoming or only outgoing edges
 *  - only_candidates: candidate edges exist but no explicit edges
 *  - dangling_target / dangling_target_heading: edge target file or heading missing
 *  - no_causal_roles: edges exist but none map to a causal role
 * Findings are sorted by severity (desc), then code (asc).
 *
 * @param allEdges All edges (including neighbor notes) for incoming edge detection
 * @param currentEdges Current note edges only for outgoing edge detection
 * @param context Section being inspected (file, heading, index).
 * @param sections Parsed section nodes of the current note.
 * @param sectionContent Raw markdown content of the current section.
 * @param chainRoles Role configuration, or null (role check skipped).
 * @param edgeVocabulary Vocabulary for canonicalization, or null (raw types used).
 * @param app Obsidian app, used for link resolution and file-cache lookups.
 * @param options Controls which edge scopes participate in the checks.
 * @returns Sorted findings list (empty when the section cannot be located).
 */
function computeFindings(
  allEdges: IndexedEdge[], // All edges (including neighbor notes) for incoming edge detection
  currentEdges: IndexedEdge[], // Current note edges only for outgoing edge detection
  context: SectionContext,
  sections: SectionNode[],
  sectionContent: string,
  chainRoles: ChainRolesConfig | null,
  edgeVocabulary: EdgeVocabulary | null,
  app: App,
  options: InspectorOptions
): Finding[] {
  const findings: Finding[] = [];

  // Find current section content (normalized heading match for block-id variants)
  const currentSection = sections.find(
    (s) => s.file === context.file && headingsMatch(s.heading, context.heading)
  );

  if (!currentSection) {
    return findings;
  }

  // Filter edges for current section (outgoing edges only - from current note)
  const sectionEdges = filterEdges(currentEdges, options).filter((edge) => {
    if (edge.scope === "candidate") return false; // Exclude candidates for gap checks
    if (edge.scope === "note") return false; // Exclude note-level for section checks

    const sourceMatches =
      "sectionHeading" in edge.source
        ? headingsMatch(edge.source.sectionHeading, context.heading) &&
          edge.source.file === context.file
        : false;

    return sourceMatches;
  });

  // Check: missing_edges
  // Measure "real" prose length: strip markdown headings and [!edge] callout
  // blocks so only narrative text counts toward the threshold.
  const textWithoutHeadings = sectionContent
    .split("\n")
    .filter((line) => !line.match(/^#{1,6}\s/))
    .join("\n");
  const textWithoutEdgeBlocks = textWithoutHeadings.replace(
    />\s*\[!edge\][\s\S]*?(?=\n\n|\n>|$)/g,
    ""
  );
  const textLength = textWithoutEdgeBlocks.trim().length;

  if (textLength > MIN_TEXT_LENGTH_FOR_EDGE_CHECK && sectionEdges.length === 0) {
    findings.push({
      code: "missing_edges",
      severity: "warn",
      message: `Section has ${textLength} characters of content but no explicit edges`,
      evidence: {
        file: context.file,
        sectionHeading: context.heading,
      },
    });
  }

  // Check: one_sided_connectivity
  // Use same matching logic as getNeighbors for consistency
  const currentFileBasename = context.file.split("/").pop()?.replace(/\.md$/, "") || "";
  const matchesCurrentFile = (targetFile: string): boolean => {
    if (targetFile === context.file) return true; // Full path match
    if (targetFile === currentFileBasename) return true; // Basename match
    if (targetFile === `${currentFileBasename}.md`) return true; // Basename with .md
    // Check if targetFile is basename of context.file (redundant but consistent with getNeighbors)
    const currentBasename = context.file.split("/").pop()?.replace(/\.md$/, "") || "";
    return targetFile === currentBasename;
  };

  // Count incoming edges (edges targeting current section)
  // Use filterEdges to respect includeCandidates option (consistent with getNeighbors)
  const filteredAllEdges = filterEdges(allEdges, options);
  const incoming = filteredAllEdges.filter((edge) => {
    // Don't manually filter candidates here - filterEdges already did that based on options.includeCandidates
    const fileMatches = matchesCurrentFile(edge.target.file);
    // Note-level targets (heading === null) count as hits on any section of the file.
    const headingMatches = headingsMatch(edge.target.heading, context.heading) ||
      (edge.target.heading === null && context.heading !== null);
    return fileMatches && headingMatches;
  });

  // Count outgoing edges (edges originating from current section)
  // Use filterEdges to respect includeCandidates option (consistent with getNeighbors)
  // Note: sectionEdges already filters out candidates/note-level for missing_edges check,
  // but for one_sided_connectivity we need to use the same filtering as getNeighbors
  const filteredCurrentEdges = filterEdges(currentEdges, options);
  const outgoing = filteredCurrentEdges.filter((edge) => {
    const sourceMatches =
      "sectionHeading" in edge.source
        ? headingsMatch(edge.source.sectionHeading, context.heading) &&
          edge.source.file === context.file
        : edge.scope === "note" && edge.source.file === context.file;
    return sourceMatches;
  });

  // Debug logging for findings (use effective counts that match report.neighbors)
  getChainInspectorLogger().debug(`computeFindings: incomingEffective=${incoming.length}, outgoingEffective=${outgoing.length}, allEdges=${allEdges.length}, filteredAllEdges=${filteredAllEdges.length}`);

  if (incoming.length > 0 && outgoing.length === 0) {
    findings.push({
      code: "one_sided_connectivity",
      severity: "info",
      message: "Section has only incoming edges, no outgoing edges",
      evidence: {
        file: context.file,
        sectionHeading: context.heading,
      },
    });
  } else if (outgoing.length > 0 && incoming.length === 0) {
    findings.push({
      code: "one_sided_connectivity",
      severity: "info",
      message: "Section has only outgoing edges, no incoming edges",
      evidence: {
        file: context.file,
        sectionHeading: context.heading,
      },
    });
  }

  // Check: only_candidates
  // Candidate edges originating from this exact section.
  const candidateEdges = currentEdges.filter(
    (edge) =>
      edge.scope === "candidate" &&
      ("sectionHeading" in edge.source
        ? headingsMatch(edge.source.sectionHeading, context.heading) &&
          edge.source.file === context.file
        : false)
  );
  if (candidateEdges.length > 0 && sectionEdges.length === 0) {
    findings.push({
      code: "only_candidates",
      severity: "info",
      message: "Section has only candidate edges, no explicit edges",
      evidence: {
        file: context.file,
        sectionHeading: context.heading,
      },
    });
  }

  // Check: dangling_target
  // Check outgoing edges from current section for missing files/headings
  for (const edge of sectionEdges) {
    const targetFile = edge.target.file;
    const targetHeading = edge.target.heading;

    // Try to resolve target file
    const resolvedFile = app.metadataCache.getFirstLinkpathDest(
      normalizeLinkTarget(targetFile),
      context.file
    );

    if (!resolvedFile) {
      // File does not exist
      const sourceHeading = "sectionHeading" in edge.source ? edge.source.sectionHeading : null;
      findings.push({
        code: "dangling_target",
        severity: "error",
        message: `Target file does not exist: ${targetFile}`,
        evidence: {
          file: context.file,
          sectionHeading: sourceHeading,
        },
      });
      continue;
    }

    // If heading is specified, check if it exists in the file
    if (targetHeading !== null) {
      const targetContent = app.metadataCache.getFileCache(resolvedFile);
      if (targetContent) {
        // Use file cache to check headings
        const headings = targetContent.headings || [];
        const headingExists = headings.some(
          (h) => headingsMatch(h.heading, targetHeading)
        );

        if (!headingExists) {
          const sourceHeading = "sectionHeading" in edge.source ? edge.source.sectionHeading : null;
          findings.push({
            code: "dangling_target_heading",
            severity: "warn",
            message: `Target heading not found in ${targetFile}: ${targetHeading}`,
            evidence: {
              file: context.file,
              sectionHeading: sourceHeading,
            },
          });
        }
      } else {
        // File cache not available - metadataCache might not have processed the file yet
        // Skip heading check in this case (file exists but cache not ready)
        // This is acceptable as metadataCache will eventually update
      }
    }
  }

  // Check: no_causal_roles (if chainRoles available)
  // Use canonical types for role matching if edgeVocabulary is available
  if (chainRoles) {
    const hasCausalRole = sectionEdges.some((edge) => {
      // First try canonical type if available
      const { canonical } = resolveCanonicalEdgeType(edge.rawEdgeType, edgeVocabulary);
      const edgeTypeToCheck = canonical || edge.rawEdgeType;

      for (const [roleName, role] of Object.entries(chainRoles?.roles || {})) {
        if (CAUSAL_ROLE_NAMES.includes(roleName)) {
          // Check both canonical and raw type (permissive)
          if (role.edge_types.includes(edgeTypeToCheck) || role.edge_types.includes(edge.rawEdgeType)) {
            return true;
          }
        }
      }
      return false;
    });

    if (sectionEdges.length > 0 && !hasCausalRole) {
      findings.push({
        code: "no_causal_roles",
        severity: "info",
        message: "Section has edges but none match causal roles",
        evidence: {
          file: context.file,
          sectionHeading: context.heading,
        },
      });
    }
  }

  // Sort findings: severity desc, code asc
  findings.sort((a, b) => {
    const severityOrder = { error: 3, warn: 2, info: 1 };
    const severityDiff =
      (severityOrder[b.severity] || 0) - (severityOrder[a.severity] || 0);
    if (severityDiff !== 0) return severityDiff;
    return a.code.localeCompare(b.code);
  });

  return findings;
}
|
|
|
|
/**
|
|
* Inspect chains for current section context.
|
|
* @param editorContent Optional: If provided, use this content instead of reading from vault.
|
|
* This is useful when the file was just modified and vault cache is stale.
|
|
*/
|
|
export async function inspectChains(
|
|
app: App,
|
|
context: SectionContext,
|
|
options: InspectorOptions,
|
|
chainRoles: ChainRolesConfig | null,
|
|
edgeVocabularyPath?: string,
|
|
chainTemplates?: ChainTemplatesConfig | null,
|
|
templatesLoadResult?: { path: string; status: string; loadedAt: number | null; templateCount: number },
|
|
templateMatchingProfileName?: string,
|
|
editorContent?: string
|
|
): Promise<ChainInspectorReport> {
|
|
// Build index for current note
|
|
const currentFile = app.vault.getAbstractFileByPath(context.file);
|
|
if (!currentFile || !("path" in currentFile)) {
|
|
throw new Error(`File not found: ${context.file}`);
|
|
}
|
|
// Type guard: check if it's a TFile (has extension property)
|
|
if (!("extension" in currentFile) || currentFile.extension !== "md") {
|
|
throw new Error(`File not found or not a markdown file: ${context.file}`);
|
|
}
|
|
|
|
// Use editor content if provided, otherwise read from vault
|
|
const { buildNoteIndex, buildNoteIndexFromContent } = await import("./graphIndex");
|
|
const { edges: currentEdges, sections } = editorContent
|
|
? await buildNoteIndexFromContent(app, currentFile as TFile, editorContent)
|
|
: await buildNoteIndex(app, currentFile as TFile);
|
|
|
|
// Collect all outgoing targets to load neighbor notes
|
|
// Respect includeNoteLinks and includeCandidates toggles
|
|
const outgoingTargets = new Set<string>();
|
|
for (const edge of currentEdges) {
|
|
// Skip candidates if not included
|
|
if (edge.scope === "candidate" && !options.includeCandidates) continue;
|
|
// Skip note-level links if not included
|
|
if (edge.scope === "note" && !options.includeNoteLinks) continue;
|
|
|
|
// Only consider edges from current context
|
|
if (
|
|
("sectionHeading" in edge.source
|
|
? headingsMatch(edge.source.sectionHeading, context.heading) &&
|
|
edge.source.file === context.file
|
|
: edge.scope === "note" && edge.source.file === context.file) &&
|
|
edge.source.file === context.file
|
|
) {
|
|
outgoingTargets.add(edge.target.file);
|
|
}
|
|
}
|
|
|
|
// Find notes that link to current note (for incoming edges)
|
|
// Use Obsidian's metadataCache.getBacklinksForFile() for efficient lookup
|
|
// This is much faster than scanning all files manually
|
|
const notesLinkingToCurrent = new Set<string>();
|
|
|
|
try {
|
|
// @ts-ignore - getBacklinksForFile exists but may not be in TS definitions
|
|
const backlinks = app.metadataCache.getBacklinksForFile(currentFile as TFile);
|
|
if (backlinks) {
|
|
// backlinks is a Map-like structure: source file path -> array of references
|
|
for (const sourcePath of backlinks.keys()) {
|
|
if (sourcePath === context.file) continue; // Skip self
|
|
notesLinkingToCurrent.add(sourcePath);
|
|
}
|
|
getChainInspectorLogger().debug(`Found ${notesLinkingToCurrent.size} notes linking to current note via getBacklinksForFile`);
|
|
} else {
|
|
getChainInspectorLogger().debug("getBacklinksForFile returned null/undefined");
|
|
}
|
|
} catch (e) {
|
|
// Fallback: if getBacklinksForFile is not available, use manual scan
|
|
// This should rarely happen, but provides compatibility
|
|
getChainInspectorLogger().warn("getBacklinksForFile not available, falling back to manual scan", e);
|
|
|
|
const currentNoteBasename = (currentFile as TFile).basename;
|
|
const currentNotePath = context.file;
|
|
const currentNotePathWithoutExt = currentNotePath.replace(/\.md$/, "");
|
|
|
|
const allMarkdownFiles = app.vault.getMarkdownFiles();
|
|
for (const file of allMarkdownFiles) {
|
|
if (file.path === currentNotePath) continue;
|
|
|
|
try {
|
|
const content = await app.vault.cachedRead(file);
|
|
const wikilinkRegex = /\[\[([^\]]+?)\]\]/g;
|
|
let match: RegExpExecArray | null;
|
|
while ((match = wikilinkRegex.exec(content)) !== null) {
|
|
if (!match[1]) continue;
|
|
|
|
const normalizedLink = normalizeLinkTarget(match[1].trim());
|
|
if (!normalizedLink) continue;
|
|
|
|
const resolvedFile = app.metadataCache.getFirstLinkpathDest(
|
|
normalizedLink,
|
|
file.path
|
|
);
|
|
|
|
if (resolvedFile && resolvedFile.path === currentNotePath) {
|
|
notesLinkingToCurrent.add(file.path);
|
|
break;
|
|
}
|
|
|
|
// Fallback string matching
|
|
if (
|
|
normalizedLink === currentNoteBasename ||
|
|
normalizedLink === currentNotePath ||
|
|
normalizedLink === currentNotePathWithoutExt ||
|
|
normalizedLink.replace(/\.md$/, "") === currentNotePathWithoutExt
|
|
) {
|
|
notesLinkingToCurrent.add(file.path);
|
|
break;
|
|
}
|
|
}
|
|
} catch {
|
|
continue;
|
|
}
|
|
}
|
|
}
|
|
|
|
// Load neighbor notes lazily to find incoming edges
|
|
const allEdges = [...currentEdges];
|
|
|
|
// Resolve and deduplicate outgoing neighbor files
|
|
const outgoingNeighborFiles = new Set<string>(); // Use Set to deduplicate by resolved path
|
|
const outgoingNeighborFileMap = new Map<string, TFile>(); // original target -> resolved TFile
|
|
|
|
// Resolve outgoing targets (may be basenames without folder)
|
|
getChainInspectorLogger().debug(`Loading outgoing neighbor notes: ${outgoingTargets.size}`);
|
|
for (const targetFile of outgoingTargets) {
|
|
if (targetFile === context.file) continue; // Skip self
|
|
|
|
const neighborFile = await loadNeighborNote(app, targetFile, context.file);
|
|
if (neighborFile) {
|
|
const resolvedPath = neighborFile.path;
|
|
outgoingNeighborFiles.add(resolvedPath);
|
|
outgoingNeighborFileMap.set(targetFile, neighborFile);
|
|
}
|
|
}
|
|
|
|
// Load edges from outgoing neighbors
|
|
for (const neighborPath of outgoingNeighborFiles) {
|
|
const neighborFile = app.vault.getAbstractFileByPath(neighborPath);
|
|
if (neighborFile && "extension" in neighborFile && neighborFile.extension === "md") {
|
|
const { edges: neighborEdges } = await buildNoteIndex(app, neighborFile as TFile);
|
|
allEdges.push(...neighborEdges);
|
|
getChainInspectorLogger().debug(`Loaded ${neighborEdges.length} edges from ${neighborPath} (outgoing neighbor)`);
|
|
}
|
|
}
|
|
|
|
// Load notes that link to current note (for incoming edges and backward paths)
|
|
// Deduplicate with outgoing neighbors (same file might be both incoming and outgoing)
|
|
const allNeighborFiles = new Set<string>([...outgoingNeighborFiles]);
|
|
|
|
if (options.debugLogging) {
|
|
getChainInspectorLogger().debug(`Loading ${notesLinkingToCurrent.size} notes that link to current note`);
|
|
}
|
|
for (const sourceFile of notesLinkingToCurrent) {
|
|
if (sourceFile === context.file) continue; // Skip self
|
|
if (allNeighborFiles.has(sourceFile)) continue; // Skip if already loaded as outgoing neighbor
|
|
|
|
const sourceNoteFile = await loadNeighborNote(app, sourceFile, context.file);
|
|
if (sourceNoteFile) {
|
|
allNeighborFiles.add(sourceNoteFile.path);
|
|
const { edges: sourceEdges } = await buildNoteIndex(app, sourceNoteFile);
|
|
getChainInspectorLogger().debug(`Loaded ${sourceEdges.length} edges from ${sourceFile}`);
|
|
|
|
// Debug: Show all edges from this file (first 5) to understand what we're working with
|
|
if (sourceEdges.length > 0) {
|
|
getChainInspectorLogger().debug(`Sample edges from ${sourceFile} (showing first 5):`);
|
|
for (const edge of sourceEdges.slice(0, 5)) {
|
|
const sourceInfo = "sectionHeading" in edge.source
|
|
? `${edge.source.file}#${edge.source.sectionHeading || "null"}`
|
|
: `${edge.source.file} (note-level)`;
|
|
getChainInspectorLogger().debug(` - ${edge.rawEdgeType} from ${sourceInfo} -> ${edge.target.file}#${edge.target.heading || "null"}`);
|
|
}
|
|
}
|
|
|
|
// Debug: log ALL edges in this neighbor note that target the current note
// (any section). Matching is deliberately loose — full path OR basename —
// because indexed edges may store only a basename link target.
const currentFileBasename = (currentFile as TFile).basename;
const edgesTargetingCurrentNote = sourceEdges.filter((e) => {
  // Exact vault-relative path match.
  if (e.target.file === context.file) return true;
  // Basename match (e.g., "Krebserkrankung von Sushi" matches "03_Experiences/Events/Krebserkrankung von Sushi.md")
  if (e.target.file === currentFileBasename) return true;
  // Basename with a stray ".md" stripped. NOTE(review): TFile.basename
  // normally excludes the extension already — presumably defensive; confirm.
  if (e.target.file === currentFileBasename.replace(/\.md$/, "")) return true;
  return false;
});
if (edgesTargetingCurrentNote.length > 0) {
  getChainInspectorLogger().debug(`✓ Found ${edgesTargetingCurrentNote.length} edges from ${sourceFile} targeting current note (${context.file}):`);
  for (const edge of edgesTargetingCurrentNote) {
    const sourceInfo = "sectionHeading" in edge.source
      ? `${edge.source.file}#${edge.source.sectionHeading || "null"}`
      : `${edge.source.file} (note-level)`;
    getChainInspectorLogger().debug(` - ${edge.rawEdgeType} from ${sourceInfo} -> ${edge.target.file}#${edge.target.heading || "null"} [scope: ${edge.scope}]`);
  }
  // Explain why these edges might still not count as neighbors: replay the
  // same file/heading matching logic getNeighbors uses and log each verdict.
  const currentSectionKey = `${context.file}:${context.heading || "null"}`;
  getChainInspectorLogger().debug(`Current section: ${currentSectionKey}`);
  const debugFileBasename = context.file.split("/").pop()?.replace(/\.md$/, "") || "";
  const matchesCurrentFileDebug = (targetFile: string): boolean => {
    if (targetFile === context.file) return true;
    if (targetFile === debugFileBasename) return true;
    if (targetFile === `${debugFileBasename}.md`) return true;
    // Also accept TFile-derived basename variants (see currentFileBasename above).
    if (targetFile === currentFileBasename) return true;
    if (targetFile === currentFileBasename.replace(/\.md$/, "")) return true;
    return false;
  };
  for (const edge of edgesTargetingCurrentNote) {
    const targetKey = `${edge.target.file}:${edge.target.heading || "null"}`;
    const fileMatches = matchesCurrentFileDebug(edge.target.file);
    // A null target heading is a note-level link; treat it as matching any
    // concrete section heading of the current note.
    const headingMatches = headingsMatch(edge.target.heading, context.heading) ||
      (edge.target.heading === null && context.heading !== null);
    const matches = fileMatches && headingMatches;
    getChainInspectorLogger().debug(` - Edge target: ${targetKey}, file matches: ${fileMatches ? "YES" : "NO"}, heading matches: ${headingMatches ? "YES" : "NO"}, should match: ${matches ? "YES" : "NO"}`);
  }
} else {
  getChainInspectorLogger().debug(`✗ No edges from ${sourceFile} target current note (${context.file})`);
  getChainInspectorLogger().debug(` - Edges in this file target: ${[...new Set(sourceEdges.map(e => e.target.file))].slice(0, 3).join(", ")}...`);
}
// Accumulate the neighbor note's edges into the combined edge set.
allEdges.push(...sourceEdges);
// NOTE(review): this else pairs with a load-success check opened above this chunk.
} else {
  getChainInspectorLogger().debug(`Could not load neighbor note: ${sourceFile}`);
}
// NOTE(review): closes the loop over neighbor source files opened above this chunk.
}
|
|
|
|
// Section content for gap analysis. Prefer live editor content when the
// caller supplied it (real-time inspection) over possibly-stale vault data.
const content = editorContent || await app.vault.read(currentFile as TFile);
const sectionsWithContent = splitIntoSections(content);
const currentSectionContent =
  sectionsWithContent[context.sectionIndex]?.content || "";

// Neighbors over the combined edge set (current note + loaded neighbor notes).
getChainInspectorLogger().debug(`Total edges after loading neighbors: ${allEdges.length} (current: ${currentEdges.length}, neighbors: ${allEdges.length - currentEdges.length})`);
const neighbors = getNeighbors(allEdges, context, options);
getChainInspectorLogger().debug(`Neighbors found: ${neighbors.incoming.length} incoming, ${neighbors.outgoing.length} outgoing`);

// Path traversal likewise sees edges contributed by neighbor notes.
const paths = traversePaths(allEdges, context, options);
|
|
|
|
// Optionally load the edge vocabulary used to map raw edge types onto
// canonical ones. A load/parse failure is non-fatal: we warn and continue
// with edgeVocabulary = null (edges then simply stay unmapped).
let edgeVocabulary: EdgeVocabulary | null = null;
if (edgeVocabularyPath) {
  try {
    const vocabText = await VocabularyLoader.loadText(app, edgeVocabularyPath);
    edgeVocabulary = parseEdgeVocabulary(vocabText);
  } catch (error) {
    getChainInspectorLogger().warn(`Could not load edge vocabulary from ${edgeVocabularyPath}:`, error);
  }
}
|
|
|
|
// Compute findings. allEdges feeds incoming-edge checks (neighbor notes can
// point at us); currentEdges feeds outgoing checks (only the current note can
// have outgoing edges). computeFindings applies filterEdges internally, so
// includeCandidates is honored consistently with report.neighbors.
const effectiveIncomingCount = neighbors.incoming.length;
const effectiveOutgoingCount = neighbors.outgoing.length;
const effectiveFilteredEdges = filterEdges(allEdges, options);
getChainInspectorLogger().debug(`Before computeFindings: effectiveIncoming=${effectiveIncomingCount}, effectiveOutgoing=${effectiveOutgoingCount}, effectiveFilteredEdges=${effectiveFilteredEdges.length}`);

// `let` because template matching below appends to / remaps findings.
let findings = computeFindings(
  allEdges, // Use allEdges so we can detect incoming edges from neighbor notes
  currentEdges, // Use currentEdges for outgoing edge checks (only current note can have outgoing edges)
  context,
  sections,
  currentSectionContent,
  chainRoles,
  edgeVocabulary,
  app,
  options
);
|
|
|
|
// analysisMeta: summary statistics over the filtered edge set — how many raw
// edge types resolved to a canonical type, and how often each chain role
// matched a canonical edge type.
const filteredEdges = filterEdges(allEdges, options);
let edgesWithCanonical = 0;
let edgesUnmapped = 0;
const roleMatches: { [roleName: string]: number } = {};

for (const edge of filteredEdges) {
  const { canonical, matchedBy } = resolveCanonicalEdgeType(edge.rawEdgeType, edgeVocabulary);
  // matchedBy === "none" means the vocabulary could not map this raw type.
  if (matchedBy !== "none") {
    edgesWithCanonical++;
  } else {
    edgesUnmapped++;
  }

  // Tally role matches using the canonical edge type, when one resolved.
  if (chainRoles && canonical) {
    for (const [roleName, role] of Object.entries(chainRoles?.roles || {})) {
      if (role.edge_types.includes(canonical)) {
        roleMatches[roleName] = (roleMatches[roleName] || 0) + 1;
      }
    }
  }
}

// Sort roleMatches keys so report output is deterministic across runs.
const sortedRoleMatches: { [roleName: string]: number } = {};
const sortedRoleNames = Object.keys(roleMatches).sort();
for (const roleName of sortedRoleNames) {
  const count = roleMatches[roleName];
  // Guard satisfies noUncheckedIndexedAccess-style indexing.
  if (count !== undefined) {
    sortedRoleMatches[roleName] = count;
  }
}

// `let` because topNUsed may be attached later during template matching.
let analysisMeta: ChainInspectorReport["analysisMeta"] = {
  edgesTotal: filteredEdges.length,
  edgesWithCanonical,
  edgesUnmapped,
  roleMatches: sortedRoleMatches,
};
|
|
|
|
// Resolve the template-matching profile up front (even when no templates are
// configured) so the start-of-run header can log what will be used.
// NOTE(review): only the two built-in profile names ("discovery",
// "decisioning") resolve here; any other name silently leaves `profile`
// undefined, and downstream thresholds fall back to hard-coded defaults.
const profileName = templateMatchingProfileName || "discovery";
let profile: import("../dictionary/types").TemplateMatchingProfile | undefined;
let resolvedFrom: "settings" | "default" = "default";

if (chainTemplates?.defaults?.profiles) {
  if (profileName === "discovery" && chainTemplates.defaults.profiles.discovery) {
    profile = chainTemplates.defaults.profiles.discovery;
    // "settings" when the caller explicitly requested this profile;
    // "default" when we fell back to "discovery" ourselves.
    resolvedFrom = templateMatchingProfileName ? "settings" : "default";
  } else if (profileName === "decisioning" && chainTemplates.defaults.profiles.decisioning) {
    profile = chainTemplates.defaults.profiles.decisioning;
    resolvedFrom = templateMatchingProfileName ? "settings" : "default";
  }
}

// Start-of-run header: resolved profile plus the effective inspector options.
const requiredLinks = profile?.required_links ?? false;

getChainInspectorLogger().info(
  `Run: profile=${profileName} (resolvedFrom=${resolvedFrom}) required_links=${requiredLinks} includeCandidates=${options.includeCandidates} maxDepth=${options.maxDepth} direction=${options.direction}`
);
|
|
|
|
// Template matching: score configured chain templates against the local edge
// neighborhood, keep the top matches per template, and derive template-based
// findings (missing slots, missing link constraints, weak roles).
let templateMatches: TemplateMatch[] = [];
let templatesSource: ChainInspectorReport["templatesSource"] = undefined;
let templateMatchingProfileUsed: ChainInspectorReport["templateMatchingProfileUsed"] = undefined;

if (chainTemplates && templatesLoadResult) {
  // Record where the templates came from (path / status / timestamp / count).
  templatesSource = {
    path: templatesLoadResult.path,
    status: templatesLoadResult.status as "loaded" | "error" | "using-last-known-good",
    loadedAt: templatesLoadResult.loadedAt,
    templateCount: templatesLoadResult.templateCount,
  };

  // Echo the resolved profile and its gap-finding thresholds in the report.
  templateMatchingProfileUsed = {
    name: profileName,
    resolvedFrom,
    profileConfig: profile ? {
      required_links: profile.required_links,
      min_slots_filled_for_gap_findings: profile.min_slots_filled_for_gap_findings,
      min_score_for_gap_findings: profile.min_score_for_gap_findings,
    } : undefined,
  };

  try {
    // Dynamic import defers loading the matcher until it is actually needed.
    const { matchTemplates } = await import("./templateMatching");
    const allTemplateMatches = await matchTemplates(
      app,
      { file: context.file, heading: context.heading },
      allEdges,
      chainTemplates,
      chainRoles,
      edgeVocabulary,
      options,
      profile
    );

    // Rank matches: confidence (confirmed > plausible > weak), then score
    // descending, then templateName ascending for deterministic output.
    const confidenceRank = (c: "confirmed" | "plausible" | "weak"): number => {
      if (c === "confirmed") return 3;
      if (c === "plausible") return 2;
      return 1; // weak
    };

    const sortedMatches = [...allTemplateMatches].sort((a, b) => {
      // First by confidence rank (desc)
      const rankDiff = confidenceRank(b.confidence) - confidenceRank(a.confidence);
      if (rankDiff !== 0) return rankDiff;

      // Then by score (desc)
      if (b.score !== a.score) return b.score - a.score;

      // Finally by templateName (asc)
      return a.templateName.localeCompare(b.templateName);
    });

    // Keep at most topN distinct matches per template. maxTemplateMatches
    // undefined means "no limit" (e.g. Chain Workbench wants everything).
    const topN = options.maxTemplateMatches !== undefined ? options.maxTemplateMatches : Number.MAX_SAFE_INTEGER;
    const byTemplate = new Map<string, typeof allTemplateMatches>();
    for (const m of sortedMatches) {
      const list = byTemplate.get(m.templateName) ?? [];
      list.push(m);
      byTemplate.set(m.templateName, list);
    }
    templateMatches = [];
    for (const list of byTemplate.values()) {
      templateMatches.push(...(topN < Number.MAX_SAFE_INTEGER ? list.slice(0, topN) : list));
    }
    // Re-sort after per-template truncation so the flat list stays globally
    // ordered (the grouping above destroyed the overall ordering).
    templateMatches.sort((a, b) => {
      const rankDiff = confidenceRank(b.confidence) - confidenceRank(a.confidence);
      if (rankDiff !== 0) return rankDiff;
      if (b.score !== a.score) return b.score - a.score;
      return a.templateName.localeCompare(b.templateName);
    });

    // Surface the applied per-template limit in the report metadata.
    if (analysisMeta) {
      analysisMeta.topNUsed = topN;
    }

    // Derive findings from each retained match, honoring profile thresholds.
    for (const match of templateMatches) {
      // Template definition lookup, for a template-level required_links override.
      const templateDef = chainTemplates?.templates?.find(t => t.name === match.templateName);

      // Effective required_links precedence:
      // template.matching > profile > defaults.matching > false.
      const effectiveRequiredLinks = templateDef?.matching?.required_links ??
        profile?.required_links ??
        chainTemplates?.defaults?.matching?.required_links ??
        false;

      // missing_slot_<slotId>: emitted only when the match is substantial
      // enough (enough slots filled AND score at/above the profile floor).
      if (match.missingSlots.length > 0) {
        const slotsFilled = Object.keys(match.slotAssignments).length;
        const minSlotsFilled = profile?.min_slots_filled_for_gap_findings ?? 2;
        const minScore = profile?.min_score_for_gap_findings ?? 0;

        if (slotsFilled >= minSlotsFilled && match.score >= minScore) {
          for (const slotId of match.missingSlots) {
            findings.push({
              code: `missing_slot_${slotId}`,
              severity: applySeverityPolicy(profileName as "discovery" | "decisioning" | undefined, `missing_slot_${slotId}`, "warn"),
              message: `Template ${match.templateName}: missing slot ${slotId} near current section`,
              evidence: {
                file: context.file,
                sectionHeading: context.heading,
              },
            });
          }
        }
      }

      // missing_link_constraints: only emitted in strict mode
      // (required_links=true). Soft mode suppresses this finding even when
      // link constraints are incomplete.
      if (effectiveRequiredLinks && match.slotsComplete && match.requiredLinks > 0 && !match.linksComplete) {
        findings.push({
          code: "missing_link_constraints",
          severity: applySeverityPolicy(profileName as "discovery" | "decisioning" | undefined, "missing_link_constraints", "info"),
          message: `Template ${match.templateName}: slots complete but link constraints missing (${match.satisfiedLinks}/${match.requiredLinks} satisfied)`,
          evidence: {
            file: context.file,
            sectionHeading: context.heading,
          },
        });
      }

      // weak_chain_roles: link constraints are satisfied, but none of the
      // satisfying roles is in the causal set.
      if (match.roleEvidence && match.roleEvidence.length > 0) {
        const hasCausalRole = match.roleEvidence.some((ev) =>
          CAUSAL_ROLE_NAMES.includes(ev.edgeRole)
        );
        if (!hasCausalRole && match.satisfiedLinks > 0) {
          findings.push({
            code: "weak_chain_roles",
            severity: applySeverityPolicy(profileName as "discovery" | "decisioning" | undefined, "weak_chain_roles", "info"),
            message: `Template ${match.templateName}: links satisfied but only by non-causal roles`,
            evidence: {
              file: context.file,
              sectionHeading: context.heading,
            },
          });
        }
      }
    }
  } catch (e) {
    // Template matching is best-effort: log and continue with the base report.
    getChainInspectorLogger().error("Template matching failed:", e);
  }
}
|
|
|
|
// Final pass: let the severity policy adjust every finding (including those
// produced by computeFindings earlier) according to the active profile.
findings = findings.map((finding) => ({
  ...finding,
  severity: applySeverityPolicy(profileName as "discovery" | "decisioning" | undefined, finding.code, finding.severity),
}));

// Assemble the inspector report.
return {
  context: {
    file: context.file,
    heading: context.heading,
    zoneKind: context.zoneKind,
  },
  settings: options,
  neighbors,
  paths,
  findings,
  analysisMeta,
  // Omit templateMatches entirely when empty, rather than reporting [].
  templateMatches: templateMatches.length > 0 ? templateMatches : undefined,
  templatesSource,
  templateMatchingProfileUsed,
};
// NOTE(review): closes the enclosing function, whose header is above this chunk.
}
|