- Introduced commands for exporting graph data and displaying chains from the current note. - Enhanced linting functionality with options for showing canonical hints and specifying chain traversal direction. - Added new utility functions for graph traversal and index building. - Updated settings interface to include new options for user configuration.
145 lines
4.3 KiB
TypeScript
import type { App, TFile } from "obsidian";
|
|
import type { Vocabulary } from "../vocab/Vocabulary";
|
|
import type { ParsedEdge } from "../parser/types";
|
|
import type { GraphBuildResult, NodeMeta, EdgeRecord } from "./types";
|
|
import { parseEdgesFromCallouts } from "../parser/parseEdgesFromCallouts";
|
|
import { extractFrontmatterId } from "../parser/parseFrontmatter";
|
|
import { normalizeTargetToBasename } from "./resolveTarget";
|
|
|
|
/**
|
|
* Build graph from vault markdown files.
|
|
* Uses frontmatter.id as the primary node identifier.
|
|
*/
|
|
export async function buildGraph(
|
|
app: App,
|
|
vocabulary: Vocabulary
|
|
): Promise<GraphBuildResult> {
|
|
const filePathToId = new Map<string, string>();
|
|
const basenameLowerToPath = new Map<string, string>();
|
|
const idToMeta = new Map<string, NodeMeta>();
|
|
const edges: EdgeRecord[] = [];
|
|
const warnings: GraphBuildResult["warnings"] = {
|
|
missingFrontmatterId: [],
|
|
missingTargetFile: [],
|
|
missingTargetId: [],
|
|
};
|
|
|
|
// Get all markdown files
|
|
const markdownFiles = app.vault.getMarkdownFiles();
|
|
|
|
// First pass: build node maps
|
|
for (const file of markdownFiles) {
|
|
try {
|
|
const content = await app.vault.read(file);
|
|
const id = extractFrontmatterId(content);
|
|
|
|
// Always add to basenameLowerToPath for target resolution
|
|
const basenameLower = file.basename.toLowerCase();
|
|
basenameLowerToPath.set(basenameLower, file.path);
|
|
|
|
if (!id) {
|
|
warnings.missingFrontmatterId.push(file.path);
|
|
continue; // Skip files without ID (don't add to filePathToId or idToMeta)
|
|
}
|
|
|
|
// Extract optional title from frontmatter (simple extraction)
|
|
let title: string | undefined;
|
|
const titleMatch = content.match(/^title\s*:\s*(.+)$/m);
|
|
if (titleMatch && titleMatch[1]) {
|
|
let titleValue = titleMatch[1].trim();
|
|
if ((titleValue.startsWith('"') && titleValue.endsWith('"')) ||
|
|
(titleValue.startsWith("'") && titleValue.endsWith("'"))) {
|
|
titleValue = titleValue.slice(1, -1);
|
|
}
|
|
title = titleValue;
|
|
}
|
|
|
|
// Populate maps
|
|
filePathToId.set(file.path, id);
|
|
|
|
const meta: NodeMeta = {
|
|
id,
|
|
path: file.path,
|
|
basename: file.basename,
|
|
title,
|
|
};
|
|
idToMeta.set(id, meta);
|
|
} catch (error) {
|
|
console.error(`Error processing file ${file.path}:`, error);
|
|
// Continue with other files
|
|
}
|
|
}
|
|
|
|
// Second pass: build edges
|
|
for (const file of markdownFiles) {
|
|
try {
|
|
const content = await app.vault.read(file);
|
|
const srcId = filePathToId.get(file.path);
|
|
|
|
if (!srcId) {
|
|
// File has no ID, skip edge processing
|
|
continue;
|
|
}
|
|
|
|
const parsedEdges = parseEdgesFromCallouts(content);
|
|
|
|
for (const parsedEdge of parsedEdges) {
|
|
const normalized = vocabulary.normalize(parsedEdge.rawType);
|
|
|
|
for (const target of parsedEdge.targets) {
|
|
if (!target) continue;
|
|
|
|
// Normalize target to basename
|
|
const resolvedBase = normalizeTargetToBasename(target);
|
|
const targetPath = basenameLowerToPath.get(resolvedBase.toLowerCase());
|
|
|
|
if (!targetPath) {
|
|
warnings.missingTargetFile.push({
|
|
srcPath: file.path,
|
|
target: target,
|
|
});
|
|
continue;
|
|
}
|
|
|
|
// Check if target file has an ID
|
|
const dstId = filePathToId.get(targetPath);
|
|
if (!dstId) {
|
|
// File exists but has no frontmatter ID
|
|
warnings.missingTargetId.push({
|
|
srcPath: file.path,
|
|
targetPath: targetPath,
|
|
});
|
|
continue;
|
|
}
|
|
|
|
// Create edge record
|
|
const edge: EdgeRecord = {
|
|
srcId,
|
|
dstId,
|
|
rawType: parsedEdge.rawType,
|
|
canonicalType: normalized.canonical,
|
|
inverseType: normalized.inverse,
|
|
srcPath: file.path,
|
|
dstPath: targetPath,
|
|
lineStart: parsedEdge.lineStart,
|
|
lineEnd: parsedEdge.lineEnd,
|
|
rawTarget: target,
|
|
};
|
|
edges.push(edge);
|
|
}
|
|
}
|
|
} catch (error) {
|
|
console.error(`Error processing edges for file ${file.path}:`, error);
|
|
// Continue with other files
|
|
}
|
|
}
|
|
|
|
return {
|
|
filePathToId,
|
|
basenameLowerToPath,
|
|
idToMeta,
|
|
edges,
|
|
warnings,
|
|
};
|
|
}
|