Added mermaid language support

This commit is contained in:
2025-11-04 22:58:36 +08:00
parent 689b0d5d14
commit e9b6fef3cd
87 changed files with 4359 additions and 27 deletions

View File

@@ -10,6 +10,14 @@
// @ts-ignore: Unused imports
import {Call as $Call, Create as $Create} from "@wailsio/runtime";
/**
* AutoShowHide 自动显示/隐藏主窗口
*/
export function AutoShowHide(): Promise<void> & { cancel(): void } {
    // Invokes the bound Go method through the Wails runtime by its generated
    // call ID; the runtime returns a cancellable promise, hence the cast.
    let $resultPromise = $Call.ByID(4044219428) as any;
    return $resultPromise;
}
/**
* HandleWindowClose 处理窗口关闭事件
*/

View File

@@ -11,10 +11,15 @@
"lint": "eslint",
"lint:fix": "eslint --fix",
"build:lang-parser": "node src/views/editor/extensions/codeblock/lang-parser/build-parser.js",
"build:mermaid-parser": "node src/views/editor/language/mermaid/build-parsers.js",
"test": "vitest",
"docs:dev": "vitepress dev docs",
"docs:build": "vitepress build docs",
"docs:preview": "vitepress preview docs"
"docs:preview": "vitepress preview docs",
"app:dev": "cd .. &&wails3 dev",
"app:build": "cd .. && wails3 task build",
"app:package": "cd .. && wails3 package",
"app:generate": "cd .. && wails3 generate bindings -ts"
},
"dependencies": {
"@codemirror/autocomplete": "^6.19.1",

View File

@@ -18,7 +18,7 @@ BlockLanguage {
"go" | "clj" | "ex" | "erl" | "js" | "ts" | "swift" | "kt" | "groovy" |
"ps1" | "dart" | "scala" | "math" | "dockerfile" | "lua" | "vue" | "lezer" |
"liquid" | "wast" | "sass" | "less" | "angular" | "svelte" |
"http"
"http" | "mermaid"
}
@tokens {

View File

@@ -24,7 +24,7 @@ import {lessLanguage} from "@codemirror/lang-less";
import {angularLanguage} from "@codemirror/lang-angular";
import { svelteLanguage } from "@replit/codemirror-lang-svelte";
import { httpLanguage } from "@/views/editor/extensions/httpclient/language/http-language";
import { mermaidLanguage } from '@/views/editor/language/mermaid';
import {StreamLanguage} from "@codemirror/language";
import {ruby} from "@codemirror/legacy-modes/mode/ruby";
import {shell} from "@codemirror/legacy-modes/mode/shell";
@@ -226,6 +226,7 @@ export const LANGUAGES: LanguageInfo[] = [
}
}),
new LanguageInfo("http", "Http", httpLanguage.parser, ["http"]),
new LanguageInfo("mermaid", "Mermaid", mermaidLanguage.parser, ["mermaid"]),
];

View File

@@ -3,14 +3,14 @@ import {LRParser} from "@lezer/lr"
import {blockContent} from "./external-tokens.js"
export const parser = LRParser.deserialize({
version: 14,
states: "!jQQOQOOOVOQO'#C`O#xOPO'#C_OOOO'#Cc'#CcQQOQOOOOOO'#Ca'#CaO#}OSO,58zOOOO,58y,58yOOOO-E6a-E6aOOOP1G.f1G.fO$VOSO1G.fOOOP7+$Q7+$Q",
stateData: "$[~OXPO~OYTOZTO[TO]TO^TO_TO`TOaTObTOcTOdTOeTOfTOgTOhTOiTOjTOkTOlTOmTOnTOoTOpTOqTOrTOsTOtTOuTOvTOwTOxTOyTOzTO{TO|TO}TO!OTO!PTO!QTO!RTO!STO~OPVO~OUYO!TXO~O!TZO~O",
states: "!jQQOQOOOVOQO'#C`O#{OPO'#C_OOOO'#Cc'#CcQQOQOOOOOO'#Ca'#CaO$QOSO,58zOOOO,58y,58yOOOO-E6a-E6aOOOP1G.f1G.fO$YOSO1G.fOOOP7+$Q7+$Q",
stateData: "$_~OXPO~OYTOZTO[TO]TO^TO_TO`TOaTObTOcTOdTOeTOfTOgTOhTOiTOjTOkTOlTOmTOnTOoTOpTOqTOrTOsTOtTOuTOvTOwTOxTOyTOzTO{TO|TO}TO!OTO!PTO!QTO!RTO!STO!TTO~OPVO~OUYO!UXO~O!UZO~O",
goto: "jWPPPX]aPdTROSTQOSRUPQSORWS",
nodeNames: "⚠ BlockContent Document Block BlockDelimiter BlockLanguage Auto",
maxTerm: 51,
maxTerm: 52,
skippedNodes: [0],
repeatNodeCount: 1,
tokenData: "3u~RdYZ!a}!O!z#T#U#V#V#W$Q#W#X%R#X#Y&t#Z#['_#[#]([#^#_)R#_#`*Q#`#a*]#a#b,Y#d#e,y#f#g-r#g#h.V#h#i0|#j#k2R#k#l2d#l#m2{#m#n3^R!fP!TQ%&x%&y!iP!lP%&x%&y!oP!rP%&x%&y!uP!zOXP~!}P#T#U#Q~#VOU~~#YP#b#c#]~#`P#Z#[#c~#fP#i#j#i~#lP#`#a#o~#rP#T#U#u~#xP#f#g#{~$QO!Q~~$TR#`#a$^#d#e$i#g#h$t~$aP#^#_$d~$iOl~~$lP#d#e$o~$tOd~~$yPf~#g#h$|~%ROb~~%UQ#T#U%[#c#d%m~%_P#f#g%b~%eP#h#i%h~%mOu~~%pP#V#W%s~%vP#_#`%y~%|P#X#Y&P~&SP#f#g&V~&YP#Y#Z&]~&`P#]#^&c~&fP#`#a&i~&lP#X#Y&o~&tOx~~&wQ#f#g&}#l#m'Y~'QP#`#a'T~'YOn~~'_Om~~'bQ#c#d'h#f#g'm~'mOk~~'pP#c#d's~'vP#c#d'y~'|P#j#k(P~(SP#m#n(V~([Os~~(_P#h#i(b~(eQ#a#b(k#h#i(v~(nP#`#a(q~(vO]~~(yP#d#e(|~)RO!S~~)UQ#T#U)[#g#h)m~)_P#j#k)b~)eP#T#U)h~)mO`~~)rPo~#c#d)u~)xP#b#c){~*QOZ~~*TP#h#i*W~*]Or~~*`R#X#Y*i#]#^+`#i#j+}~*lQ#g#h*r#n#o*}~*uP#g#h*x~*}O!P~~+QP#X#Y+T~+WP#f#g+Z~+`O{~~+cP#e#f+f~+iP#i#j+l~+oP#]#^+r~+uP#W#X+x~+}O|~~,QP#T#U,T~,YOy~~,]Q#T#U,c#W#X,t~,fP#h#i,i~,lP#[#],o~,tOw~~,yO_~~,|R#[#]-V#g#h-b#m#n-m~-YP#d#e-]~-bOa~~-eP!R!S-h~-mOt~~-rO[~~-uQ#U#V-{#g#h.Q~.QOg~~.VOe~~.YU#T#U.l#V#W.}#[#]/f#e#f/k#j#k/v#k#l0e~.oP#g#h.r~.uP#g#h.x~.}O!O~~/QP#T#U/T~/WP#`#a/Z~/^P#T#U/a~/fOv~~/kOh~~/nP#`#a/q~/vO^~~/yP#X#Y/|~0PP#`#a0S~0VP#h#i0Y~0]P#X#Y0`~0eO!R~~0hP#]#^0k~0nP#Y#Z0q~0tP#h#i0w~0|Oq~~1PR#X#Y1Y#c#d1k#g#h1|~1]P#l#m1`~1cP#h#i1f~1kOY~~1nP#a#b1q~1tP#`#a1w~1|Oj~~2ROp~~2UP#i#j2X~2[P#X#Y2_~2dOz~~2gP#T#U2j~2mP#g#h2p~2sP#h#i2v~2{O}~~3OP#a#b3R~3UP#`#a3X~3^Oc~~3aP#T#U3d~3gP#a#b3j~3mP#`#a3p~3uOi~",
tokenData: "4m~RdYZ!a}!O!z#T#U#V#V#W$Q#W#X%R#X#Y&t#Z#['_#[#]([#^#_)R#_#`*Q#`#a*]#a#b,Y#d#e-q#f#g.j#g#h.}#h#i1t#j#k2y#k#l3[#l#m3s#m#n4UR!fP!UQ%&x%&y!iP!lP%&x%&y!oP!rP%&x%&y!uP!zOXP~!}P#T#U#Q~#VOU~~#YP#b#c#]~#`P#Z#[#c~#fP#i#j#i~#lP#`#a#o~#rP#T#U#u~#xP#f#g#{~$QO!Q~~$TR#`#a$^#d#e$i#g#h$t~$aP#^#_$d~$iOl~~$lP#d#e$o~$tOd~~$yPf~#g#h$|~%ROb~~%UQ#T#U%[#c#d%m~%_P#f#g%b~%eP#h#i%h~%mOu~~%pP#V#W%s~%vP#_#`%y~%|P#X#Y&P~&SP#f#g&V~&YP#Y#Z&]~&`P#]#^&c~&fP#`#a&i~&lP#X#Y&o~&tOx~~&wQ#f#g&}#l#m'Y~'QP#`#a'T~'YOn~~'_Om~~'bQ#c#d'h#f#g'm~'mOk~~'pP#c#d's~'vP#c#d'y~'|P#j#k(P~(SP#m#n(V~([Os~~(_P#h#i(b~(eQ#a#b(k#h#i(v~(nP#`#a(q~(vO]~~(yP#d#e(|~)RO!S~~)UQ#T#U)[#g#h)m~)_P#j#k)b~)eP#T#U)h~)mO`~~)rPo~#c#d)u~)xP#b#c){~*QOZ~~*TP#h#i*W~*]Or~~*`R#X#Y*i#]#^+`#i#j+}~*lQ#g#h*r#n#o*}~*uP#g#h*x~*}O!P~~+QP#X#Y+T~+WP#f#g+Z~+`O{~~+cP#e#f+f~+iP#i#j+l~+oP#]#^+r~+uP#W#X+x~+}O|~~,QP#T#U,T~,YOy~~,]R#T#U,f#W#X,w#X#Y,|~,iP#h#i,l~,oP#[#],r~,wOw~~,|O_~~-PP#f#g-S~-VP#a#b-Y~-]P#T#U-`~-cP#]#^-f~-iP#W#X-l~-qO!T~~-tR#[#]-}#g#h.Y#m#n.e~.QP#d#e.T~.YOa~~.]P!R!S.`~.eOt~~.jO[~~.mQ#U#V.s#g#h.x~.xOg~~.}Oe~~/QU#T#U/d#V#W/u#[#]0^#e#f0c#j#k0n#k#l1]~/gP#g#h/j~/mP#g#h/p~/uO!O~~/xP#T#U/{~0OP#`#a0R~0UP#T#U0X~0^Ov~~0cOh~~0fP#`#a0i~0nO^~~0qP#X#Y0t~0wP#`#a0z~0}P#h#i1Q~1TP#X#Y1W~1]O!R~~1`P#]#^1c~1fP#Y#Z1i~1lP#h#i1o~1tOq~~1wR#X#Y2Q#c#d2c#g#h2t~2TP#l#m2W~2ZP#h#i2^~2cOY~~2fP#a#b2i~2lP#`#a2o~2tOj~~2yOp~~2|P#i#j3P~3SP#X#Y3V~3[Oz~~3_P#T#U3b~3eP#g#h3h~3kP#h#i3n~3sO}~~3vP#a#b3y~3|P#`#a4P~4UOc~~4XP#T#U4[~4_P#a#b4b~4eP#`#a4h~4mOi~",
tokenizers: [blockContent, 0, 1],
topRules: {"Document":[0,2]},
tokenPrec: 0

View File

@@ -66,6 +66,7 @@ export type SupportedLanguage =
| 'angular'
| 'svelte'
| 'http' // HTTP Client
| 'mermaid'
/**
* 创建块的选项
@@ -85,7 +86,6 @@ export interface EditorOptions {
}
// 分隔符格式常量
export const DELIMITER_REGEX = /^\n∞∞∞([a-zA-Z0-9_-]+)(-a)?\n/gm;
export const DELIMITER_PREFIX = '\n∞∞∞';

View File

@@ -0,0 +1,57 @@
import { buildParserFile } from '@lezer/generator';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import { readFileSync, writeFileSync } from 'fs';

// Directory that holds one sub-directory per diagram type, each containing
// a `<type>.grammar` source file.
const scriptPath = fileURLToPath(import.meta.url);
const parsersRoot = join(dirname(scriptPath), 'parsers');

// Diagram grammars to compile, in build order.
const diagramTypes = [
  'mermaid',
  'mindmap',
  'pie',
  'flowchart',
  'sequence',
  'journey',
  'requirement',
  'gantt'
];

/**
 * Compile a single grammar into its TypeScript parser module, plus an
 * optional terms module when the generator emits one.
 */
const buildOne = (type) => {
  const grammarPath = join(parsersRoot, type, `${type}.grammar`);
  const outputPath = join(parsersRoot, type, `${type}.parser.grammar.ts`);
  console.log(`正在处理: ${type}`);
  console.log(` 读取: ${grammarPath}`);
  const result = buildParserFile(readFileSync(grammarPath, 'utf-8'), {
    fileName: `${type}.grammar`,
    typeScript: true,
    warn: (message) => console.warn(` 警告: ${message}`)
  });
  writeFileSync(outputPath, result.parser);
  console.log(` ✓ 生成: ${outputPath}`);
  // The terms file is only produced when the grammar declares terms.
  if (result.terms) {
    const termsPath = join(parsersRoot, type, `${type}.grammar.terms.ts`);
    writeFileSync(termsPath, result.terms);
    console.log(` ✓ 生成: ${termsPath}`);
  }
  console.log('');
};

console.log('开始构建 Mermaid 语法解析器...\n');
for (const type of diagramTypes) {
  try {
    buildOne(type);
  } catch (error) {
    // Abort on the first failing grammar so broken output is never shipped.
    console.error(` ✗ 错误: ${type} - ${error.message}\n`);
    process.exit(1);
  }
}
console.log('✓ 所有解析器构建完成!');

View File

@@ -0,0 +1,62 @@
import { foldService } from '@codemirror/language';
// Width of a line's leading whitespace run: each space counts as 1 column,
// each tab as 4. Scanning stops at the first non-space, non-tab character.
const countLeadingSpaces = (str: string) => {
  let width = 0;
  for (const ch of str) {
    if (ch === ' ') {
      width += 1;
    } else if (ch === '\t') {
      width += 4;
    } else {
      break;
    }
  }
  return width;
};
// True when the line contains nothing but spaces/tabs (including the empty
// string) — i.e. no character outside the [ \t] class exists.
const isEmptyLine = (text: string) => !/[^ \t]/.test(text);
// Indentation-based folding: starting at a line, the fold covers every
// following line whose leading-whitespace width is strictly greater than the
// start line's. Blank lines inside the run are skipped but do not extend the
// fold on their own.
export const foldByIndent = () => {
  return foldService.of((state, lineStart, lineEnd) => {
    const line = state.doc.lineAt(lineStart);
    const lineCount = state.doc.lines;
    let indents = countLeadingSpaces(line.text);
    let foldStart = lineStart;
    let foldEnd = lineEnd;
    let nextLine = line;
    while (nextLine.number < lineCount) {
      nextLine = state.doc.line(nextLine.number + 1);
      // Blank lines neither end nor extend the fold; keep scanning.
      if (nextLine.text === '' || isEmptyLine(nextLine.text)) continue;
      let nextIndents = countLeadingSpaces(nextLine.text);
      if (nextIndents > indents && !isEmptyLine(nextLine.text)) {
        // Deeper-indented line: pull the fold end forward to its end offset.
        foldEnd = nextLine.to;
      } else {
        // First line at the same or shallower indent terminates the region.
        break;
      }
    }
    // foldEnd never moved past the start line: nothing to fold here.
    if (
      state.doc.lineAt(foldStart).number === state.doc.lineAt(foldEnd).number
    ) {
      return null;
    }
    // Fold from the end of the header line so the header itself stays visible.
    foldStart = line.to;
    const lineAtFoldStart = state.doc.lineAt(foldStart);
    if (lineAtFoldStart.text === '' || isEmptyLine(lineAtFoldStart.text)) {
      return null;
    }
    return { from: foldStart, to: foldEnd };
  });
};

View File

@@ -0,0 +1,45 @@
// LRLanguage definitions for Mermaid and each supported diagram type.
export {
  mermaidLanguage,
  mindmapLanguage,
  pieLanguage,
  flowchartLanguage,
  sequenceLanguage,
  journeyLanguage,
  requirementLanguage,
  ganttLanguage,
} from './language-definitions';

// Lazily-loading LanguageDescription entries for each diagram type.
export {
  mermaidLanguageDescription,
  mindmapLanguageDescription,
  pieLanguageDescription,
  flowchartLanguageDescription,
  sequenceLanguageDescription,
  journeyLanguageDescription,
  requirementLanguageDescription,
  ganttLanguageDescription,
} from './language-descriptions';

// LanguageSupport factory functions (one per diagram type).
export {
  mermaid,
  mindmap,
  pie,
  flowchart,
  sequence,
  journey,
  requirement,
  gantt,
} from './language-support';

// Highlight tag sets consumed by the per-diagram highlighters.
export {
  mermaidTags,
  mindmapTags,
  pieTags,
  flowchartTags,
  sequenceTags,
  journeyTags,
  requirementTags,
  ganttTags,
} from './tags';

// Indentation-based code-folding extension.
export { foldByIndent } from './extensions';

View File

@@ -0,0 +1,74 @@
import { LRLanguage } from '@codemirror/language';
import { parseMixed } from '@lezer/common';
import {
mermaidParser,
mindmapParser,
pieParser,
flowchartParser,
sequenceParser,
journeyParser,
requirementParser,
ganttParser,
} from '../parsers';
import { DiagramType, MermaidLanguageType } from '../types';
// Top-level Mermaid language: the outer parser recognizes the diagram
// header, then parseMixed overlays the matching per-diagram parser on the
// diagram body node.
export const mermaidLanguage = LRLanguage.define({
  name: MermaidLanguageType.Mermaid,
  parser: mermaidParser.configure({
    wrap: parseMixed((node) => {
      // Dispatch on the node name produced by the outer grammar; returning
      // null keeps the outer parse for unrecognized nodes.
      switch (node.name) {
        case DiagramType.Mindmap:
          return { parser: mindmapParser };
        case DiagramType.Pie:
          return { parser: pieParser };
        case DiagramType.Flowchart:
          return { parser: flowchartParser };
        case DiagramType.Sequence:
          return { parser: sequenceParser };
        case DiagramType.Journey:
          return { parser: journeyParser };
        case DiagramType.Requirement:
          return { parser: requirementParser };
        case DiagramType.Gantt:
          return { parser: ganttParser };
        default:
          return null;
      }
    }),
  }),
});
// Standalone LRLanguage instances for the individual diagram types (used
// directly, without the top-level Mermaid dispatch).
export const mindmapLanguage = LRLanguage.define({
  name: MermaidLanguageType.Mindmap,
  parser: mindmapParser,
});
export const pieLanguage = LRLanguage.define({
  name: MermaidLanguageType.Pie,
  parser: pieParser,
});
export const flowchartLanguage = LRLanguage.define({
  name: MermaidLanguageType.Flowchart,
  parser: flowchartParser,
});
export const sequenceLanguage = LRLanguage.define({
  name: MermaidLanguageType.Sequence,
  parser: sequenceParser,
});
export const journeyLanguage = LRLanguage.define({
  name: MermaidLanguageType.Journey,
  parser: journeyParser,
});
export const requirementLanguage = LRLanguage.define({
  name: MermaidLanguageType.Requirement,
  parser: requirementParser,
});
export const ganttLanguage = LRLanguage.define({
  name: MermaidLanguageType.Gantt,
  parser: ganttParser,
});

View File

@@ -0,0 +1,71 @@
import { LanguageDescription } from '@codemirror/language';
import {
mermaid,
mindmap,
pie,
flowchart,
sequence,
journey,
requirement,
gantt,
} from '../language-support';
import { MermaidDescriptionName, MermaidAlias } from '../types';
/** Lazily-loaded description for the combined Mermaid language. */
export const mermaidLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Mermaid,
  load: async () => mermaid(),
});

/** Lazily-loaded description for mindmap diagrams. */
export const mindmapLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Mindmap,
  load: async () => mindmap(),
});

/** Lazily-loaded description for pie-chart diagrams. */
export const pieLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Pie,
  load: async () => pie(),
});

/** Lazily-loaded description for flowcharts; also matched via its alias. */
export const flowchartLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Flowchart,
  alias: [MermaidAlias.Graph],
  load: async () => flowchart(),
});

/** Lazily-loaded description for sequence diagrams; also matched via alias. */
export const sequenceLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Sequence,
  alias: [MermaidAlias.Sequence],
  load: async () => sequence(),
});

/** Lazily-loaded description for user-journey diagrams. */
export const journeyLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Journey,
  load: async () => journey(),
});

/** Lazily-loaded description for requirement diagrams; also matched via alias. */
export const requirementLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Requirement,
  alias: [MermaidAlias.Requirement],
  load: async () => requirement(),
});

/** Lazily-loaded description for gantt charts. */
export const ganttLanguageDescription = LanguageDescription.of({
  name: MermaidDescriptionName.Gantt,
  load: async () => gantt(),
});

View File

@@ -0,0 +1,43 @@
import { LanguageSupport } from '@codemirror/language';
import {
mermaidLanguage,
mindmapLanguage,
pieLanguage,
flowchartLanguage,
sequenceLanguage,
journeyLanguage,
requirementLanguage,
ganttLanguage,
} from '../language-definitions';
/** Build a bare LanguageSupport (no extra extensions) for a language. */
const toSupport = (language: typeof mermaidLanguage) =>
  new LanguageSupport(language);

/** Editor support for the combined Mermaid language. */
export function mermaid() {
  return toSupport(mermaidLanguage);
}

/** Editor support for mindmap diagrams. */
export function mindmap() {
  return toSupport(mindmapLanguage);
}

/** Editor support for pie-chart diagrams. */
export function pie() {
  return toSupport(pieLanguage);
}

/** Editor support for flowcharts. */
export function flowchart() {
  return toSupport(flowchartLanguage);
}

/** Editor support for sequence diagrams. */
export function sequence() {
  return toSupport(sequenceLanguage);
}

/** Editor support for user-journey diagrams. */
export function journey() {
  return toSupport(journeyLanguage);
}

/** Editor support for requirement diagrams. */
export function requirement() {
  return toSupport(requirementLanguage);
}

/** Editor support for gantt charts. */
export function gantt() {
  return toSupport(ganttLanguage);
}

View File

@@ -0,0 +1,170 @@
@top FlowchartDiagram { document+ }
@skip { spaces | LineComment }
@skip {} {
String {
singleQuote (stringContentSingle)* (singleQuote) |
doubleQuote (stringContentDouble)* (doubleQuote) |
backTick (stringContentBackTick)* (backTick)
}
}
document {
(
DiagramName |
DiagramName Orientation |
DiagramName Orientation newlines* subDocument (newlines* subDocument semicolon?)*
) newlines*
}
subDocument {
NodeId |
Node |
Link |
NodeEdge |
ampersand |
Keyword |
emptyParentheses |
colon |
tripleColon |
String |
StyleKeyword NodeId StyleText
}
NodeId {
identifier | Orientation
}
text {
NodeText | String
}
edgeText {
NodeEdgeText | String
}
emptyParentheses { "()" }
Node {
"(" text ")" |
"[" text "]" |
"|" text "|" |
"([" text "])" |
"[(" text "])" |
"[[" text "]]" |
"[(" text ")]" |
"((" text "))" |
">" text "]" |
"{" text "}" |
"{{" text "}}" |
"(((" text ")))"
}
NodeEdge {
(DoubleHyphen | DoubleEqual) edgeText Link
}
Link {
linkType1 |
linkType2 |
linkType3 |
linkType4 |
linkType5 |
linkType6
}
DiagramName {
kw<"flowchart"> |
kw<"graph">
}
Orientation {
kw<"TB"> |
kw<"TD"> |
kw<"BT"> |
kw<"RL"> |
kw<"LR">
}
StyleKeyword {
kw<"style"> |
kw<"linkStyle"> |
kw<"class"> |
kw<"classDef">
}
Keyword {
kw<"subgraph"> |
kw<"end"> |
kw<"direction"> |
kw<"click"> |
kw<"call"> |
kw<"href"> |
kw<"_self"> |
kw<"_blank"> |
kw<"_parent"> |
kw<"_to">
}
kw<term> { @specialize<identifier, term> }
@external tokens nodeEdgeText from "./tokens" { NodeEdgeText }
@external tokens nodeText from "./tokens" { NodeText }
@external tokens styleText from "./tokens" { StyleText }
/*
Single character tokens will need to go inside the @tokens rule to specify precedence over "char".
Longer tokens always beat shorter tokens, which is why "flowchart" takes priority over multiple "char" tokens.
The @specialize rule also helps makes "DiagramName" have higher priority for "flowchart" even though it overlaps with "char" and "identifier" tokens
*/
@tokens {
char { @asciiLetter | @digit | $[!"\#$%&'*+\.`?\\_\/\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u
12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uF
B2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC] }
identifier { char+ }
newlines { $[\n]+ }
spaces { @whitespace+ }
stringContentSingle { ![']+ }
stringContentDouble { !["]+ }
stringContentBackTick { ![`]+ }
LineComment { "%%" ![\n]* }
DoubleHyphen { "--" }
DoubleEqual { "==" }
linkType1 { ("<-" | "x-" | "o-") ("->" | "-x" | "-o")}
linkType2 { ("<=" | "x=" | "o=") ("=>" | "=x" | "=o")}
linkType3 { ("<-" | "x-" | "o-")? "-"+ ("->" | "-x" | "-o")?}
linkType4 { ("<=" | "x=" | "o=")? "="+ ("=>" | "=x" | "=o")?}
linkType5 { ("<-" | "x-" | "o-" | "-")? "."+ ("->" | "-x" | "-o" | "-")?}
linkType6 { "~~~" | "---" | "===" }
ampersand { "&" }
semicolon { ";" }
singleQuote { "'" }
doubleQuote { '"' }
backTick { "`" }
tripleColon[@name=":::"] { ":::" }
colon[@name=":"] { ":" }
@precedence {
newlines,
spaces,
LineComment,
linkType1,
linkType2,
linkType3,
linkType4,
linkType5,
linkType6,
DoubleHyphen,
DoubleEqual,
ampersand,
semicolon,
singleQuote,
doubleQuote,
backTick,
tripleColon,
colon,
identifier
}
}
@external propSource flowchartHighlighting from "./highlight"

View File

@@ -0,0 +1,3 @@
// Term IDs for the external tokens declared in flowchart.grammar —
// presumably mirrors the generated flowchart.grammar.terms.ts; keep in sync.
export declare const NodeText: number;
export declare const NodeEdgeText: number;
export declare const StyleText: number;

View File

@@ -0,0 +1,20 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
export const
NodeEdgeText = 1,
NodeText = 2,
StyleText = 3,
LineComment = 4,
FlowchartDiagram = 5,
DiagramName = 6,
Orientation = 7,
NodeId = 8,
Node = 9,
String = 10,
Link = 11,
NodeEdge = 12,
DoubleHyphen = 13,
DoubleEqual = 14,
Keyword = 15,
colon = 16,
tripleColon = 17,
StyleKeyword = 18

View File

@@ -0,0 +1,217 @@
import { describe, it, expect } from 'vitest';
import { parser } from './flowchart.parser.grammar';
/**
* Flowchart Grammar 测试
*
* 测试目标:验证标准的 Mermaid Flowchart 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Flowchart Grammar 解析测试', () => {
  /** Helper: parse source code and return the syntax tree. */
  function parseCode(code: string) {
    return parser.parse(code);
  }

  /**
   * Helper: collect error (⚠) nodes from a syntax tree.
   *
   * Takes the parsed source as well, because a Lezer tree does not store the
   * input text — node offsets must be resolved against the original code.
   */
  function hasErrorNodes(tree: any, code: string): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
    const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
    tree.iterate({
      enter: (node: any) => {
        if (node.name === '⚠') {
          errors.push({
            name: node.name,
            from: node.from,
            to: node.to,
            // FIX: previously sliced tree.toString() (the tree's debug
            // representation) instead of the parsed source text.
            text: code.substring(node.from, node.to)
          });
        }
      }
    });
    return {
      hasError: errors.length > 0,
      errors
    };
  }

  /** Helper: render the syntax-tree structure (debugging aid). */
  function printTree(tree: any, code: string, maxDepth = 5) {
    const lines: string[] = [];
    tree.iterate({
      enter: (node: any) => {
        const depth = getNodeDepth(tree, node);
        if (depth > maxDepth) return false; // limit depth
        const indent = ' '.repeat(depth);
        // FIX: flag truncation by the node's span length, not by the
        // (coincidental) length of the extracted text.
        const truncated = node.to - node.from > 30;
        const text = code.substring(node.from, Math.min(node.to, node.from + 30));
        const displayText = truncated ? text + '...' : text;
        lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
      }
    });
    return lines.join('\n');
  }

  /**
   * Helper: depth of a node below the tree root.
   *
   * FIX: iterate() hands out SyntaxNodeRef objects, which carry no `parent`
   * property, so the previous walk always reported depth 0; resolve the ref
   * to a full SyntaxNode via `.node` before walking up.
   */
  function getNodeDepth(tree: any, targetNode: any): number {
    let depth = 0;
    let current = targetNode.node ?? targetNode;
    while (current.parent) {
      depth++;
      current = current.parent;
    }
    return depth;
  }

  it('应该正确解析基础的 flowchart 声明TB 方向)', () => {
    const code = `flowchart TB
A
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析基础的 graph 声明LR 方向)', () => {
    const code = `graph LR
A
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析带方框节点的流程图', () => {
    const code = `flowchart TD
A[Christmas]
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析带箭头连接的节点', () => {
    const code = `flowchart TD
A --> B
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析带标签的连接线', () => {
    const code = `flowchart TD
A -- text --> B
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析不同形状的节点(圆形)', () => {
    const code = `flowchart TD
A((Circle))
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析不同形状的节点(菱形)', () => {
    const code = `flowchart TD
A{Diamond}
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });

  it('应该正确解析完整的流程图示例', () => {
    const code = `flowchart TD
A[Start] --> B{Is it?}
B -->|Yes| C[OK]
B -->|No| D[End]
`;
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  });
});

View File

@@ -0,0 +1,3 @@
// Type declaration for the generated flowchart parser module.
import { LRParser } from '@lezer/lr';
export declare const parser: LRParser;

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,22 @@
import { styleTags, tags as t } from '@lezer/highlight';
import { flowchartTags } from '../../tags';
// Maps flowchart syntax-tree node names to highlight tags.
export const flowchartHighlighting = styleTags({
  // Bracket pairs used by the node shapes.
  '( )': t.paren,
  '[ ]': t.squareBracket,
  '{ }': t.brace,
  '<': t.angleBracket,
  DiagramName: flowchartTags.diagramName,
  DoubleEqual: flowchartTags.link,
  DoubleHyphen: flowchartTags.link,
  Keyword: flowchartTags.keyword,
  LineComment: flowchartTags.lineComment,
  Link: flowchartTags.link,
  NodeEdge: flowchartTags.nodeEdge,
  NodeEdgeText: flowchartTags.nodeEdgeText,
  NodeId: flowchartTags.nodeId,
  NodeText: flowchartTags.nodeText,
  // NOTE(review): the current flowchart grammar does not appear to define a
  // Number node, so this mapping looks inert — confirm or remove.
  Number: flowchartTags.number,
  Orientation: flowchartTags.orientation,
  String: flowchartTags.string,
});

View File

@@ -0,0 +1,55 @@
import { ExternalTokenizer } from '@lezer/lr';
import { NodeText, NodeEdgeText, StyleText } from './flowchart.grammar.terms';

// Code points that must not begin a text token:
// EOF(-1), tab(9), CR(13), space(32), '"'(34), "'"(39), '`'(96).
const skipCodePoints = [-1, 9, 13, 32, 34, 39, 96];
// Opening delimiters: '('(40), '>'(62), '['(91), '{'(123), '|'(124).
const startBracketCodePoints = [40, 62, 91, 123, 124];
// Closing delimiters: ')'(41), ']'(93), '|'(124), '}'(125).
const endBracketCodePoints = [41, 93, 124, 125];
const hyphen = 45; // '-'
const equal = 61; // '='
const dot = 46; // '.'

// NodeText: consume everything up to (not including) a closing delimiter or
// EOF. NOTE(review): if the first character is already a closing delimiter
// this accepts a zero-length token — presumably unreachable in the grammar
// contexts where NodeText is requested; confirm.
export const nodeText = new ExternalTokenizer((input) => {
  if (
    skipCodePoints.includes(input.next) ||
    startBracketCodePoints.includes(input.next)
  )
    return;
  while (!endBracketCodePoints.includes(input.next) && input.next !== -1) {
    input.advance();
  }
  input.acceptToken(NodeText);
});

// NodeEdgeText: label text inside an edge; ends at '-', '=', '.' (the
// characters that resume the link arrow) or EOF.
export const nodeEdgeText = new ExternalTokenizer((input) => {
  if (
    skipCodePoints.includes(input.next) ||
    startBracketCodePoints.includes(input.next) ||
    input.next === hyphen ||
    input.next === equal ||
    input.next === dot
  )
    return;
  while (
    input.next !== hyphen &&
    input.next !== equal &&
    input.next !== dot &&
    input.next !== -1
  ) {
    input.advance();
  }
  input.acceptToken(NodeEdgeText);
});

// StyleText: the remainder of a style/classDef line, up to newline(10) or EOF.
export const styleText = new ExternalTokenizer((input) => {
  if (input.next === 10 || input.next === -1) return;
  while (input.next !== 10 && input.next !== -1) {
    input.advance();
  }
  input.acceptToken(StyleText);
});

View File

@@ -0,0 +1,60 @@
// Top rule: a single gantt document.
@top GanttDiagram {
  document
}

// Inline whitespace is skipped everywhere; newlines remain significant.
@skip { spaces }

document {
  DiagramName newlines? |
  DiagramName newlines (subDocument newlines?)+
}

// One statement of the gantt body: a keyword with its argument text, a bare
// text/task line, a flag keyword, or a comment.
subDocument {
  Title ImportantText |
  Section ImportantText |
  DateFormat Text |
  AxisFormat Text |
  Excludes Text |
  TickInterval Text |
  TodayMarker Text |
  Weekday Text |
  Text |
  InclusiveEndDates |
  LineComment
}

// Argument of title/section — kept distinct from plain Text so it can be
// highlighted differently.
ImportantText {
  text
}

Text {
  text
}

DiagramName { kw<"gantt"> }

// Keyword helper: specializes an identifier token to a literal keyword.
kw<term> { @specialize<identifier, term> }

// Keywords, section headers and free text come from an external tokenizer
// (see ./tokens).
@external tokens textToken from "./tokens" {
  AxisFormat[group=Keyword],
  DateFormat[group=Keyword],
  Excludes[group=Keyword],
  InclusiveEndDates[group=Keyword],
  TickInterval[group=Keyword],
  Title[group=Keyword],
  TodayMarker[group=Keyword],
  Weekday[group=Keyword],
  Section,
  text
}

@tokens {
  identifier { @asciiLetter+ }
  spaces { @whitespace+ }
  newlines { $[\n]+ }
  LineComment { "%%" ![\n]* }
  @precedence { newlines, spaces }
}

@external propSource ganttHighlighting from "./highlight"

View File

@@ -0,0 +1,10 @@
export declare const AxisFormat: number;
export declare const DateFormat: number;
export declare const Excludes: number;
export declare const InclusiveEndDates: number;
export declare const Section: number;
export declare const TickInterval: number;
export declare const Title: number;
export declare const TodayMarker: number;
export declare const Weekday: number;
export declare const text: number;

View File

@@ -0,0 +1,17 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
export const
AxisFormat = 1,
DateFormat = 2,
Excludes = 3,
InclusiveEndDates = 4,
TickInterval = 5,
Title = 6,
TodayMarker = 7,
Weekday = 8,
Section = 9,
text = 17,
GanttDiagram = 10,
DiagramName = 11,
ImportantText = 12,
Text = 13,
LineComment = 14

View File

@@ -0,0 +1,273 @@
import { describe, it, expect } from 'vitest';
import { parser } from './gantt.parser.grammar';
/**
* Gantt Diagram Grammar 测试
*
* 测试目标:验证标准的 Mermaid Gantt Diagram 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Gantt Diagram Grammar 解析测试', () => {
/**
* 辅助函数:解析代码并返回语法树
*/
function parseCode(code: string) {
const tree = parser.parse(code);
return tree;
}
/**
* 辅助函数:检查语法树中是否有错误节点
*/
function hasErrorNodes(tree: any): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
tree.iterate({
enter: (node: any) => {
if (node.name === '⚠') {
errors.push({
name: node.name,
from: node.from,
to: node.to,
text: tree.toString().substring(node.from, node.to)
});
}
}
});
return {
hasError: errors.length > 0,
errors
};
}
/**
* 辅助函数:打印语法树结构(用于调试)
*/
function printTree(tree: any, code: string, maxDepth = 5) {
const lines: string[] = [];
tree.iterate({
enter: (node: any) => {
const depth = getNodeDepth(tree, node);
if (depth > maxDepth) return false; // 限制深度
const indent = ' '.repeat(depth);
const text = code.substring(node.from, Math.min(node.to, node.from + 30));
const displayText = text.length === 30 ? text + '...' : text;
lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
}
});
return lines.join('\n');
}
/**
* 获取节点深度
*/
function getNodeDepth(tree: any, targetNode: any): number {
let depth = 0;
let current = targetNode;
while (current.parent) {
depth++;
current = current.parent;
}
return depth;
}
it('应该正确解析基础的 gantt 声明', () => {
const code = `gantt
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带标题的 gantt 图', () => {
const code = `gantt
title A Gantt Diagram
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带日期格式的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带章节的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
section Section
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带任务的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
title Adding GANTT diagram functionality to mermaid
section A section
Completed task :done, des1, 2014-01-06,2014-01-08
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带活动任务的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
section A section
Active task :active, des2, 2014-01-09, 3d
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带 axisFormat 的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
axisFormat %m-%d
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带 excludes 的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
excludes weekends
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带 todayMarker 的 gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
todayMarker off
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析完整的 gantt 图示例', () => {
const code = `gantt
dateFormat YYYY-MM-DD
title Adding GANTT diagram functionality to mermaid
excludes weekends
section A section
Completed task :done, des1, 2014-01-06,2014-01-08
Active task :active, des2, 2014-01-09, 3d
Future task : des3, after des2, 5d
Future task2 : des4, after des3, 5d
section Critical tasks
Completed task in the critical line :crit, done, 2014-01-06,24h
Implement parser and jison :crit, done, after des1, 2d
Create tests for parser :crit, active, 3d
Future task in critical line :crit, 5d
Create tests for renderer :2d
Add to mermaid :1d
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
});

View File

@@ -0,0 +1,3 @@
import { LRParser } from '@lezer/lr';
export declare const parser: LRParser;

View File

@@ -0,0 +1,24 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
import {LRParser} from "@lezer/lr"
import {textToken} from "./tokens"
import {ganttHighlighting} from "./highlight"
const spec_identifier = {__proto__:null,gantt:44}
export const parser = LRParser.deserialize({
version: 14,
states: "!|OVQQOOO[QQO'#CpQOQQOOOOQO'#Cg'#CgO!XQRO,59[OOQP'#Ci'#CiO!`QRO'#CtO!SQRO'#CtOOQP'#Ct'#CtO!eQRO'#CkO#`QRO1G.vOOQP'#Ch'#ChOOQP,59`,59`OOQP,59V,59VOOQP-E6i-E6i",
stateData: "#j~OcOS~OfRO~OgSO`dX~OPVOQVORVOSWOTVOUUOVVOWVOXUO^WOaTO~O`da~PdOaZO~Og]OP_XQ_XR_XS_XT_XU_XV_XW_XX_X^_X`_Xa_X~O`di~PdOgc~",
goto: "!UiPPPPPPPPPPPjmpPwPPPP}PPP!QRPOR[USWSYR[VQYSR^YRQOTXSY",
nodeNames: "⚠ AxisFormat DateFormat Excludes InclusiveEndDates TickInterval Title TodayMarker Weekday Section GanttDiagram DiagramName ImportantText Text LineComment",
maxTerm: 24,
nodeProps: [
["group", -8,1,2,3,4,5,6,7,8,"Keyword"]
],
propSources: [ganttHighlighting],
skippedNodes: [0],
repeatNodeCount: 1,
tokenData: "$l~R_XY!QYZ!uZ^!Qpq!Quv#r!c!}$a#T#o$a#y#z!Q$f$g!Q#BY#BZ!Q$IS$I_!Q$I|$JO!Q$JT$JU!Q$KV$KW!Q&FU&FV!Q~!VYc~X^!Qpq!Q#y#z!Q$f$g!Q#BY#BZ!Q$IS$I_!Q$I|$JO!Q$JT$JU!Q$KV$KW!Q&FU&FV!Q~!|[g~c~XY!QYZ!uZ^!Qpq!Q#y#z!Q$f$g!Q#BY#BZ!Q$IS$I_!Q$I|$JO!Q$JT$JU!Q$KV$KW!Q&FU&FV!Q~#uPuv#x~#}S^~OY#xZ;'S#x;'S;=`$Z<%lO#x~$^P;=`<%l#x~$fQe~!c!}$a#T#o$a",
tokenizers: [textToken, 0],
topRules: {"GanttDiagram":[0,10]},
specialized: [{term: 21, get: (value: keyof typeof spec_identifier) => spec_identifier[value] || -1}],
tokenPrec: 115
})

View File

@@ -0,0 +1,9 @@
import { styleTags } from '@lezer/highlight';
import { ganttTags } from '../../tags';
// Highlighting map for the gantt grammar: syntax-tree node names → highlight tags.
export const ganttHighlighting = styleTags({
  'DiagramName Section': ganttTags.diagramName,
  Keyword: ganttTags.keyword,
  ImportantText: ganttTags.string,
  LineComment: ganttTags.lineComment,
});

View File

@@ -0,0 +1,59 @@
import { ExternalTokenizer } from '@lezer/lr';
import {
AxisFormat,
DateFormat,
Excludes,
InclusiveEndDates,
Section,
TickInterval,
Title,
TodayMarker,
Weekday,
text,
} from './gantt.grammar.terms';
// Maps each gantt directive keyword to its lezer term id
// (term ids imported from the generated gantt.grammar.terms module).
const keywordMap: { [key: string]: number } = {
  axisFormat: AxisFormat,
  dateFormat: DateFormat,
  excludes: Excludes,
  inclusiveEndDates: InclusiveEndDates,
  section: Section,
  tickInterval: TickInterval,
  title: Title,
  todayMarker: TodayMarker,
  weekday: Weekday,
};
// Keyword spellings checked by textToken below.
const keywords = Object.keys(keywordMap);
/**
 * External tokenizer for gantt lines.
 *
 * Buffers the rest of the current line and classifies it: when it begins
 * with a known keyword (exactly, or keyword followed by a space), the
 * keyword's token is accepted with a negative end offset that trims the
 * token back to just the keyword; otherwise the whole line is accepted
 * as plain text. Spaces, newlines, EOF and `%%` comments are not handled
 * here and cause an immediate return.
 */
export const textToken = new ExternalTokenizer((input) => {
  // Not ours: space, newline, end of input, or a `%%` comment start.
  if (input.next === 32 || input.next === 10 || input.next === -1) return;
  if (input.next === 37 && input.peek(1) === 37) return;
  // Read the remainder of the line into a buffer.
  let line = '';
  for (; input.next !== 10 && input.next !== -1; input.advance()) {
    line += String.fromCodePoint(input.next);
  }
  const matched = keywords.filter((keyword) =>
    keyword === line ? line.startsWith(keyword) : line.startsWith(keyword + ' ')
  );
  if (matched.length > 0) {
    // Negative offset rewinds the token end to the keyword boundary.
    input.acceptToken(keywordMap[matched[0]], matched[0].length - line.length);
    return;
  }
  input.acceptToken(text);
});

View File

@@ -0,0 +1,8 @@
// Barrel module: re-exports one generated lezer parser per supported
// Mermaid diagram type.
export { parser as mermaidParser } from './mermaid/mermaid.parser.grammar';
export { parser as mindmapParser } from './mindmap/mindmap.parser.grammar';
export { parser as pieParser } from './pie/pie.parser.grammar';
export { parser as flowchartParser } from './flowchart/flowchart.parser.grammar';
export { parser as sequenceParser } from './sequence/sequence.parser.grammar';
export { parser as journeyParser } from './journey/journey.parser.grammar';
export { parser as requirementParser } from './requirement/requirement.parser.grammar';
export { parser as ganttParser } from './gantt/gantt.parser.grammar';

View File

@@ -0,0 +1,11 @@
import { styleTags } from '@lezer/highlight';
import { journeyTags } from '../../tags';
// Highlighting map for the journey grammar: syntax-tree node names → highlight tags.
export const journeyHighlighting = styleTags({
  DiagramName: journeyTags.diagramName,
  'Text TaskName': journeyTags.text,
  Actor: journeyTags.actor,
  Keyword: journeyTags.keyword,
  LineComment: journeyTags.lineComment,
  Score: journeyTags.score,
});

View File

@@ -0,0 +1,59 @@
@top JourneyDiagram {
document
}
@skip { spaces }
document {
DiagramName newlines* (
() |
subDocument newlines* |
subDocument (newlines+ subDocument)+ newlines*
)
}
subDocument {
LineComment |
Keyword Text |
Task
}
Task {
TaskName ":" Score (":" Actor ("," Actor)*)?
}
Text {
text1
}
TaskName {
text2
}
Score {
text2
}
Actor {
text3
}
DiagramName { kw<"journey"> }
kw<term> { @specialize<identifier, term> }
@external tokens keywordTokens from "./tokens" { Keyword }
@external tokens textTokens1 from "./tokens" { text1 }
@external tokens textTokens2 from "./tokens" { text2 }
@external tokens textTokens3 from "./tokens" { text3 }
@tokens {
spaces { @whitespace+ }
newlines { $[\n]+ }
LineComment { "%%" ![\n]* }
identifier { @asciiLetter+ }
@precedence { newlines, spaces }
}
@external propSource journeyHighlighting from "./highlight"

View File

@@ -0,0 +1,5 @@
export declare const Keyword: number;
export declare const text1: number;
export declare const text2: number;
export declare const text3: number;

View File

@@ -0,0 +1,14 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
export const
Keyword = 1,
text1 = 14,
text2 = 15,
text3 = 16,
JourneyDiagram = 2,
DiagramName = 3,
LineComment = 4,
Text = 5,
Task = 6,
TaskName = 7,
Score = 8,
Actor = 9

View File

@@ -0,0 +1,234 @@
import { describe, it, expect } from 'vitest';
import { parser } from './journey.parser.grammar';
/**
* Journey Diagram Grammar 测试
*
* 测试目标:验证标准的 Mermaid Journey Diagram 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Journey Diagram Grammar 解析测试', () => {
// Helper: run the generated lezer parser over a code snippet.
function parseCode(code: string) {
  return parser.parse(code);
}
/**
 * Helper: collect every error node ('⚠') emitted by the lezer parser.
 *
 * @param tree parsed syntax tree (lezer Tree)
 * @param code optional original source text; when supplied, each error
 *             entry carries the offending source slice, otherwise `text`
 *             is left empty.
 *
 * Fix: the previous version sliced `tree.toString()` — the printed node
 * structure, not the parsed input — so the reported `text` was garbage.
 * The optional `code` parameter keeps existing call sites compatible.
 */
function hasErrorNodes(tree: any, code?: string): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
  const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
  tree.iterate({
    enter: (node: any) => {
      if (node.name === '⚠') {
        errors.push({
          name: node.name,
          from: node.from,
          to: node.to,
          // Slice the real source when available; never tree.toString().
          text: code ? code.substring(node.from, node.to) : ''
        });
      }
    }
  });
  return {
    hasError: errors.length > 0,
    errors
  };
}
/**
 * Helper: render the syntax tree as indented text (debugging aid).
 * Each line shows a node's name, range, and up to 30 characters of the
 * source text it covers.
 *
 * Fix: append '...' only when the node text really was truncated; the
 * old `text.length === 30` check also flagged nodes whose text happened
 * to be exactly 30 characters long.
 */
function printTree(tree: any, code: string, maxDepth = 5) {
  const lines: string[] = [];
  tree.iterate({
    enter: (node: any) => {
      // NOTE(review): getNodeDepth walks `.parent`, which exists on
      // SyntaxNode but not on the ref iterate() passes — depth may
      // always be 0 here; confirm against @lezer/common.
      const depth = getNodeDepth(tree, node);
      if (depth > maxDepth) return false; // cap traversal depth
      const indent = ' '.repeat(depth);
      const truncated = node.to - node.from > 30;
      const text = code.substring(node.from, Math.min(node.to, node.from + 30));
      const displayText = truncated ? text + '...' : text;
      lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
    }
  });
  return lines.join('\n');
}
/**
 * Helper: depth of a node, counted as the number of `.parent` links
 * between it and the tree root (a root node has depth 0).
 */
function getNodeDepth(tree: any, targetNode: any): number {
  let level = 0;
  for (let node = targetNode; node.parent; node = node.parent) {
    level += 1;
  }
  return level;
}
it('应该正确解析基础的 journey 声明', () => {
const code = `journey
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带标题的 journey 图', () => {
const code = `journey
title My working day
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带章节的 journey 图', () => {
const code = `journey
title My working day
section Go to work
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带任务和分数的 journey 图', () => {
const code = `journey
title My working day
section Go to work
Make tea: 5
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带任务、分数和参与者的 journey 图', () => {
const code = `journey
title My working day
section Go to work
Make tea: 5: Me
Go upstairs: 3: Me
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带多个参与者的任务', () => {
const code = `journey
title My working day
section Go to work
Make tea: 5: Me, Cat
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析多个章节的 journey 图', () => {
const code = `journey
title My working day
section Go to work
Make tea: 5: Me
Go upstairs: 3: Me
section Work
Do work: 1: Me, Cat
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析完整的 journey 图示例', () => {
const code = `journey
title My working day
section Go to work
Make tea: 5: Me
Go upstairs: 3: Me
Do work: 1: Me, Cat
section Go home
Go downstairs: 5: Me
Sit down: 5: Me
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
});

View File

@@ -0,0 +1,3 @@
import { LRParser } from '@lezer/lr';
export declare const parser: LRParser;

View File

@@ -0,0 +1,21 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
import {LRParser} from "@lezer/lr"
import {keywordTokens, textTokens1, textTokens2, textTokens3} from "./tokens"
import {journeyHighlighting} from "./highlight"
const spec_identifier = {__proto__:null,journey:42}
export const parser = LRParser.deserialize({
version: 14,
states: "%^OVQ`OOO[QeO'#CoQOQ`OOOOQT'#C_'#C_OOQT'#Cf'#CfOmQeO,59ZOOQO'#Cc'#CcO!OQ`O'#CbOOQO'#Cs'#CsO!TQbO'#CsOvQ`O,59ZOOQT-E6d-E6dO!YQ`O1G.uO!bQdO,58|OOQO'#Ca'#CaOOQO,59_,59_O!gQeO1G.uO!YQ`O1G.uO!xQeO7+$aO#RQ`O7+$aOOQO'#Cd'#CdO#ZQ`O1G.hOOQO,59S,59SOOQO-E6f-E6fO#fQeO<<G{O#wQhO7+$SP#|QeO'#CfOOQO'#Ce'#CeO$[Q`O<<GnO#wQhO'#CgO$gQ`OAN=YOOQO,59R,59ROOQO-E6e-E6e",
stateData: "$u~ObOS~OeRO~OPXOSWO_UOfSO]cX~OPXOSWO_UOfSO]ca~Oh]O~O^^O~OfSO]ci~O_dO~OPXOSWO_UOfSO]ci~OPXOSWO_UOfSO]cq~OhiO]UifUi~OPXOSWO_UOfSO]cy~O`kO~OPXOSWO_UOfSO~OimO]UyfUy~OimO]U!RfU!R~Ofb~",
goto: "#_hPPPiPlow!P!S!Y!n!tPPPPPP#OPPP#RRPOR_X]WPT`bhj]VPT`bhjRe]QliRomQTPYZT`bhjQ`YSb[aRhcQnlRpnQaYQc[TgacRQOQYPQ[TXf`bhj",
nodeNames: "⚠ Keyword JourneyDiagram DiagramName LineComment Text Task TaskName Score Actor",
maxTerm: 25,
propSources: [journeyHighlighting],
skippedNodes: [0],
repeatNodeCount: 3,
tokenData: "$|~RaXY!WYZ!{Z^!Wpq!Wuv#x|}$g![!]$l!c!}$q#T#o$q#y#z!W$f$g!W#BY#BZ!W$IS$I_!W$I|$JO!W$JT$JU!W$KV$KW!W&FU&FV!W~!]Yb~X^!Wpq!W#y#z!W$f$g!W#BY#BZ!W$IS$I_!W$I|$JO!W$JT$JU!W$KV$KW!W&FU&FV!W~#S[f~b~XY!WYZ!{Z^!Wpq!W#y#z!W$f$g!W#BY#BZ!W$IS$I_!W$I|$JO!W$JT$JU!W$KV$KW!W&FU&FV!W~#{Puv$O~$TSS~OY$OZ;'S$O;'S;=`$a<%lO$O~$dP;=`<%l$O~$lOi~~$qOh~~$vQd~!c!}$q#T#o$q",
tokenizers: [keywordTokens, textTokens1, textTokens2, textTokens3, 0],
topRules: {"JourneyDiagram":[0,2]},
specialized: [{term: 20, get: (value: keyof typeof spec_identifier) => spec_identifier[value] || -1}],
tokenPrec: 172
})

View File

@@ -0,0 +1,69 @@
import { ExternalTokenizer } from '@lezer/lr';
import { Keyword, text1, text2, text3 } from './journey.grammar.terms';
import type { InputStream } from '@lezer/lr';
// Code points that delimit a word: EOF, tab, LF, CR, space.
const skipCodePoints = [-1, 9, 10, 13, 32];
// Keywords recognised at the start of a journey line.
const keywords = ['title', 'section'];
// True when the stream is positioned at a `%%` line comment.
const isComment = (input: InputStream) => {
  return input.peek(0) === 37 && input.peek(1) === 37;
};
// True when a tokenizer should yield immediately: whitespace/EOF or comment.
const shouldSkip = (input: InputStream) => {
  return skipCodePoints.includes(input.next) || isComment(input);
};
// Tokenizer for line-leading keywords (`title`, `section`).
// Reads one whitespace-delimited word and accepts a Keyword token when it
// matches a known keyword.
// NOTE(review): the read loop stops at the first space (32 is in
// skipCodePoints), so `tokens` can never contain a space and the
// `keyword + ' '` branch below can never be true — in practice only an
// exact match is accepted. Also, `keyword === tokens` is case-sensitive
// while the fallback compares lower-cased text; confirm intended casing.
export const keywordTokens = new ExternalTokenizer((input) => {
  if (shouldSkip(input)) return;
  let tokens = '';
  while (!skipCodePoints.includes(input.next)) {
    tokens += String.fromCodePoint(input.next);
    input.advance();
  }
  const activeKeyword = keywords.filter((keyword) => {
    if (keyword === tokens) {
      return tokens.toLowerCase().startsWith(keyword);
    }
    return tokens.toLowerCase().startsWith(keyword + ' '); // ensure the keyword isn't used as a token unless there's a space at the end e.g. titleStuff
  });
  if (activeKeyword.length > 0) {
    // Negative end offset trims the accepted token back to the keyword itself.
    input.acceptToken(Keyword, activeKeyword[0].length - tokens.length);
    return;
  }
});
/**
 * Free-text tokenizers for the journey grammar. Each consumes characters
 * up to (not including) its terminator, then emits its term:
 *  - textTokens1 → text1: runs to end of line
 *  - textTokens2 → text2: additionally stops at ':'
 *  - textTokens3 → text3: additionally stops at ','
 */
const NEWLINE = 10;
const EOF = -1;
const COLON = 58;
const COMMA = 44;
export const textTokens1 = new ExternalTokenizer((input) => {
  if (shouldSkip(input)) return;
  while (input.next !== NEWLINE && input.next !== EOF) input.advance();
  input.acceptToken(text1);
});
export const textTokens2 = new ExternalTokenizer((input) => {
  if (shouldSkip(input)) return;
  while (input.next !== COLON && input.next !== NEWLINE && input.next !== EOF) input.advance();
  input.acceptToken(text2);
});
export const textTokens3 = new ExternalTokenizer((input) => {
  if (shouldSkip(input)) return;
  while (input.next !== COMMA && input.next !== NEWLINE && input.next !== EOF) input.advance();
  input.acceptToken(text3);
});

View File

@@ -0,0 +1,29 @@
// Top-level Mermaid grammar: skips any leading non-diagram lines, then the
// whole remaining document is matched as exactly one diagram-type token.
// (The old header linked only the pie-chart syntax; overall reference:
// https://mermaid.js.org/)
@top MermaidDiagram {
  preDiagramLine* (
    PieDiagram |
    MindmapDiagram |
    FlowchartDiagram |
    SequenceDiagram |
    JourneyDiagram |
    RequirementDiagram |
    GanttDiagram
  )
}
@skip { space }
@tokens {
  space { $[ \t\r]+ }
}
// Diagram-type detection is done entirely by the external tokenizer.
@external tokens diagramText from "./tokens" {
  preDiagramLine,
  PieDiagram,
  MindmapDiagram,
  FlowchartDiagram,
  SequenceDiagram,
  JourneyDiagram,
  RequirementDiagram,
  GanttDiagram
}

View File

@@ -0,0 +1,8 @@
export declare const preDiagramLine: number;
export declare const MindmapDiagram: number;
export declare const PieDiagram: number;
export declare const FlowchartDiagram: number;
export declare const SequenceDiagram: number;
export declare const JourneyDiagram: number;
export declare const RequirementDiagram: number;
export declare const GanttDiagram: number;

View File

@@ -0,0 +1,11 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
export const
preDiagramLine = 11,
PieDiagram = 1,
MindmapDiagram = 2,
FlowchartDiagram = 3,
SequenceDiagram = 4,
JourneyDiagram = 5,
RequirementDiagram = 6,
GanttDiagram = 7,
MermaidDiagram = 8

View File

@@ -0,0 +1,283 @@
import { describe, it, expect } from 'vitest';
import { parser } from './mermaid.parser.grammar';
/**
* Mermaid Grammar 测试
*
* 测试目标:验证标准的 Mermaid 综合语法是否能正确解析,不应该出现错误节点(⚠)
* 这个测试涵盖所有类型的 Mermaid 图表
*/
describe('Mermaid Grammar 解析测试', () => {
// Helper: run the generated lezer parser over a code snippet.
function parseCode(code: string) {
  return parser.parse(code);
}
/**
 * Helper: collect every error node ('⚠') emitted by the lezer parser.
 *
 * @param tree parsed syntax tree (lezer Tree)
 * @param code optional original source text; when supplied, each error
 *             entry carries the offending source slice, otherwise `text`
 *             is left empty.
 *
 * Fix: the previous version sliced `tree.toString()` — the printed node
 * structure, not the parsed input — so the reported `text` was garbage.
 * The optional `code` parameter keeps existing call sites compatible.
 */
function hasErrorNodes(tree: any, code?: string): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
  const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
  tree.iterate({
    enter: (node: any) => {
      if (node.name === '⚠') {
        errors.push({
          name: node.name,
          from: node.from,
          to: node.to,
          // Slice the real source when available; never tree.toString().
          text: code ? code.substring(node.from, node.to) : ''
        });
      }
    }
  });
  return {
    hasError: errors.length > 0,
    errors
  };
}
/**
 * Helper: render the syntax tree as indented text (debugging aid).
 * Each line shows a node's name, range, and up to 30 characters of the
 * source text it covers.
 *
 * Fix: append '...' only when the node text really was truncated; the
 * old `text.length === 30` check also flagged nodes whose text happened
 * to be exactly 30 characters long.
 */
function printTree(tree: any, code: string, maxDepth = 5) {
  const lines: string[] = [];
  tree.iterate({
    enter: (node: any) => {
      // NOTE(review): getNodeDepth walks `.parent`, which exists on
      // SyntaxNode but not on the ref iterate() passes — depth may
      // always be 0 here; confirm against @lezer/common.
      const depth = getNodeDepth(tree, node);
      if (depth > maxDepth) return false; // cap traversal depth
      const indent = ' '.repeat(depth);
      const truncated = node.to - node.from > 30;
      const text = code.substring(node.from, Math.min(node.to, node.from + 30));
      const displayText = truncated ? text + '...' : text;
      lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
    }
  });
  return lines.join('\n');
}
/**
 * Helper: depth of a node, counted as the number of `.parent` links
 * between it and the tree root (a root node has depth 0).
 */
function getNodeDepth(tree: any, targetNode: any): number {
  let level = 0;
  for (let node = targetNode; node.parent; node = node.parent) {
    level += 1;
  }
  return level;
}
it('应该正确解析 Pie 图', () => {
const code = `pie title Pets
"Dogs" : 386
"Cats" : 85
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 Mindmap 图', () => {
const code = `mindmap
root((mindmap))
Origins
Long history
Research
On effectiveness
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 Flowchart 图', () => {
const code = `flowchart TD
A[Start] --> B{Is it?}
B -->|Yes| C[OK]
B -->|No| D[End]
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 Sequence 图', () => {
const code = `sequenceDiagram
Alice->>John: Hello John
John-->>Alice: Great!
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 Journey 图', () => {
const code = `journey
title My working day
section Go to work
Make tea: 5: Me
Go upstairs: 3: Me
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 Requirement 图', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
text: the test text
risk: high
}
element test_entity {
type: simulation
}
test_entity - satisfies -> test_req
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 Gantt 图', () => {
const code = `gantt
dateFormat YYYY-MM-DD
title Adding GANTT diagram
section A section
Completed task :done, des1, 2014-01-06,2014-01-08
Active task :active, des2, 2014-01-09, 3d
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带注释的 flowchart 图', () => {
const code = `%% This is a comment
flowchart TD
A[Start] --> B[End]
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带空行的 sequence 图', () => {
const code = `
sequenceDiagram
participant Alice
Alice->>John: Hello
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析 graph 类型的流程图', () => {
const code = `graph LR
A[Square Rect] -- Link text --> B((Circle))
A --> C(Round Rect)
B --> D{Rhombus}
C --> D
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
});

View File

@@ -0,0 +1,3 @@
import { LRParser } from '@lezer/lr';
export declare const parser: LRParser;

View File

@@ -0,0 +1,17 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
import {LRParser} from "@lezer/lr"
import {diagramText} from "./tokens"
export const parser = LRParser.deserialize({
version: 14,
states: "nOVQROOOOQQ'#Ce'#CeOVQROOQOQPOOOOQQ-E6c-E6c",
stateData: "q~O]OS~OPROQRORROSROTROUROVROZPO~O",
goto: "aYPPPPPPPPPZQQORSQ",
nodeNames: "⚠ PieDiagram MindmapDiagram FlowchartDiagram SequenceDiagram JourneyDiagram RequirementDiagram GanttDiagram MermaidDiagram",
maxTerm: 13,
skippedNodes: [0],
repeatNodeCount: 1,
tokenData: "j~RRXY[]^[pq[~aR]~XY[]^[pq[",
tokenizers: [0, diagramText],
topRules: {"MermaidDiagram":[0,8]},
tokenPrec: 0
})

View File

@@ -0,0 +1,52 @@
import { ExternalTokenizer } from '@lezer/lr';
import {
preDiagramLine,
MindmapDiagram,
PieDiagram,
FlowchartDiagram,
SequenceDiagram,
JourneyDiagram,
RequirementDiagram,
GanttDiagram,
} from './mermaid.grammar.terms';
// Code points at which the tokenizer yields without reading: EOF, tab, CR, space.
const skipCodePoints = [-1, 9, 13, 32];
// First word of a document → lezer term id for that diagram type.
// Note that `graph` is an alias for flowcharts.
const diagramMap: Record<string, number> = {
  mindmap: MindmapDiagram,
  pie: PieDiagram,
  flowchart: FlowchartDiagram,
  graph: FlowchartDiagram,
  sequenceDiagram: SequenceDiagram,
  journey: JourneyDiagram,
  requirementDiagram: RequirementDiagram,
  gantt: GanttDiagram,
};
// Recognised diagram keywords, in insertion (lookup-priority) order.
const diagrams = Object.keys(diagramMap);
/**
 * Classifies the first meaningful line of input.
 *
 * Reads one line; when it starts with a known diagram keyword, the rest
 * of the input is consumed and emitted as that diagram's token, otherwise
 * the line is emitted as preDiagramLine (e.g. a leading `%%` comment).
 *
 * Fix: use strict `!==` (was loose `!=`) to match the equality style used
 * everywhere else in these tokenizers.
 */
export const diagramText = new ExternalTokenizer((input) => {
  if (skipCodePoints.includes(input.next)) return;
  let tokens = '';
  while (input.next !== 10 && input.next !== -1) {
    tokens += String.fromCodePoint(input.next);
    input.advance();
  }
  // Step past the character that terminated the line.
  input.advance();
  const activeDiagram = diagrams.filter((diagram) => {
    return tokens.startsWith(diagram);
  });
  if (activeDiagram.length > 0) {
    // Hand the entire remainder of the document to the matched diagram token.
    while (input.next !== -1) {
      input.advance();
    }
    input.acceptToken(diagramMap[activeDiagram[0]]);
  } else {
    input.acceptToken(preDiagramLine);
  }
});

View File

@@ -0,0 +1,11 @@
import { styleTags } from '@lezer/highlight';
import { mindmapTags } from '../../tags';
// Highlighting map for the mindmap grammar: syntax-tree node names → highlight
// tags (five separately-styled line-text variants).
export const mindmapHighlighting = styleTags({
  DiagramName: mindmapTags.diagramName,
  LineText1: mindmapTags.lineText1,
  LineText2: mindmapTags.lineText2,
  LineText3: mindmapTags.lineText3,
  LineText4: mindmapTags.lineText4,
  LineText5: mindmapTags.lineText5,
});

View File

@@ -0,0 +1,92 @@
@top MindmapDiagram {
newline+ |
DiagramName Line*
}
@skip { spaces | newlineEmpty }
lineText {
LineText1 |
LineText2 |
LineText3 |
LineText4 |
LineText5
}
ShapedText {
square |
roundedSquare |
circle |
bang |
cloud |
hexagon
}
square {
"[" lineText "]"
}
roundedSquare {
"(" lineText ")"
}
circle {
"((" lineText "))"
}
bang {
"))" lineText "(("
}
cloud {
")" lineText "("
}
hexagon {
"{{" lineText "}}"
}
IconLine {
"::" Icon "(" lineText ")"
}
ClassLine {
":::" lineText
}
Line {
newline |
newline indent (
lineText |
IconLine |
ClassLine |
ShapedText |
lineText ShapedText
)
}
DiagramName { kw<"mindmap"> }
Icon { kw<"icon"> }
kw<term> { @specialize<word, term> }
@context trackIndent from "./tokens.js"
@external tokens indentation from "./tokens" { indent }
@external tokens lineTextType from "./tokens" {
LineText1,
LineText2,
LineText3,
LineText4,
LineText5
}
@tokens {
spaces { ($[ \t\f] | "\\" $[\n\r])+ }
word { @asciiLetter+ }
}
@external tokens newlines from "./tokens" { newline, newlineEmpty }
@external propSource mindmapHighlighting from "./highlight"

View File

@@ -0,0 +1,8 @@
export declare const newline: number;
export declare const newlineEmpty: number;
export declare const indent: number;
export declare const LineText1: number;
export declare const LineText2: number;
export declare const LineText3: number;
export declare const LineText4: number;
export declare const LineText5: number;

View File

@@ -0,0 +1,17 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
export const
indent = 16,
LineText1 = 1,
LineText2 = 2,
LineText3 = 3,
LineText4 = 4,
LineText5 = 5,
newline = 17,
newlineEmpty = 18,
MindmapDiagram = 6,
DiagramName = 7,
Line = 8,
IconLine = 9,
Icon = 10,
ClassLine = 11,
ShapedText = 12

View File

@@ -0,0 +1,239 @@
import { describe, it, expect } from 'vitest';
import { parser } from './mindmap.parser.grammar';
/**
* Mindmap Grammar 测试
*
* 测试目标:验证标准的 Mermaid Mindmap 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Mindmap Grammar 解析测试', () => {
// Helper: run the generated lezer parser over a code snippet.
function parseCode(code: string) {
  return parser.parse(code);
}
/**
 * Helper: collect every error node ('⚠') emitted by the lezer parser.
 *
 * @param tree parsed syntax tree (lezer Tree)
 * @param code optional original source text; when supplied, each error
 *             entry carries the offending source slice, otherwise `text`
 *             is left empty.
 *
 * Fix: the previous version sliced `tree.toString()` — the printed node
 * structure, not the parsed input — so the reported `text` was garbage.
 * The optional `code` parameter keeps existing call sites compatible.
 */
function hasErrorNodes(tree: any, code?: string): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
  const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
  tree.iterate({
    enter: (node: any) => {
      if (node.name === '⚠') {
        errors.push({
          name: node.name,
          from: node.from,
          to: node.to,
          // Slice the real source when available; never tree.toString().
          text: code ? code.substring(node.from, node.to) : ''
        });
      }
    }
  });
  return {
    hasError: errors.length > 0,
    errors
  };
}
/**
 * Helper: render the syntax tree as indented text (debugging aid).
 * Each line shows a node's name, range, and up to 30 characters of the
 * source text it covers.
 *
 * Fix: append '...' only when the node text really was truncated; the
 * old `text.length === 30` check also flagged nodes whose text happened
 * to be exactly 30 characters long.
 */
function printTree(tree: any, code: string, maxDepth = 5) {
  const lines: string[] = [];
  tree.iterate({
    enter: (node: any) => {
      // NOTE(review): getNodeDepth walks `.parent`, which exists on
      // SyntaxNode but not on the ref iterate() passes — depth may
      // always be 0 here; confirm against @lezer/common.
      const depth = getNodeDepth(tree, node);
      if (depth > maxDepth) return false; // cap traversal depth
      const indent = ' '.repeat(depth);
      const truncated = node.to - node.from > 30;
      const text = code.substring(node.from, Math.min(node.to, node.from + 30));
      const displayText = truncated ? text + '...' : text;
      lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
    }
  });
  return lines.join('\n');
}
/**
 * Helper: depth of a node, counted as the number of `.parent` links
 * between it and the tree root (a root node has depth 0).
 */
function getNodeDepth(tree: any, targetNode: any): number {
  let level = 0;
  for (let node = targetNode; node.parent; node = node.parent) {
    level += 1;
  }
  return level;
}
it('应该正确解析基础的 mindmap 声明', () => {
const code = `mindmap
Root
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带子节点的 mindmap', () => {
const code = `mindmap
Root
A
B
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析多层级的 mindmap', () => {
const code = `mindmap
Root
A
A1
A2
B
B1
B2
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带方括号形状的节点', () => {
const code = `mindmap
Root
[Square node]
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带圆括号形状的节点', () => {
const code = `mindmap
Root
(Rounded node)
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带双圆括号形状的节点(圆形)', () => {
const code = `mindmap
Root
((Circle node))
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带花括号形状的节点(六边形)', () => {
const code = `mindmap
Root
{{Hexagon node}}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析完整的 mindmap 示例', () => {
const code = `mindmap
root((mindmap))
Origins
Long history
::icon(fa fa-book)
Popularisation
British popular psychology author Tony Buzan
Research
On effectiveness<br/>and features
On Automatic creation
Uses
Creative techniques
Strategic planning
Argument mapping
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
});

View File

@@ -0,0 +1,3 @@
import { LRParser } from '@lezer/lr';
export declare const parser: LRParser;

View File

@@ -0,0 +1,23 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// NOTE: regenerate with `npm run build:mermaid-parser` instead of editing by hand.
// Fix: the generated output imported from both "./tokens" and "./tokens.js"
// (two specifiers for the same module); merged into a single consistent import.
import {LRParser} from "@lezer/lr"
import {indentation, lineTextType, newlines, trackIndent} from "./tokens"
import {mindmapHighlighting} from "./highlight"
const spec_word = {__proto__:null,mindmap:44, icon:50}
export const parser = LRParser.deserialize({
  version: 14,
  states: "&fOYQ[OOOOQW'#Ci'#CiQbQ[OOQgQ[OOOOQW'#Cc'#CcOOQW-E6g-E6gOlQ]O'#CdOOQW'#Cj'#CjQgQ[OOO!]Q^O,59OOOQW-E6h-E6hOOQW'#Cs'#CsO!vQ[O'#CeO!{Q^O'#CgO!{Q^O'#CyO!{Q^O'#C|O!{Q^O'#C}O!{Q^O'#DQO!{Q^O'#DRO!{Q^O'#DSOOQW'#Ch'#ChO#^Q[O1G.jOOQW1G.j1G.jO#hQ[O,59POOQW'#Cf'#CfOOQW,59R,59RO#mQ[O,59eO#rQ[O,59hO#wQ[O,59iO#|Q[O,59lO$RQ[O,59mO$WQ[O,59nOOQW7+$U7+$UO!{Q^O1G.kOOQW1G/P1G/POOQW1G/S1G/SOOQW1G/T1G/TOOQW1G/W1G/WOOQW1G/X1G/XOOQW1G/Y1G/YO$]Q[O7+$VOOQW<<Gq<<Gq",
  stateData: "$b~OdOSbOS~OaPOfSO~OaPO~OaUO~O`XO_WXaWX~Oj_OkbOn^Or`OsaOwcO~OPZOQZORZOSZOTZOh[Ol]O~PwOihO~OPZOQZORZOSZOTZO~O_WiaWi~PwOjqO~OorO~OksO~OstO~OruO~OjvO~OxwO~OkyO~O",
  goto: "#YwPPPPPPPx{!P!S!P!V!]!cPPPPPPPP!iPPPPP#UPP#U#UPP#U#U#URROTVRWRfXRg[QfXRpeQQORTQQWRRYWQeXQi]Qj^Qk_Ql`QmaQnbQocRxqTdXe",
  nodeNames: "⚠ LineText1 LineText2 LineText3 LineText4 LineText5 MindmapDiagram DiagramName Line IconLine Icon ClassLine ShapedText",
  maxTerm: 40,
  context: trackIndent,
  propSources: [mindmapHighlighting],
  skippedNodes: [0],
  repeatNodeCount: 2,
  tokenData: "$b~R]XYz[]zpqzxy!fyz!s![!]#Q!c!}#e!}#O#p#O#P!]#P#Q#u#T#o#e#o#p#z#q#r$V~!PSd~XYz[]zpqz#O#P!]~!`QYZz]^z~!kPj~xy!n~!sOr~~!xPk~yz!{~#QOs~~#TP![!]#W~#]Ph~![!]#`~#eOl~~#jQe~!c!}#e#T#o#e~#uOn~~#zOo~~#}P#o#p$Q~$VOw~~$YP#q#r$]~$bOx~",
  tokenizers: [indentation, lineTextType, 0, newlines],
  topRules: {"MindmapDiagram":[0,6]},
  specialized: [{term: 21, get: (value: keyof typeof spec_word) => spec_word[value] || -1}],
  tokenPrec: 0
})

View File

@@ -0,0 +1,140 @@
import { ExternalTokenizer, ContextTracker } from '@lezer/lr';
import {
newline as newlineToken,
newlineEmpty,
indent,
LineText1,
LineText2,
LineText3,
LineText4,
LineText5,
} from './mindmap.grammar.terms';
import type { InputStream } from '@lezer/lr';
// InputStream augmented with the `read` helper used by the context tracker.
// NOTE(review): `read` appears to exist on lezer's runtime InputStream but is
// not part of the published typings — confirm against the installed @lezer/lr.
type InputStreamWithRead = InputStream & {
  read: (inputPosition: number, stackPosition: number) => string;
};
// The five cyclic line-text token types, indexed by (indent depth % 5).
const LineTextTokens = [LineText1, LineText2, LineText3, LineText4, LineText5];
// ASCII code points used by the tokenizers below.
const newline = 10,
  carriageReturn = 13,
  space = 32,
  tab = 9,
  hash = 35,
  colon = 58,
  parenL = 40,
  parenR = 41,
  bracketL = 91,
  bracketR = 93,
  braceL = 123,
  braceR = 125;
/**
 * External tokenizer for line breaks. Consumes the current character
 * (expected to be a newline at the positions where this runs — TODO confirm)
 * plus any following spaces/tabs, then classifies the break: if the next
 * significant character starts another line break or a `#`, the line is
 * "empty" and `newlineEmpty` is emitted instead of `newline`. The negative
 * offset passed to acceptToken pushes the consumed whitespace back so the
 * token itself only covers the line break.
 */
export const newlines = new ExternalTokenizer(
  (input, _stack) => {
    if (input.next < 0) return; // end of input — nothing to tokenize
    else {
      input.advance();
      let spaces = 0;
      while ((input.next as number) == space || (input.next as number) == tab) {
        input.advance();
        spaces++;
      }
      let empty =
        input.next == newline ||
        input.next == carriageReturn ||
        input.next == hash;
      // -spaces: end the token before the whitespace we scanned ahead over.
      input.acceptToken(empty ? newlineEmpty : newlineToken, -spaces);
    }
  },
  { contextual: true, fallback: true }
);
/**
 * External tokenizer for a node's free text. Scans until end of line and
 * emits the LineText token matching the current indentation context
 * (`stack.context.lineType`). Special cases:
 * - a `:` aborts (the line is icon/class syntax handled by the grammar);
 * - an opening bracket `(`/`[`/`{` ends the text read so far (shape start),
 *   or aborts when no text has been consumed yet;
 * - a closing bracket `)`/`]`/`}` ends the text read so far.
 */
export const lineTextType = new ExternalTokenizer((input, stack) => {
  let chars = 0;
  while (input.next > -1 && input.next !== newline) {
    if (input.next === colon) return;
    if (
      input.next === parenL ||
      input.next === bracketL ||
      input.next === braceL
    ) {
      if (chars > 0) {
        input.acceptToken(stack.context.lineType);
        return;
      } else return;
    }
    if (
      (input.next === parenR ||
        input.next === bracketR ||
        input.next === braceR) &&
      chars > 0
    ) {
      input.acceptToken(stack.context.lineType);
      return;
    }
    input.advance();
    chars++;
  }
  // End of line/input: accept whatever was consumed (may be zero-length —
  // NOTE(review): confirm a zero-length token here is intentional).
  input.acceptToken(stack.context.lineType);
});
/** Width of the tab stop starting at column `depth` (tab stops every 4 columns). */
const tabDepth = (depth: number) => 4 - (depth % 4);
/**
 * External tokenizer for leading indentation. Only runs right after a line
 * break; measures spaces (width 1) and tabs (advance to next 4-column tab
 * stop) and emits an `indent` token unless the line turns out to be blank
 * or a `#` comment line.
 */
export const indentation = new ExternalTokenizer((input, _stack) => {
  let prev = input.peek(-1);
  if (prev == newline || prev == carriageReturn) {
    let depth = 0;
    let chars = 0;
    while (true) {
      if (input.next == space) depth++;
      else if (input.next == tab) depth += tabDepth(depth);
      else break;
      input.advance();
      chars++;
    }
    // Suppress the indent token for blank/comment lines.
    if (
      input.next != newline &&
      input.next != carriageReturn &&
      input.next != hash
    ) {
      input.acceptToken(indent);
    }
  }
});
// Initial parse context: before any indentation has been seen, lines use
// the first line-text token type.
const indentTracker = {
  lineType: LineText1,
};
/**
 * Measure the display-column width of a leading-whitespace string,
 * expanding each tab to the next 4-column tab stop and counting every
 * other character (UTF-16 code unit) as one column.
 */
const countIndent = (space: string) => {
  let depth = 0;
  for (const unit of space.split('')) {
    depth += unit.charCodeAt(0) === tab ? tabDepth(depth) : 1;
  }
  return depth;
};
/** Map an indentation depth onto one of the five cyclic LineText token types. */
const getLineType = (depth: number) => LineTextTokens[depth % 5];
/**
 * Context tracker that records which LineText token type applies to the
 * line currently being shifted, based on its indentation depth.
 *
 * Fix: the previous implementation mutated the single shared context object
 * (every stack frame aliased `indentTracker`), which breaks lezer's
 * expectation that context values are immutable and comparable — stale
 * `lineType` values could leak into reused nodes during incremental
 * re-parses. A fresh context object is now returned instead.
 */
export const trackIndent = new ContextTracker({
  start: indentTracker,
  shift(context, term, stack, input: InputStreamWithRead) {
    if (term === indent) {
      const depth = countIndent(input.read(input.pos, stack.pos));
      return { lineType: getLineType(depth) };
    }
    return context;
  },
});

View File

@@ -0,0 +1,12 @@
import { styleTags } from '@lezer/highlight';
import { pieTags } from '../../tags';

// Maps pie-chart syntax-tree node names to the project's highlight tags.
export const pieHighlighting = styleTags({
  DiagramName: pieTags.diagramName,
  LineComment: pieTags.lineComment,
  Number: pieTags.number,
  ShowData: pieTags.showData,
  String: pieTags.string,
  Title: pieTags.title,
  TitleText: pieTags.titleText,
});

View File

@@ -0,0 +1,44 @@
// Lezer grammar for Mermaid pie charts.
@top PieDiagram { document+ }

@skip { spaces | LineComment }

// "pie" [showData] [title [free text]] followed by zero or more "name": value pairs.
document {
  DiagramName ShowData? (
    () |
    Title |
    Title TitleText |
    Title TitleText kvPair+ |
    Title kvPair+ |
    kvPair+
  )
}

// Strings are parsed with skipping disabled so inner whitespace is preserved.
@skip {} {
  String {
    '"' (stringContentDouble)* '"'
  }
}

// One data slice: quoted label, colon, numeric value.
kvPair {
  String ":" Number
}

DiagramName { kw<"pie"> }
ShowData { kw<"showData"> }
Title { kw<"title"> }

kw<term> { @specialize<identifier, term> }

// Rest-of-line title text is matched by an external tokenizer.
@external tokens titleText from "./tokens" { TitleText }

@tokens {
  identifier { @asciiLetter+ }
  stringContentDouble { !["]+ }
  spaces { @whitespace+ }
  Number { @digit+ ("." @digit+)? }
  LineComment { "%%" ![\n]* }
}

@external propSource pieHighlighting from "./highlight"

View File

@@ -0,0 +1,2 @@
// Hand-written declarations for the generated pie grammar term IDs.
export declare const LineComment: number;
export declare const TitleText: number;

View File

@@ -0,0 +1,10 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// NOTE: regenerate with `npm run build:mermaid-parser` instead of editing by hand.
export const
  TitleText = 1,
  LineComment = 2,
  PieDiagram = 3,
  DiagramName = 4,
  ShowData = 5,
  Title = 6,
  String = 7,
  Number = 8

View File

@@ -0,0 +1,204 @@
import { describe, it, expect } from 'vitest';
import { parser } from './pie.parser.grammar';
/**
* Pie Chart Grammar 测试
*
* 测试目标:验证标准的 Mermaid Pie Chart 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Pie Chart Grammar 解析测试', () => {
/**
* 辅助函数:解析代码并返回语法树
*/
function parseCode(code: string) {
const tree = parser.parse(code);
return tree;
}
/**
* 辅助函数:检查语法树中是否有错误节点
*/
function hasErrorNodes(tree: any): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
tree.iterate({
enter: (node: any) => {
if (node.name === '⚠') {
errors.push({
name: node.name,
from: node.from,
to: node.to,
text: tree.toString().substring(node.from, node.to)
});
}
}
});
return {
hasError: errors.length > 0,
errors
};
}
/**
* 辅助函数:打印语法树结构(用于调试)
*/
function printTree(tree: any, code: string, maxDepth = 5) {
const lines: string[] = [];
tree.iterate({
enter: (node: any) => {
const depth = getNodeDepth(tree, node);
if (depth > maxDepth) return false; // 限制深度
const indent = ' '.repeat(depth);
const text = code.substring(node.from, Math.min(node.to, node.from + 30));
const displayText = text.length === 30 ? text + '...' : text;
lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
}
});
return lines.join('\n');
}
/**
* 获取节点深度
*/
function getNodeDepth(tree: any, targetNode: any): number {
let depth = 0;
let current = targetNode;
while (current.parent) {
depth++;
current = current.parent;
}
return depth;
}
it('应该正确解析基础的 pie 声明', () => {
const code = `pie
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带 showData 的 pie 图', () => {
const code = `pie showData
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带标题的 pie 图', () => {
const code = `pie title My Pie Chart
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带数据的 pie 图', () => {
const code = `pie
"Dogs" : 386
"Cats" : 85
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带标题和数据的完整 pie 图', () => {
const code = `pie title Pets adopted by volunteers
"Dogs" : 386
"Cats" : 85
"Rats" : 15
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带小数值的数据', () => {
const code = `pie
"A" : 10.5
"B" : 20.75
"C" : 30.25
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带 showData 和完整数据的 pie 图', () => {
const code = `pie showData
title Key elements in Product X
"Calcium" : 42.96
"Potassium" : 50.05
"Magnesium" : 10.01
"Iron" : 5
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
});

View File

@@ -0,0 +1,3 @@
// Hand-written type declaration for the generated pie parser module.
import { LRParser } from '@lezer/lr';
// The precompiled LR parser instance for the pie grammar.
export declare const parser: LRParser;

View File

@@ -0,0 +1,21 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// NOTE: regenerate with `npm run build:mermaid-parser` instead of editing by hand.
import {LRParser} from "@lezer/lr"
import {titleText} from "./tokens"
import {pieHighlighting} from "./highlight"
const spec_identifier = {__proto__:null,pie:34, showData:36, title:38}
export const parser = LRParser.deserialize({
  version: 14,
  states: "$nOYQQOOO_QQO'#CkOOQO'#Ce'#CeQYQQOOOOQO'#C`'#C`OpOSO'#CcOxQQO'#CpOOQO'#Cf'#CfO}QQO,59VO!YQRO,59VO!hQQO,59VOOQO'#Ca'#CaOOQP'#Cb'#CbOOQO-E6c-E6cOOOO'#Cg'#CgO!vOSO,58}OOQO,58},58}O#OQQO,59[OOQO-E6d-E6dO#TQQO1G.qO#TQQO1G.qO#`QRO1G.qOOOO-E6e-E6eOOQO1G.i1G.iOOQO1G.v1G.vO#nQQO7+$]O#nQQO7+$]O#yQQO<<Gw",
  stateData: "$U~O^OSQOS~OaSO~ObZOc[OeTO[_Xa_X~Oe`Of^O~OgaO~OeTO[_aa_a~OPdOeTO[_aa_a~Oc[OeTO[_aa_a~OegOf^O~OWhO~OeTO[_ia_i~OPjOeTO[_ia_i~OeTO[_qa_q~OeTO[_ya_y~O",
  goto: "#RePPPPfjmsP!P!V!kPPP!qPPPP!uTPORRYPQXPReYeUPWXYcdeijkQROR]RQWPWbWcikScXYSideRkjQ_TRf_TQOReVPWXYcdeijk",
  nodeNames: "⚠ TitleText LineComment PieDiagram DiagramName ShowData Title String Number",
  maxTerm: 23,
  propSources: [pieHighlighting],
  skippedNodes: [0,2],
  repeatNodeCount: 3,
  tokenData: "*V~RrOX#]X^#t^p#]pq#tqr#]rs%gsu#]uv%lv!Q#]!Q!['`![!])R!]!c#]!c!})f!}#T#]#T#o)f#o#y#]#y#z#t#z$f#]$f$g#t$g#BY#]#BY#BZ#t#BZ$IS#]$IS$I_#t$I_$I|#]$I|$JO#t$JO$JT#]$JT$JU#t$JU$KV#]$KV$KW#t$KW&FU#]&FU&FV#t&FV;'S#];'S;=`#n<%lO#]Q#bSfQOr#]s;'S#];'S;=`#n<%lO#]Q#qP;=`<%l#]R#{h^PfQOX#]X^#t^p#]pq#tqr#]s#y#]#y#z#t#z$f#]$f$g#t$g#BY#]#BY#BZ#t#BZ$IS#]$IS$I_#t$I_$I|#]$I|$JO#t$JO$JT#]$JT$JU#t$JU$KV#]$KV$KW#t$KW&FU#]&FU&FV#t&FV;'S#];'S;=`#n<%lO#]~%lOe~R%qUfQOr#]su#]uv&Tv;'S#];'S;=`#n<%lO#]R&[VQPfQOY&TYZ#]Zr&Trs&qs;'S&T;'S;=`'Y<%lO&TP&vSQPOY&qZ;'S&q;'S;=`'S<%lO&qP'VP;=`<%l&qR']P;=`<%l&TR'gWWPfQOr#]s!O#]!O!P(P!P!Q#]!Q!['`![;'S#];'S;=`#n<%lO#]R(UUfQOr#]s!Q#]!Q![(h![;'S#];'S;=`#n<%lO#]R(oUWPfQOr#]s!Q#]!Q![(h![;'S#];'S;=`#n<%lO#]R)YSgPfQOr#]s;'S#];'S;=`#n<%lO#]R)mW`PfQOr#]s!c#]!c!})f!}#T#]#T#o)f#o;'S#];'S;=`#n<%lO#]",
  tokenizers: [titleText, 0, 1],
  topRules: {"PieDiagram":[0,3]},
  specialized: [{term: 16, get: (value: keyof typeof spec_identifier) => spec_identifier[value] || -1}],
  tokenPrec: 0
})

View File

@@ -0,0 +1,17 @@
import { ExternalTokenizer } from '@lezer/lr';
import { TitleText } from './pie.grammar.terms';

/**
 * External tokenizer for the free text following `title`: everything up to
 * the next line break (code point 10) or end of input.
 * NOTE(review): when the next character is already a newline this accepts a
 * zero-length TitleText token — confirm that is intentional.
 */
export const titleText = new ExternalTokenizer((input) => {
  if (input.next === 10) {
    input.acceptToken(TitleText);
    return;
  }
  if (input.next === -1) return; // end of input, no title text
  while (input.next !== 10 && input.next !== -1) {
    input.advance();
  }
  input.acceptToken(TitleText);
});

View File

@@ -0,0 +1,13 @@
import { styleTags } from '@lezer/highlight';
import { requirementTags } from '../../tags';
export const requirementHighlighting = styleTags({
'DiagramName SubDiagramType': requirementTags.diagramName,
LineComment: requirementTags.lineComment,
IDNumber: requirementTags.number,
'UnquotedString RelationshipStart': requirementTags.unquotedString,
QuotedString: requirementTags.quotedString,
PropKeyword: requirementTags.unquotedString,
Keyword: requirementTags.keyword,
'ForwardArrow BackArrow Hyphen': requirementTags.arrow,
});

View File

@@ -0,0 +1,151 @@
// Lezer grammar for Mermaid requirement diagrams.
@top RequirementDiagram {
  document
}

@skip { spaces | LineComment }

// Strings are parsed with skipping disabled so inner whitespace is preserved.
@skip {} {
  UnquotedString { unquotedString }
  QuotedString {
    '"' (stringContent)* '"'
  }
}

// "requirementDiagram" followed by newline-separated requirement/element
// blocks and relationship lines.
document {
  DiagramName newlines? |
  DiagramName newlines ((subDiagram | RelationshipLine ) newlines?)+
}

// A requirement or element block: type, name, braces with property lines.
subDiagram {
  SubDiagramType subDiagramName "{" newlines "}" |
  SubDiagramType subDiagramName "{" newlines subDiagramLine+ "}"
}

subDiagramName {
  UnquotedString | QuotedString
}

// One property line inside a block.
subDiagramLine {
  (
    idLine |
    textLine |
    riskLine |
    verifyMethodLine |
    typeLine |
    docRefLine
  ) newlines?
}

idLine {
  ID ":" IDNumber
}

textLine {
  Text ":" textContent
}

riskLine {
  Risk ":" RiskType
}

verifyMethodLine {
  VerifyMethod ":" VerifyMethodType
}

typeLine {
  Type ":" textContent
}

docRefLine {
  DocRef ":" textContent
}

textContent {
  UnquotedString | QuotedString
}

// "a - contains -> b" or the reversed "b <- contains - a" form.
RelationshipLine {
  relationshipStart Hyphen RelationshipType ForwardArrow relationshipEnd |
  relationshipStart BackArrow RelationshipType Hyphen relationshipEnd
}

relationshipStart {
  RelationshipStart | QuotedString
}

relationshipEnd {
  UnquotedString | QuotedString
}

DiagramName { diagramKw<"requirementDiagram"> }

// All block types accepted by Mermaid, in lower- and upper-camel case.
SubDiagramType {
  diagramKw<"requirement"> | diagramKw<"Requirement"> |
  diagramKw<"functionalRequirement"> | diagramKw<"FunctionalRequirement"> |
  diagramKw<"performanceRequirement"> | diagramKw<"PerformanceRequirement"> |
  diagramKw<"interfaceRequirement"> | diagramKw<"InterfaceRequirement"> |
  diagramKw<"physicalRequirement"> | diagramKw<"PhysicalRequirement"> |
  diagramKw<"designConstraint"> | diagramKw<"DesignConstraint"> |
  diagramKw<"element"> | diagramKw<"Element">
}

ID { propKw<"id"> | propKw<"Id"> | propKw<"ID"> }
Text { propKw<"text"> | propKw<"Text"> }
Risk { propKw<"risk"> | propKw<"Risk"> }
VerifyMethod { propKw<"verifymethod"> | propKw<"verifyMethod"> | propKw<"VerifyMethod"> }
Type { propKw<"type"> | propKw<"Type"> }
DocRef { propKw<"docRef"> | propKw<"DocRef"> }

RiskType {
  kw<"low"> | kw<"Low"> |
  kw<"medium"> | kw<"Medium"> |
  kw<"high"> | kw<"High">
}

VerifyMethodType {
  kw<"analysis"> | kw<"Analysis"> |
  kw<"demonstration"> | kw<"Demonstration"> |
  kw<"inspection"> | kw<"Inspection"> |
  kw<"test"> | kw<"Test">
}

RelationshipType {
  kw<"contains"> | kw<"Contains"> |
  kw<"copies"> | kw<"Copies"> |
  kw<"derives"> | kw<"Derives"> |
  kw<"satisfies"> | kw<"Satisfies"> |
  kw<"verifies"> | kw<"Verifies"> |
  kw<"refines"> | kw<"Refines"> |
  kw<"traces"> | kw<"Traces">
}

// Specializations of the generic word token, named per highlight group.
diagramKw<term> { @specialize<word, term> }
propKw<term> { @specialize[@name=PropKeyword]<word, term> }
kw<term> { @specialize[@name=Keyword]<word, term> }

// The left-hand side of a relationship is matched by an external tokenizer.
@external tokens relationshipStart from "./tokens" { RelationshipStart }

@tokens {
  word { @asciiLetter+ }
  spaces { @whitespace+ }
  newlines { $[\n]+ }
  LineComment { "%%" ![\n]* }
  unquotedString { word ![\r\n\{\<\>\-\=]* }
  IDNumber { @digit+ ("." @digit+)* }
  stringContent { !["]+ }
  ForwardArrow { "->"}
  BackArrow { "<-"}
  Hyphen { "-" }
  @precedence { newlines, spaces }
  @precedence { ForwardArrow, Hyphen }
  @precedence { BackArrow, Hyphen }
}

@external propSource requirementHighlighting from "./highlight"

View File

@@ -0,0 +1 @@
export declare const RelationshipStart: number;

View File

@@ -0,0 +1,25 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// NOTE: regenerate with `npm run build:mermaid-parser` instead of editing by hand.
export const
  RelationshipStart = 1,
  LineComment = 2,
  RequirementDiagram = 3,
  DiagramName = 4,
  SubDiagramType = 5,
  UnquotedString = 6,
  QuotedString = 7,
  ID = 8,
  PropKeyword = 44,
  IDNumber = 12,
  Text = 13,
  Risk = 16,
  RiskType = 19,
  Keyword = 61,
  VerifyMethod = 26,
  VerifyMethodType = 30,
  Type = 39,
  DocRef = 42,
  RelationshipLine = 45,
  Hyphen = 46,
  RelationshipType = 47,
  ForwardArrow = 62,
  BackArrow = 63

View File

@@ -0,0 +1,330 @@
import { describe, it, expect } from 'vitest';
import { parser } from './requirement.parser.grammar';
/**
* Requirement Diagram Grammar 测试
*
* 测试目标:验证标准的 Mermaid Requirement Diagram 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Requirement Diagram Grammar 解析测试', () => {
/**
* 辅助函数:解析代码并返回语法树
*/
function parseCode(code: string) {
const tree = parser.parse(code);
return tree;
}
/**
* 辅助函数:检查语法树中是否有错误节点
*/
function hasErrorNodes(tree: any): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
tree.iterate({
enter: (node: any) => {
if (node.name === '⚠') {
errors.push({
name: node.name,
from: node.from,
to: node.to,
text: tree.toString().substring(node.from, node.to)
});
}
}
});
return {
hasError: errors.length > 0,
errors
};
}
/**
* 辅助函数:打印语法树结构(用于调试)
*/
function printTree(tree: any, code: string, maxDepth = 5) {
const lines: string[] = [];
tree.iterate({
enter: (node: any) => {
const depth = getNodeDepth(tree, node);
if (depth > maxDepth) return false; // 限制深度
const indent = ' '.repeat(depth);
const text = code.substring(node.from, Math.min(node.to, node.from + 30));
const displayText = text.length === 30 ? text + '...' : text;
lines.push(`${indent}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`);
}
});
return lines.join('\n');
}
/**
* 获取节点深度
*/
function getNodeDepth(tree: any, targetNode: any): number {
let depth = 0;
let current = targetNode;
while (current.parent) {
depth++;
current = current.parent;
}
return depth;
}
it('应该正确解析基础的 requirementDiagram 声明', () => {
const code = `requirementDiagram
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析空的需求定义', () => {
const code = `requirementDiagram
requirement test_req {
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带 ID 的需求', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带文本的需求', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
text: the test text
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带风险级别的需求', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
text: the test text
risk: high
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析带验证方法的需求', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
text: the test text
risk: high
verifymethod: test
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析不同类型的需求functionalRequirement', () => {
const code = `requirementDiagram
functionalRequirement test_req {
id: 1.1
text: the test text
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析元素定义', () => {
const code = `requirementDiagram
element test_entity {
type: simulation
}
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析关系contains', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
}
element test_entity {
type: simulation
}
test_entity - contains -> test_req
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析反向关系', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
}
element test_entity {
type: simulation
}
test_req <- satisfies - test_entity
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
it('应该正确解析完整的需求图示例', () => {
const code = `requirementDiagram
requirement test_req {
id: 1
text: the test text.
risk: high
verifymethod: test
}
functionalRequirement test_req2 {
id: 1.1
text: the second test text.
risk: low
verifymethod: inspection
}
element test_entity {
type: simulation
}
test_entity - satisfies -> test_req2
test_req - traces -> test_req2
`;
const tree = parseCode(code);
const result = hasErrorNodes(tree);
if (result.hasError) {
console.log('语法树:');
console.log(printTree(tree, code));
console.log('错误节点:', result.errors);
}
expect(result.hasError).toBe(false);
});
});

View File

@@ -0,0 +1,3 @@
// Hand-written type declaration for the generated requirement parser module.
import { LRParser } from '@lezer/lr';
// The precompiled LR parser instance for the requirement grammar.
export declare const parser: LRParser;

View File

@@ -0,0 +1,21 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// NOTE: regenerate with `npm run build:mermaid-parser` instead of editing by hand.
import {LRParser} from "@lezer/lr"
import {relationshipStart} from "./tokens"
import {requirementHighlighting} from "./highlight"
const spec_word = {__proto__:null,requirementDiagram:144, requirement:150, Requirement:152, functionalRequirement:154, FunctionalRequirement:156, performanceRequirement:158, PerformanceRequirement:160, interfaceRequirement:162, InterfaceRequirement:164, physicalRequirement:166, PhysicalRequirement:168, designConstraint:170, DesignConstraint:172, element:174, Element:176, id:18, Id:20, ID:22, text:28, Text:30, risk:34, Risk:36, low:40, Low:42, medium:44, Medium:46, high:48, High:50, verifymethod:54, verifyMethod:56, VerifyMethod:58, analysis:62, Analysis:64, demonstration:66, Demonstration:68, inspection:70, Inspection:72, test:74, Test:76, type:80, Type:82, docRef:86, DocRef:88, contains:96, Contains:98, copies:100, Copies:102, derives:104, Derives:106, satisfies:108, Satisfies:110, verifies:112, Verifies:114, refines:116, Refines:118, traces:120, Traces:122}
export const parser = LRParser.deserialize({
  version: 14,
  states: ")`OYQQOOO_QQO'#DtQOQQOOOOQO'#C`'#C`O!kQRO,5:`O!rOSO'#CcOOQO'#Ef'#EfO!zQQO'#DZO#SQRO'#DnO$^QRO1G/zOOQO'#Ca'#CaO$eQWO'#DxOOOO'#Do'#DoO$mOSO,58}OOQP,58},58}O$uQQO,59uO$uQQO,59uOOQP,5:Y,5:YOOQP-E7l-E7lOOQP'#Cb'#CbOOQP'#Eg'#EgO%sQQO,5:dOOOO-E7m-E7mOOQP1G.i1G.iO%xQQO1G/aOOQO'#D]'#D]O%}QQO1G/aO&SQQO1G0OO$eQWO7+${O'VQQO7+%jOOQP<<Hg<<HgO'^QQO'#E_O'cQQO'#EbO'hQQO'#EcO'mQQO'#E^OOQO'#Dp'#DpO(qQQO<<IUOOQO'#Cd'#CdOOQO'#Ci'#CiOOQO'#Cl'#ClOOQO'#Cv'#CvOOQO'#DT'#DTOOQO'#DW'#DWO(xQQO'#EaO(}QQO'#EdO)SQQO'#EeOOQP<<IU<<IUO)XQQO,5:yO)^QQO,5:|O)rQQO,5:}OOQO,5:x,5:xOOQO-E7n-E7nOOQPAN>pAN>pO$eQWO,5:{O$eQWO,5;OO$eQWO,5;POOQO1G0e1G0eOOQO1G0h1G0hOOQO'#Co'#CoOOQO1G0i1G0iOOQO'#Cz'#CzOOQO1G0g1G0gOOQO1G0j1G0jOOQO1G0k1G0k",
  stateData: "*e~O!gOSQOS~O!jRO~O!kSO!e!hX~OPUO!mYO!nYO!oYO!pYO!qYO!rYO!sYO!tYO!uYO!vYO!wYO!xYO!yYO!zYO!|TO~O!e!ha~PgO!|^O!}[O~O!O_O!a`O~O!kaOP!bX!e!bX!m!bX!n!bX!o!bX!p!bX!q!bX!r!bX!s!bX!t!bX!u!bX!v!bX!w!bX!x!bX!y!bX!z!bX!|!bX~O!e!hi~PgO!{cO!|TO~O!|gO!}[O~O!QiO!RiO!SiO!TiO!UiO!ViO!WiO!XiO!YiO!ZiO![iO!]iO!^iO!_iO~O#OkO~O!`lO~O!OlO~O!kmO~OXuOYuOZuO^vO_vOawObwOkxOlxOmxOxyOyyO{zO|zO~O#P!OO~P&XO#S!PO~O#S!QO~O#S!RO~O!k!SOX#QXY#QXZ#QX^#QX_#QXa#QXb#QXk#QXl#QXm#QXx#QXy#QX{#QX|#QX#P#QX~O#P!UO~P&XO#S!VO~O#S!WO~O#S!XO~O[!YO~Od![Oe![Of![Og![Oh![Oi![O~Oo!^Op!^Oq!^Or!^Os!^Ot!^Ou!^Ov!^O~O!k!a!g!`!O!`~",
  goto: "%r#[PPPP#]#`#d#k#vPPPP#zPP$OPP$SPPPPPP$VPPP$ZPPPPPPPP$^PP$bPP$fP$jPPPPPPPPPPPPPPPP$p$v$|PPP%SPPP$fPPPPPPPPPPPPPPPPPPP%V%ZP%Z%Z%Z%Z%Z%_%cRPOTZSXZdZl!V!W!XSUSXZdZl!V!W!XTomtT{mtTpmtR!Z!QTqmtR!]!RT|mtT}mtTWSXQh_Rj`QXSRbXQ]TRf]QtmR!TtRQOTsmtTrmtTVSXQeZQnlQ!_!VQ!`!WR!a!X",
  nodeNames: "⚠ RelationshipStart LineComment RequirementDiagram DiagramName SubDiagramType UnquotedString QuotedString ID PropKeyword PropKeyword PropKeyword IDNumber Text PropKeyword PropKeyword Risk PropKeyword PropKeyword RiskType Keyword Keyword Keyword Keyword Keyword Keyword VerifyMethod PropKeyword PropKeyword PropKeyword VerifyMethodType Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Type PropKeyword PropKeyword DocRef PropKeyword PropKeyword RelationshipLine Hyphen RelationshipType Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword Keyword ForwardArrow BackArrow",
  maxTerm: 103,
  propSources: [requirementHighlighting],
  skippedNodes: [0,2],
  repeatNodeCount: 3,
  tokenData: "1g~R{OX#xXY$aYZ&SZ^$a^p#xpq$aqr#xrs'}su#xuv(Sv}#x}!O)v!O!Q#x!Q![*t![!]+|!]!^#x!^!_,a!_!c#x!c!}-]!}#T#x#T#o-]#o#p0o#p#q#x#q#r1S#r#y#x#y#z$a#z$f#x$f$g$a$g#BY#x#BY#BZ$a#BZ$IS#x$IS$I_$a$I_$I|#x$I|$JO$a$JO$JT#x$JT$JU$a$JU$KV#x$KV$KW$a$KW&FU#x&FU&FV$a&FV;'S#x;'S;=`$Z<%lO#xQ#}S!}QOr#xs;'S#x;'S;=`$Z<%lO#xQ$^P;=`<%l#xV$hh!}Q!gTOX#xX^$a^p#xpq$aqr#xs#y#x#y#z$a#z$f#x$f$g$a$g#BY#x#BY#BZ$a#BZ$IS#x$IS$I_$a$I_$I|#x$I|$JO$a$JO$JT#x$JT$JU$a$JU$KV#x$KV$KW$a$KW&FU#x&FU&FV$a&FV;'S#x;'S;=`$Z<%lO#xV&]j!}Q!kP!gTOX#xXY$aYZ&SZ^$a^p#xpq$aqr#xs#y#x#y#z$a#z$f#x$f$g$a$g#BY#x#BY#BZ$a#BZ$IS#x$IS$I_$a$I_$I|#x$I|$JO$a$JO$JT#x$JT$JU$a$JU$KV#x$KV$KW$a$KW&FU#x&FU&FV$a&FV;'S#x;'S;=`$Z<%lO#x~(SO!|~V(XU!}QOr#xsu#xuv(kv;'S#x;'S;=`$Z<%lO#xV(rVQT!}QOY(kYZ#xZr(krs)Xs;'S(k;'S;=`)p<%lO(kT)^SQTOY)XZ;'S)X;'S;=`)j<%lO)XT)mP;=`<%l)XV)sP;=`<%l(kR)}U!}Q!OPOr#xs!`#x!`!a*a!a;'S#x;'S;=`$Z<%lO#xR*hS!}Q!`POr#xs;'S#x;'S;=`$Z<%lO#xR*{W[P!}QOr#xs!O#x!O!P+e!P!Q#x!Q![*t![;'S#x;'S;=`$Z<%lO#xR+jU!}QOr#xs!Q#x!Q![*t![;'S#x;'S;=`$Z<%lO#xR,TS#SP!}QOr#xs;'S#x;'S;=`$Z<%lO#xR,fU!}QOr#xs}#x}!O,x!O;'S#x;'S;=`$Z<%lO#xR-PS!}Q!aPOr#xs;'S#x;'S;=`$Z<%lO#xV-fb!}Q!{S!iPOY.nYZ#xZ].n]^#x^r.nrs/ts}.n}!O#x!O!^.n!^!a#x!a!c.n!c!}-]!}#T.n#T#o-]#o#p#x#p;'S.n;'S;=`0i<%lO.nU.u_!}Q!{SOY.nYZ#xZ].n]^#x^r.nrs/ts}.n}!O#x!O!^.n!^!a#x!a#o.n#o#p#x#p;'S.n;'S;=`0i<%lO.nS/yW!{SOY/tZ]/t^}/t!O!^/t!a#o/t#p;'S/t;'S;=`0c<%lO/tS0fP;=`<%l/tU0lP;=`<%l.nR0vS#OP!}QOr#xs;'S#x;'S;=`$Z<%lO#xR1ZS#PP!}QOr#xs;'S#x;'S;=`$Z<%lO#x",
  tokenizers: [relationshipStart, 0, 1, 2],
  topRules: {"RequirementDiagram":[0,3]},
  specialized: [{term: 71, get: (value: keyof typeof spec_word) => spec_word[value] || -1}],
  tokenPrec: 428
})

View File

@@ -0,0 +1,24 @@
import { ExternalTokenizer } from '@lezer/lr';
import { RelationshipStart } from './requirement.grammar.terms';

// Code points that terminate (or may not start) a relationship-source name:
// EOF, '-', '<', '>', LF, CR, '{', '='.
const notAllowedCodePoints = [-1, 45, 60, 62, 10, 13, 123, 61];

/**
 * External tokenizer for the left-hand side of a relationship line.
 * Peeks ahead, accumulating characters until a disallowed code point; if
 * the run is terminated by '-' or '<' (the start of an arrow), the text up
 * to the terminator — trimmed of trailing whitespace — is accepted as a
 * RelationshipStart token.
 */
export const relationshipStart = new ExternalTokenizer((input) => {
  // Bail out when the current char cannot begin a name (32 = space).
  if (notAllowedCodePoints.includes(input.next) || input.next === 32) return;
  let peek;
  let tokens = '';
  let count = 0;
  do {
    peek = input.peek(count);
    if (peek === -1) return; // hit EOF before an arrow — not a relationship
    tokens += String.fromCodePoint(peek);
    count++;
  } while (!notAllowedCodePoints.includes(peek));
  if (peek === 45 || peek === 60) {
    // Drop the terminator and trailing spaces, then accept that many chars.
    tokens = tokens.slice(0, -1).trim();
    input.acceptToken(RelationshipStart, tokens.length);
  }
});

View File

@@ -0,0 +1,14 @@
import { styleTags } from '@lezer/highlight';
import { sequenceTags } from '../../tags';

// Maps sequence-diagram syntax-tree node names to the project's highlight tags.
export const sequenceHighlighting = styleTags({
  DiagramName: sequenceTags.diagramName,
  NodeText: sequenceTags.nodeText,
  Keyword1: sequenceTags.keyword1,
  Keyword2: sequenceTags.keyword2,
  LineComment: sequenceTags.lineComment,
  'Arrow ArrowSuffix': sequenceTags.arrow,
  Position: sequenceTags.position,
  MessageText1: sequenceTags.messageText1,
  MessageText2: sequenceTags.messageText2,
});

View File

@@ -0,0 +1,114 @@
// Lezer grammar for Mermaid sequence diagrams.
// Fix: the `@external tokens textTokens` list was missing the comma after
// `links`, fusing the `links` and `loop` entries.
@top SequenceDiagram {
  document
}

@skip { spaces }

// "sequenceDiagram" followed by newline-separated statements.
document {
  DiagramName newlines? |
  DiagramName newlines subDocument newlines? |
  DiagramName newlines subDocument (newlines subDocument)+ newlines?
}

// One statement: comment, message, participant declaration, (de)activation,
// note, block keyword, autonumber, end, or link.
subDocument {
  LineComment |
  NodeText Arrow ArrowSuffix? newlines? NodeText ":" MessageText1 |
  (Create | Destroy)? (Participant | Actor)? NodeText (As NodeText)? |
  (Activate | Deactivate) NodeText |
  Note Position NodeText ("," NodeText)? ":" MessageText1 |
  Keyword MessageText2 |
  Autonumber |
  End |
  Link NodeText ":" MessageText1
}

MessageText1 {
  messageText
}

MessageText2 {
  messageText
}

// "+" activates / "-" deactivates the target of a message.
ArrowSuffix {
  "+" | "-"
}

Link[group=Keyword1] {
  link | links
}

// Block-structure keywords that take a rest-of-line label.
Keyword[group=Keyword1] {
  alt |
  and |
  box |
  break |
  critical |
  else |
  loop |
  opt |
  option |
  par |
  rect
}

DiagramName { kw<"sequenceDiagram"> }

kw<term> { @specialize<identifier, term> }

@external tokens messageTextToken from "./tokens" { messageText }
@external tokens textTokens from "./tokens" {
  Activate[group=Keyword1],
  Autonumber[group=Keyword1],
  Create[group=Keyword1],
  Deactivate[group=Keyword1],
  Destroy[group=Keyword1],
  End[group=Keyword1],
  Note[group=Keyword1],
  Actor[group=Keyword2],
  As[group=Keyword2],
  Participant[group=Keyword2],
  NodeText,
  Position,
  alt,
  and,
  box,
  break,
  critical,
  else,
  link,
  links,
  loop,
  opt,
  option,
  par,
  rect
}

@tokens {
  spaces { @whitespace+ }
  newlines { $[\n]+ }
  LineComment { "%%" ![\n]* }
  identifierChar { @asciiLetter | $[$\u{a1}-\u{10ffff}] }
  word { identifierChar (identifierChar | @digit)* }
  identifier { word }
  Arrow {
    "->" |
    "-->" |
    "->>" |
    "-->>" |
    "-x" |
    "--x" |
    "-)" |
    "--)"
  }
  @precedence {
    newlines,
    spaces,
    Arrow,
    identifier
  }
}

@external propSource sequenceHighlighting from "./highlight"

View File

@@ -0,0 +1,26 @@
// Hand-written declarations for the generated sequence grammar term IDs.
// `break` and `else` are reserved words, hence the underscore-prefixed names.
export declare const _break: number;
export declare const _else: number;
export declare const Activate: number;
export declare const Actor: number;
export declare const alt: number;
export declare const and: number;
export declare const As: number;
export declare const Autonumber: number;
export declare const box: number;
export declare const Create: number;
export declare const critical: number;
export declare const Deactivate: number;
export declare const Destroy: number;
export declare const End: number;
export declare const link: number;
export declare const links: number;
export declare const loop: number;
export declare const messageText: number;
export declare const NodeText: number;
export declare const Note: number;
export declare const opt: number;
export declare const option: number;
export declare const par: number;
export declare const Participant: number;
export declare const Position: number;
export declare const rect: number;

View File

@@ -0,0 +1,37 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// NOTE: regenerate with `npm run build:mermaid-parser` instead of editing by hand.
export const
  messageText = 24,
  Activate = 1,
  Autonumber = 2,
  Create = 3,
  Deactivate = 4,
  Destroy = 5,
  End = 6,
  Note = 7,
  Actor = 8,
  As = 9,
  Participant = 10,
  NodeText = 11,
  Position = 12,
  alt = 25,
  and = 26,
  box = 27,
  _break = 28,
  critical = 29,
  _else = 30,
  link = 31,
  links = 32,
  loop = 33,
  opt = 34,
  option = 35,
  par = 36,
  rect = 37,
  SequenceDiagram = 13,
  DiagramName = 14,
  LineComment = 15,
  Arrow = 16,
  ArrowSuffix = 17,
  MessageText1 = 18,
  Keyword = 19,
  MessageText2 = 20,
  Link = 21

View File

@@ -0,0 +1,270 @@
import { describe, it, expect } from 'vitest';
import { parser } from './sequence.parser.grammar';
/**
* Sequence Diagram Grammar 测试
*
* 测试目标:验证标准的 Mermaid Sequence Diagram 语法是否能正确解析,不应该出现错误节点(⚠)
*/
describe('Sequence Diagram Grammar 解析测试', () => {
  /**
   * Parse the given source with the sequence-diagram parser and return the
   * lezer syntax tree.
   */
  function parseCode(code: string) {
    return parser.parse(code);
  }

  /**
   * Collect all error nodes (⚠) from the syntax tree.
   *
   * Fix: the offending text is sliced from the parsed source `code`. The
   * previous implementation sliced `tree.toString()` — the printed tree
   * structure — whose characters have no relation to the node's from/to
   * offsets, so the reported text was meaningless.
   */
  function hasErrorNodes(
    tree: any,
    code: string
  ): { hasError: boolean; errors: Array<{ name: string; from: number; to: number; text: string }> } {
    const errors: Array<{ name: string; from: number; to: number; text: string }> = [];
    tree.iterate({
      enter: (node: any) => {
        if (node.name === '⚠') {
          errors.push({
            name: node.name,
            from: node.from,
            to: node.to,
            text: code.substring(node.from, node.to)
          });
        }
      }
    });
    return {
      hasError: errors.length > 0,
      errors
    };
  }

  /**
   * Render the syntax tree as an indented outline (debugging aid).
   *
   * Fix: depth is tracked with enter/leave counters. The previous
   * getNodeDepth() walked `node.parent`, but the cursor-style refs passed to
   * Tree.iterate() expose no `parent`, so every node printed at depth 0.
   * Also fixed the ellipsis check: a node of exactly 30 characters used to
   * get a spurious "..." suffix.
   */
  function printTree(tree: any, code: string, maxDepth = 5) {
    const lines: string[] = [];
    let depth = 0;
    tree.iterate({
      enter: (node: any) => {
        if (depth > maxDepth) return false; // skip deep subtrees entirely
        const text = code.substring(node.from, Math.min(node.to, node.from + 30));
        const displayText = node.to - node.from > 30 ? text + '...' : text;
        lines.push(
          `${'  '.repeat(depth)}${node.name} [${node.from}-${node.to}]: "${displayText.replace(/\n/g, '\\n')}"`
        );
        depth++;
      },
      // leave is not invoked for nodes whose enter returned false, so the
      // counter stays balanced.
      leave: () => {
        depth--;
      }
    });
    return lines.join('\n');
  }

  /**
   * Shared assertion used by every test case: `code` must parse without any
   * error (⚠) nodes. On failure the tree and error list are logged to ease
   * debugging.
   */
  function expectNoParseErrors(code: string) {
    const tree = parseCode(code);
    const result = hasErrorNodes(tree, code);
    if (result.hasError) {
      console.log('语法树:');
      console.log(printTree(tree, code));
      console.log('错误节点:', result.errors);
    }
    expect(result.hasError).toBe(false);
  }

  it('应该正确解析基础的 sequenceDiagram 声明', () => {
    expectNoParseErrors(`sequenceDiagram
`);
  });

  it('应该正确解析带 participant 的序列图', () => {
    expectNoParseErrors(`sequenceDiagram
participant Alice
participant Bob
`);
  });

  it('应该正确解析简单的消息传递', () => {
    expectNoParseErrors(`sequenceDiagram
Alice->John: Hello John, how are you?
`);
  });

  it('应该正确解析带虚线箭头的消息', () => {
    expectNoParseErrors(`sequenceDiagram
Alice-->John: Hello
`);
  });

  it('应该正确解析带异步箭头的消息', () => {
    expectNoParseErrors(`sequenceDiagram
Alice->>John: Hello
`);
  });

  it('应该正确解析带激活/停用的序列图', () => {
    expectNoParseErrors(`sequenceDiagram
Alice->>John: Hello
activate John
John-->>Alice: Hi there!
deactivate John
`);
  });

  it('应该正确解析带 note 的序列图', () => {
    expectNoParseErrors(`sequenceDiagram
Alice->John: Hello John
Note over Alice,John: A typical interaction
`);
  });

  it('应该正确解析带 alt/else 的序列图', () => {
    expectNoParseErrors(`sequenceDiagram
Alice->John: Hello John
alt is sick
John-->Alice: Not so good
else is well
John-->Alice: Feeling fresh
end
`);
  });

  it('应该正确解析带 loop 的序列图', () => {
    expectNoParseErrors(`sequenceDiagram
Alice->John: Hello John
loop Every minute
John-->Alice: Great!
end
`);
  });

  it('应该正确解析完整的序列图示例', () => {
    expectNoParseErrors(`sequenceDiagram
participant Alice
participant Bob
Alice->>John: Hello John, how are you?
loop HealthCheck
John->>John: Fight against hypochondria
end
Note right of John: Rational thoughts!
John-->>Alice: Great!
John->>Bob: How about you?
Bob-->>John: Jolly good!
`);
  });
});

View File

@@ -0,0 +1,3 @@
import { LRParser } from '@lezer/lr';
// The generated lezer LR parser for the Mermaid sequence-diagram grammar.
export declare const parser: LRParser;

View File

@@ -0,0 +1,24 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
// Regenerate with `npm run build:mermaid-parser`; the serialized tables
// below must never be edited by hand.
import {LRParser} from "@lezer/lr"
import {messageTextToken, textTokens} from "./tokens"
import {sequenceHighlighting} from "./highlight"
// Specialization table: maps the literal `sequenceDiagram` identifier to its
// specialized term id.
const spec_identifier = {__proto__:null,sequenceDiagram:84}
export const parser = LRParser.deserialize({
  version: 14,
  states: "'nOVQSOOO[QSO'#DUQOQSOOOOQO'#Cj'#CjO#QQUO,59pOOQP'#Co'#CoOOQQ'#Cq'#CqOOQO'#DY'#DYO#XQUO'#DYO#gQUO'#DYO#lQUO'#DYO#wQUO'#DYO#|QUO'#DYO$RQTO'#DYO$WQUO'#DYO$]QSO1G/[O$eQYO,59tO$sQUO,59tO$xQUO,59tO%TQUO,59tOOQO,59t,59tO%YQUO,59tOOQO'#Cp'#CpO%_QSO,59tO%dQUO7+$vO%kQSO7+$vOOQQ'#Cm'#CmO%sQSO1G/`O%xQUO1G/`O%}QUO1G/`OOQO1G/`1G/`O&VQUO1G/`O&[QUO1G/`O&gQSO1G/`O&oQTO1G/`OOQO,59^,59^O&tQUO<<HbOOQO-E6p-E6pO&oQTO7+$zO&{QSO7+$zO'QQUO7+$zOOQO7+$z7+$zO'VQUO7+$zOOQO'#Cn'#CnPdQUO'#CrOOQO<<Hf<<HfO&oQTO<<HfO'[QSO<<HfOOQOAN>QAN>QO&oQTOAN>QOOQOG23lG23l",
  stateData: "'g~OwOS~OzRO~O{SOgxX~OPZOQVORYOSZOTYOUVOV[OWXOYXOZWO_VOiTOjTOkTOlTOmTOnTOoUOpUOqTOrTOsTOtTOuTO~Ogxa~PdOXaO``Og|X{|X~OZbO~OWcOYcOZbO~OZdO~O[eO~OhfO~OZgO~O{hOgxi~OZkO{lO}jO!OjO~OZnO~OXoOg|a{|a~OZpO~OZqO~O!PrO~Ogxq~PdO{tOgxq~O!PvO~OZwO~OZwO{xO~OZyO~OXzOg|i{|i~O!PvO!QxO~Oh{O~Ogxy~PdO!P!OO~OZ!PO~OZ}O~O!P!RO~O{w`y`~",
  goto: "#S}PPPPPPPPPPPPPP!OPP!R!U!b!h!k!qPPPPPPPPPPPPPPPPP!wPPP!zRPORm`QyrQ}vQ!Q!OR!S!RX]Sht|Rd]X^Sht|Qi_RuiRQOQ_SVsht|",
  nodeNames: "⚠ Activate Autonumber Create Deactivate Destroy End Note Actor As Participant NodeText Position SequenceDiagram DiagramName LineComment Arrow ArrowSuffix MessageText1 Keyword MessageText2 Link",
  maxTerm: 48,
  nodeProps: [
    ["group", -9,1,2,3,4,5,6,7,19,21,"Keyword1",-3,8,9,10,"Keyword2"]
  ],
  propSources: [sequenceHighlighting],
  skippedNodes: [0],
  repeatNodeCount: 1,
  tokenData: "(x~RmXY!|YZ#qZ^!|pq!|tu$nuv%`{|%}|}&S}!O&X![!]'T!c!}$n#T#o$n#y#z!|$f$g!|$g#BY$n#BY#BZ'Y#BZ$IS$n$IS$I_'Y$I_$I|$n$I|$JO'Y$JO$JT$n$JT$JU'Y$JU$KV$n$KV$KW'Y$KW&FU$n&FU&FV'Y&FV;'S$n;'S;=`%Y<%lO$n~#RYw~X^!|pq!|#y#z!|$f$g!|#BY#BZ!|$IS$I_!|$I|$JO!|$JT$JU!|$KV$KW!|&FU&FV!|~#x[{~w~XY!|YZ#qZ^!|pq!|#y#z!|$f$g!|#BY#BZ!|$IS$I_!|$I|$JO!|$JT$JU!|$KV$KW!|&FU&FV!|~$sVy~tu$n!Q![$n!c!}$n#T#o$n$g;'S$n;'S;=`%Y<%lO$n~%]P;=`<%l$n~%cPuv%f~%kS_~OY%fZ;'S%f;'S;=`%w<%lO%f~%zP;=`<%l%f~&SO}~~&XO!Q~R&^S!OQyz&j}!O&o!`!a&{#l#m&jP&oO`PP&rRyz&j!`!a&{#l#m&jP'QP`P!`!a&j~'YO!P~~'agw~y~X^!|pq!|tu$n!Q![$n!c!}$n#T#o$n#y#z!|$f$g!|$g#BY$n#BY#BZ'Y#BZ$IS$n$IS$I_'Y$I_$I|$n$I|$JO'Y$JO$JT$n$JT$JU'Y$JU$KV$n$KV$KW'Y$KW&FU$n&FU&FV'Y&FV;'S$n;'S;=`%Y<%lO$n",
  // External tokenizers implemented in ./tokens.ts.
  tokenizers: [messageTextToken, textTokens, 0, 1],
  topRules: {"SequenceDiagram":[0,13]},
  specialized: [{term: 41, get: (value: keyof typeof spec_identifier) => spec_identifier[value] || -1}],
  tokenPrec: 293
})

View File

@@ -0,0 +1,129 @@
import { ExternalTokenizer } from '@lezer/lr';
import {
_break,
_else,
Activate,
Actor,
alt,
and,
As,
Autonumber,
box,
Create,
critical,
Deactivate,
Destroy,
End,
link,
links,
loop,
messageText,
NodeText,
Note,
opt,
option,
par,
Participant,
Position,
rect,
} from './sequence.grammar.terms';
// Code points that never start a token: EOF (-1), tab (9), LF (10),
// CR (13), space (32) and '%' (37, Mermaid's comment marker).
const skipCodePoints = [-1, 9, 10, 13, 32, 37];
// '+' (43) and '-' (45): arrow-suffix characters, handled by the grammar.
const arrowSuffixCodePoints = [43, 45];
// ',' (44), ':' (58) and '>' (62) terminate an identifier/text run.
const notAllowedCodePoints = [44, 58, 62];
// 2- and 3-character lookahead sequences that mark the start of an arrow
// (or an ` as` clause); scanning of a NodeText stops just before them.
// NOTE(review): the last 2-char entry renders as whitespace only — confirm
// whether it is a single space or two spaces in the actual source.
const notAllowed2Chars = ['->', '-x', '-)', ' -', ' '];
const notAllowed3Chars = ['-->', '->>', '--x', '--)', ' as'];
// Maps each keyword (lower-case spelling, including the two-word note
// positions) to its grammar term id. 'left of', 'right of' and 'over' all
// emit the shared Position term.
const keywordMap: { [key: string]: number } = {
  'left of': Position,
  'right of': Position,
  activate: Activate,
  actor: Actor,
  alt: alt,
  and: and,
  as: As,
  autonumber: Autonumber,
  box: box,
  break: _break,
  create: Create,
  critical: critical,
  deactivate: Deactivate,
  destroy: Destroy,
  else: _else,
  end: End,
  link: link,
  links: links,
  loop: loop,
  note: Note,
  opt: opt,
  option: option,
  over: Position,
  par: par,
  participant: Participant,
  rect: rect,
};
// Insertion-ordered keyword list; textTokens takes the FIRST match in this
// order, so 'left of' is tried before single-word keywords.
const keywords = Object.keys(keywordMap);
/**
 * Tokenizer for free-form message text: everything from the current position
 * up to (but not including) the next newline or EOF becomes one
 * `messageText` token. Characters in `skipCodePoints` (whitespace, '%', EOF)
 * never start a message, so nothing is emitted for them.
 */
export const messageTextToken = new ExternalTokenizer((input) => {
  if (skipCodePoints.includes(input.next)) {
    return;
  }
  // The guard above already excludes LF (10) and EOF (-1), so at least one
  // character is always consumed before the loop condition is re-checked.
  do {
    input.advance();
  } while (input.next !== 10 && input.next !== -1);
  input.acceptToken(messageText);
});
/**
 * Tokenizer for actor identifiers / node text with keyword recognition.
 *
 * Consumes characters until a terminator (',', ':', '>'), an upcoming arrow
 * sequence, a newline or EOF. If the consumed run begins with a known
 * keyword, the token is shrunk back (negative acceptToken offset) to just
 * the keyword and emitted with that keyword's term id; otherwise the whole
 * run is emitted as NodeText.
 */
export const textTokens = new ExternalTokenizer((input) => {
  // Never start on whitespace/EOF/'%' or on an arrow-suffix char ('+'/'-').
  if (
    skipCodePoints.includes(input.next) ||
    arrowSuffixCodePoints.includes(input.next)
  )
    return;
  // True when the next 2 or 3 characters form an arrow (or ' as' clause).
  const isArrowNext = () => {
    if (input.peek(0) === -1 || input.peek(1) === -1 || input.peek(2) === -1)
      return false;
    let result =
      String.fromCodePoint(input.peek(0)) + String.fromCodePoint(input.peek(1));
    if (notAllowed2Chars.includes(result)) return true;
    result += String.fromCodePoint(input.peek(2));
    if (notAllowed3Chars.includes(result)) return true;
    return false;
  };
  // Accumulate the raw run so it can be matched against the keyword table.
  let tokens = '';
  while (
    !notAllowedCodePoints.includes(input.next) &&
    !isArrowNext() &&
    input.next !== 10 &&
    input.next !== -1
  ) {
    tokens += String.fromCodePoint(input.next);
    input.advance();
  }
  // A keyword matches when the run IS the keyword (exact, case-sensitive) or
  // starts with "<keyword> " case-insensitively.
  // NOTE(review): a run equal to a keyword but differing in case (e.g. a bare
  // `Note`) matches neither branch and falls through to NodeText — confirm
  // this is intended.
  const activeKeyword = keywords.filter((keyword) => {
    if (keyword === tokens) {
      return tokens.toLowerCase().startsWith(keyword);
    }
    return tokens.toLowerCase().startsWith(keyword + ' ');
  });
  if (activeKeyword.length > 0) {
    // Negative offset: move the token end back so only the keyword's
    // characters are consumed; the remainder is re-scanned by the parser.
    input.acceptToken(
      keywordMap[activeKeyword[0]],
      activeKeyword[0].length - tokens.length
    );
    return;
  }
  input.acceptToken(NodeText);
});

View File

@@ -0,0 +1,76 @@
import { Tag, tags as t } from '@lezer/highlight';
/**
 * Highlight tags shared by all Mermaid diagram languages.
 * Tags created with `Tag.define(parent)` inherit the parent tag's styling,
 * so themes targeting the parent automatically cover the child.
 */
export const mermaidTags = {
  // The diagram-type keyword (e.g. `sequenceDiagram`); parented on the
  // standard typeName tag so default highlight themes style it.
  diagramName: Tag.define(t.typeName),
};

// Tags for the mindmap diagram. lineText1-5 have no parent tag, so they are
// only styled by an explicit mermaid-aware theme.
export const mindmapTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  lineText1: Tag.define(),
  lineText2: Tag.define(),
  lineText3: Tag.define(),
  lineText4: Tag.define(),
  lineText5: Tag.define(),
};

// Tags for the pie-chart diagram.
export const pieTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  lineComment: Tag.define(t.lineComment),
  number: Tag.define(t.number),
  showData: Tag.define(t.keyword),
  string: Tag.define(t.string),
  title: Tag.define(t.keyword),
  titleText: Tag.define(t.string),
};

// Tags for the flowchart diagram.
export const flowchartTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  keyword: Tag.define(t.keyword),
  lineComment: Tag.define(t.lineComment),
  link: Tag.define(t.contentSeparator),
  nodeEdge: Tag.define(t.contentSeparator),
  nodeEdgeText: Tag.define(t.string),
  nodeId: Tag.define(t.variableName),
  nodeText: Tag.define(t.string),
  number: Tag.define(t.number),
  orientation: Tag.define(t.modifier),
  string: Tag.define(t.string),
};

// Tags for the sequence diagram (two keyword/message flavors map to the
// grammar's Keyword1/Keyword2 and MessageText1/MessageText2 node groups).
export const sequenceTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  arrow: Tag.define(t.contentSeparator),
  keyword1: Tag.define(t.keyword),
  keyword2: Tag.define(t.controlKeyword),
  lineComment: Tag.define(t.lineComment),
  messageText1: Tag.define(t.string),
  messageText2: Tag.define(t.content),
  nodeText: Tag.define(t.variableName),
  position: Tag.define(t.modifier),
};

// Tags for the user-journey diagram.
export const journeyTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  actor: Tag.define(t.variableName),
  keyword: Tag.define(t.keyword),
  lineComment: Tag.define(t.lineComment),
  score: Tag.define(t.number),
  text: Tag.define(t.string),
};

// Tags for the requirement diagram.
export const requirementTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  arrow: Tag.define(t.contentSeparator),
  keyword: Tag.define(t.keyword),
  lineComment: Tag.define(t.lineComment),
  number: Tag.define(t.number),
  quotedString: Tag.define(t.string),
  unquotedString: Tag.define(t.content),
};

// Tags for the gantt diagram.
export const ganttTags = {
  diagramName: Tag.define(mermaidTags.diagramName),
  keyword: Tag.define(t.keyword),
  lineComment: Tag.define(t.lineComment),
  string: Tag.define(t.string),
};

View File

@@ -0,0 +1,38 @@
// Node-type names used as the top rule of each per-diagram lezer grammar.
export enum DiagramType {
  Mermaid = 'MermaidDiagram',
  Mindmap = 'MindmapDiagram',
  Pie = 'PieDiagram',
  Flowchart = 'FlowchartDiagram',
  Sequence = 'SequenceDiagram',
  Journey = 'JourneyDiagram',
  Requirement = 'RequirementDiagram',
  Gantt = 'GanttDiagram',
}

// The keyword that introduces each diagram type in Mermaid source text
// (the first word of a mermaid code block).
export enum MermaidDescriptionName {
  Mermaid = 'mermaid',
  Mindmap = 'mindmap',
  Pie = 'pie',
  Flowchart = 'flowchart',
  Sequence = 'sequenceDiagram',
  Journey = 'journey',
  Requirement = 'requirementDiagram',
  Gantt = 'gantt',
}

// Short identifiers for the per-diagram CodeMirror language instances.
export enum MermaidLanguageType {
  Mermaid = 'mermaid',
  Mindmap = 'mindmap',
  Pie = 'pie',
  Flowchart = 'flowchart',
  Sequence = 'sequence',
  Journey = 'journey',
  Requirement = 'requirement',
  Gantt = 'gantt',
}

// NOTE(review): presumably alternative first-word spellings that map onto a
// canonical diagram type (e.g. `graph` for flowchart) — confirm against the
// diagram dispatcher that consumes these.
export enum MermaidAlias {
  Graph = 'graph',
  Sequence = 'sequence',
  Requirement = 'requirement',
}