tangled
alpha
login
or
join now
nonbinary.computer
/
or1-design
0
fork
atom
OR-1 dataflow CPU sketch
0
fork
atom
overview
issues
pulls
pipelines
test: add tests for IR-to-JSON conversion
Orual
2 weeks ago
f0d21b33
5df3bf85
+404
1 changed file
expand all
collapse all
unified
split
tests
test_dfgraph_json.py
+404
tests/test_dfgraph_json.py
···
1
1
+
"""Tests for IR-to-JSON conversion (graph_to_json).
2
2
+
3
3
+
Tests verify:
4
4
+
- dataflow-renderer.AC1.1 (fully allocated graph): Valid dfasm renders as JSON with nodes, edges, metadata
5
5
+
- dataflow-renderer.AC1.1 (partially resolved graph): Incomplete programs produce partial JSON with errors
6
6
+
- Error nodes flagged: Nodes with errors marked as has_error: true
7
7
+
- Function regions: Function regions appear in JSON with correct node_ids
8
8
+
- Parse error case: Parse failures produce empty JSON with parse_error string
9
9
+
"""
10
10
+
11
11
+
from dfgraph.graph_json import graph_to_json
12
12
+
from dfgraph.pipeline import run_progressive, PipelineStage
13
13
+
from asm.ir import RegionKind
14
14
+
15
15
+
16
16
+
class TestFullyAllocatedGraph:
    """Test graph_to_json on fully allocated programs (stage=allocate)."""

    def test_simple_const_add_chain_json(self):
        """AC1.1: Simple CONST→ADD chain renders with full node/edge data."""
        source = """\
@system pe=2, sm=0
&c1|pe0 <| const, 3
&c2|pe0 <| const, 7
&result|pe0 <| add
&output|pe1 <| pass
&c1|pe0 |> &result|pe0:L
&c2|pe0 |> &result|pe0:R
&result|pe0 |> &output|pe1:L
"""
        pipeline = run_progressive(source)
        payload = graph_to_json(pipeline)

        # The pipeline should have made it all the way to allocation.
        assert pipeline.stage == PipelineStage.ALLOCATE
        assert payload["stage"] == "allocate"

        # All four declared nodes show up in the serialized graph.
        assert len(payload["nodes"]) >= 4
        node_ids = {node["id"] for node in payload["nodes"]}
        assert "&c1" in node_ids
        assert "&c2" in node_ids
        assert "&result" in node_ids
        assert "&output" in node_ids

        # Spot-check one node for complete allocation data.
        result_node = next(n for n in payload["nodes"] if n["id"] == "&result")
        assert result_node["opcode"] == "add"
        assert result_node["category"] == "arithmetic"
        assert result_node["pe"] == 0
        assert result_node["iram_offset"] is not None
        assert result_node["ctx"] is not None
        assert result_node["has_error"] is False

        # The wiring between nodes is serialized as edges.
        edges = payload["edges"]
        assert len(edges) >= 3
        assert any(e["source"] == "&c1" and e["target"] == "&result" for e in edges)
        assert any(e["source"] == "&result" and e["target"] == "&output" for e in edges)

        # Metadata mirrors the @system pragma; a clean program has no errors.
        assert payload["metadata"]["pe_count"] == 2
        assert payload["metadata"]["sm_count"] == 0
        assert payload["parse_error"] is None
        assert len(payload["errors"]) == 0

    def test_sm_program_json(self):
        """AC1.1: SM operations include sm_id in nodes."""
        source = """\
@system pe=2, sm=1
&trigger|pe0 <| const, 1
&reader|pe0 <| read
&relay|pe1 <| pass
&trigger|pe0 |> &reader|pe0:L
&reader|pe0 |> &relay|pe1:L
"""
        pipeline = run_progressive(source)
        payload = graph_to_json(pipeline)

        assert pipeline.stage == PipelineStage.ALLOCATE
        assert payload["stage"] == "allocate"

        # The memory node carries its opcode, category, and placement.
        reader_node = next(n for n in payload["nodes"] if n["id"] == "&reader")
        assert reader_node["opcode"] == "read"
        assert reader_node["category"] == "memory"
        assert reader_node["pe"] == 0
        assert reader_node["iram_offset"] is not None

        # Metadata reflects the SM count declared in @system.
        assert payload["metadata"]["sm_count"] == 1

    def test_edge_port_serialization(self):
        """Edges include source/target ports correctly."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
&b|pe0 <| add
&c|pe0 <| pass
&a|pe0 |> &b|pe0:L
&b|pe0 |> &c|pe0:R
"""
        pipeline = run_progressive(source)
        payload = graph_to_json(pipeline)

        # a -> b was wired to the L input port of &b.
        edge_ab = next(e for e in payload["edges"] if e["source"] == "&a" and e["target"] == "&b")
        assert edge_ab["port"] == "L"

        # b -> c was wired to the R input port of &c.
        edge_bc = next(e for e in payload["edges"] if e["source"] == "&b" and e["target"] == "&c")
        assert edge_bc["port"] == "R"
class TestPartiallyResolvedGraph:
    """Test graph_to_json on graphs with errors (stage=resolve or lower)."""

    def test_undefined_reference_json(self):
        """AC1.1: Undefined reference creates error but partial graph still renders."""
        source = """\
@system pe=1, sm=0
&a <| const, 5
&b <| add
&a |> &b:L
&b |> &undefined:R
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Should stop at resolve (error prevents further stages)
        assert result.stage == PipelineStage.RESOLVE
        assert json_out["stage"] == "resolve"

        # But we still get nodes that were lowered
        node_ids = {n["id"] for n in json_out["nodes"]}
        assert "&a" in node_ids
        assert "&b" in node_ids

        # Nodes should not have allocation data (stopped at resolve)
        # Note: pe is set by explicit placement, iram_offset and ctx are set by allocate
        for node in json_out["nodes"]:
            assert node["iram_offset"] is None
            assert node["ctx"] is None

        # Error list should be populated
        assert len(json_out["errors"]) > 0
        assert any("undefined" in e["message"].lower() for e in json_out["errors"])

    def test_error_nodes_flagged(self):
        """Nodes on error lines are flagged with has_error: true."""
        source = """\
@system pe=1, sm=0
&a <| const, 5
&b <| add
&a |> &b:L
&b |> &undefined:R
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # The edge referencing &undefined must produce at least one error.
        assert len(json_out["errors"]) > 0

        # The error must surface on the graph itself: either a node or an
        # edge on the failing line carries has_error: true.
        # (Fixed: a previously unused `error_line` local was removed.)
        node_flagged = any(n["has_error"] for n in json_out["nodes"])
        edge_flagged = any(e["has_error"] for e in json_out["edges"])
        assert node_flagged or edge_flagged
class TestFunctionRegions:
    """Test function region serialization."""

    def test_function_regions_json(self):
        """Function regions appear in JSON with correct tag, kind, and node_ids."""
        source = """\
@system pe=2, sm=0

$func1 |> {
&a|pe0 <| const, 1
&b|pe0 <| add
&a|pe0 |> &b|pe0:L
}

$func2 |> {
&c|pe1 <| const, 2
}
"""
        payload = graph_to_json(run_progressive(source))

        # Both declared functions are serialized as regions.
        regions = payload["regions"]
        assert len(regions) >= 2

        # Locate each function region by its tag.
        func1 = next((r for r in regions if r["tag"] == "$func1"), None)
        func2 = next((r for r in regions if r["tag"] == "$func2"), None)

        assert func1 is not None
        assert func1["kind"] == "function"
        # Node ids inside a function are namespaced by the function tag.
        assert set(func1["node_ids"]) == {"$func1.&a", "$func1.&b"}

        assert func2 is not None
        assert func2["kind"] == "function"
        assert set(func2["node_ids"]) == {"$func2.&c"}
class TestParseErrorCase:
    """Test graph_to_json on parse failures."""

    def test_parse_error_json(self):
        """Parse error produces empty JSON with parse_error string."""
        source = """\
@system pe=1, sm=0
&invalid syntax [[[
"""
        outcome = run_progressive(source)
        payload = graph_to_json(outcome)

        # The pipeline halts at the parse-error stage.
        assert outcome.stage == PipelineStage.PARSE_ERROR
        assert payload["stage"] == "parse_error"

        # No graph content survives a parse failure.
        assert payload["nodes"] == []
        assert payload["edges"] == []
        assert payload["regions"] == []

        # A non-empty parse_error message is reported instead.
        assert payload["parse_error"] is not None
        assert len(payload["parse_error"]) > 0

        # With no parsed @system pragma the counts fall back to zero.
        assert payload["metadata"]["pe_count"] == 0
        assert payload["metadata"]["sm_count"] == 0
class TestJsonStructure:
    """Test JSON output structure compliance."""

    def test_node_structure(self):
        """Each node has all required fields."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
"""
        payload = graph_to_json(run_progressive(source))

        node = payload["nodes"][0]
        required = {
            "id", "opcode", "category", "colour", "const",
            "pe", "iram_offset", "ctx", "has_error", "loc",
        }
        assert required <= set(node.keys())

        # The source location is itself a structured object.
        loc = node["loc"]
        assert "line" in loc
        assert "column" in loc

    def test_edge_structure(self):
        """Each edge has all required fields."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
&b|pe0 <| add
&a|pe0 |> &b|pe0:L
"""
        payload = graph_to_json(run_progressive(source))

        edge = payload["edges"][0]
        required = {"source", "target", "port", "source_port", "has_error"}
        assert required <= set(edge.keys())

    def test_error_structure(self):
        """Each error has required fields."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
&b|pe0 |> &undefined|pe0:L
"""
        payload = graph_to_json(run_progressive(source))

        # Guarded: only check the shape when the pipeline reported errors.
        if payload["errors"]:
            error = payload["errors"][0]
            required = {"line", "column", "category", "message", "suggestions"}
            assert required <= set(error.keys())

    def test_metadata_structure(self):
        """Metadata has all required fields."""
        payload = graph_to_json(run_progressive("@system pe=2, sm=1"))

        metadata = payload["metadata"]
        assert {"stage", "pe_count", "sm_count"} <= set(metadata.keys())
        assert metadata["pe_count"] == 2
        assert metadata["sm_count"] == 1

    def test_top_level_structure(self):
        """Top-level JSON has all required fields."""
        payload = graph_to_json(run_progressive("@system pe=1, sm=0"))

        required = {
            "type", "stage", "nodes", "edges", "regions",
            "errors", "parse_error", "metadata",
        }
        assert required <= set(payload.keys())
        assert payload["type"] == "graph_update"
class TestEmptyProgram:
    """Test minimal/empty programs."""

    def test_system_only_json(self):
        """Program with only @system pragma produces valid JSON."""
        payload = graph_to_json(run_progressive("@system pe=2, sm=1"))

        # An empty program still reaches allocation, just with no graph.
        assert payload["stage"] == "allocate"
        assert payload["nodes"] == []
        assert payload["edges"] == []
        # The @system counts still flow through to metadata.
        assert payload["metadata"]["pe_count"] == 2
        assert payload["metadata"]["sm_count"] == 1
class TestColourMapping:
    """Test opcode-to-colour mapping."""

    def _single_node(self, source, node_id):
        """Run *source* through the pipeline and return the node with *node_id*."""
        payload = graph_to_json(run_progressive(source))
        return next(n for n in payload["nodes"] if n["id"] == node_id)

    def test_arithmetic_colour(self):
        """Arithmetic ops get correct colour."""
        source = """\
@system pe=1, sm=0
&add_node|pe0 <| add
"""
        node = self._single_node(source, "&add_node")
        assert node["category"] == "arithmetic"
        assert node["colour"] == "#4a90d9"  # arithmetic blue

    def test_memory_colour(self):
        """Memory ops get correct colour."""
        source = """\
@system pe=1, sm=0
&read_node|pe0 <| read
"""
        node = self._single_node(source, "&read_node")
        assert node["category"] == "memory"
        assert node["colour"] == "#ff5722"  # memory red

    def test_routing_colour(self):
        """Routing ops get correct colour."""
        source = """\
@system pe=1, sm=0
&pass_node|pe0 <| pass
"""
        node = self._single_node(source, "&pass_node")
        assert node["category"] == "routing"
        assert node["colour"] == "#9c27b0"  # routing purple
class TestMultipleRegions:
    """Test handling of nested and multiple regions."""

    def test_multiple_functions_and_locations(self):
        """Multiple function and location regions handled correctly."""
        source = """\
@system pe=2, sm=0

$func1 |> {
&a|pe0 <| const, 1
}

@loc1

$func2 |> {
&c|pe0 <| add
}
"""
        payload = graph_to_json(run_progressive(source))

        # Filter to function regions only — the @loc1 marker may produce
        # other region kinds, which this test ignores.
        function_regions = [r for r in payload["regions"] if r["kind"] == "function"]

        # Exactly the two declared functions should be present.
        assert len(function_regions) == 2
        tags = {r["tag"] for r in function_regions}
        assert "$func1" in tags
        assert "$func2" in tags