OR-1 dataflow CPU sketch

test: add tests for IR-to-JSON conversion

Orual f0d21b33 5df3bf85

+404
+404
tests/test_dfgraph_json.py
"""Tests for IR-to-JSON conversion (graph_to_json).

Tests verify:
- dataflow-renderer.AC1.1 (fully allocated graph): Valid dfasm renders as JSON with nodes, edges, metadata
- dataflow-renderer.AC1.1 (partially resolved graph): Incomplete programs produce partial JSON with errors
- Error nodes flagged: Nodes with errors marked as has_error: true
- Function regions: Function regions appear in JSON with correct node_ids
- Parse error case: Parse failures produce empty JSON with parse_error string
"""

from dfgraph.graph_json import graph_to_json
from dfgraph.pipeline import run_progressive, PipelineStage
from asm.ir import RegionKind


class TestFullyAllocatedGraph:
    """Test graph_to_json on fully allocated programs (stage=allocate)."""

    def test_simple_const_add_chain_json(self):
        """AC1.1: Simple CONST→ADD chain renders with full node/edge data."""
        source = """\
@system pe=2, sm=0
&c1|pe0 <| const, 3
&c2|pe0 <| const, 7
&result|pe0 <| add
&output|pe1 <| pass
&c1|pe0 |> &result|pe0:L
&c2|pe0 |> &result|pe0:R
&result|pe0 |> &output|pe1:L
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Verify stage reached
        assert result.stage == PipelineStage.ALLOCATE
        assert json_out["stage"] == "allocate"

        # Verify nodes are present with all required fields
        assert len(json_out["nodes"]) >= 4
        node_ids = {n["id"] for n in json_out["nodes"]}
        assert "&c1" in node_ids
        assert "&c2" in node_ids
        assert "&result" in node_ids
        assert "&output" in node_ids

        # Check a node has full allocation data
        result_node = next(n for n in json_out["nodes"] if n["id"] == "&result")
        assert result_node["opcode"] == "add"
        assert result_node["category"] == "arithmetic"
        assert result_node["pe"] == 0
        assert result_node["iram_offset"] is not None
        assert result_node["ctx"] is not None
        assert result_node["has_error"] is False

        # Verify edges are present
        assert len(json_out["edges"]) >= 3
        edges = json_out["edges"]
        assert any(e["source"] == "&c1" and e["target"] == "&result" for e in edges)
        assert any(e["source"] == "&result" and e["target"] == "&output" for e in edges)

        # Verify metadata
        assert json_out["metadata"]["pe_count"] == 2
        assert json_out["metadata"]["sm_count"] == 0
        assert json_out["parse_error"] is None
        assert len(json_out["errors"]) == 0

    def test_sm_program_json(self):
        """AC1.1: SM operations include sm_id in nodes."""
        source = """\
@system pe=2, sm=1
&trigger|pe0 <| const, 1
&reader|pe0 <| read
&relay|pe1 <| pass
&trigger|pe0 |> &reader|pe0:L
&reader|pe0 |> &relay|pe1:L
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        assert result.stage == PipelineStage.ALLOCATE
        assert json_out["stage"] == "allocate"

        # Check nodes
        reader_node = next(n for n in json_out["nodes"] if n["id"] == "&reader")
        assert reader_node["opcode"] == "read"
        assert reader_node["category"] == "memory"
        assert reader_node["pe"] == 0
        assert reader_node["iram_offset"] is not None

        # Check metadata reflects SM
        assert json_out["metadata"]["sm_count"] == 1

    def test_edge_port_serialization(self):
        """Edges include source/target ports correctly."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
&b|pe0 <| add
&c|pe0 <| pass
&a|pe0 |> &b|pe0:L
&b|pe0 |> &c|pe0:R
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Find the edge from a to b
        edge_ab = next(e for e in json_out["edges"] if e["source"] == "&a" and e["target"] == "&b")
        assert edge_ab["port"] == "L"  # Destination input port

        # Find the edge from b to c
        edge_bc = next(e for e in json_out["edges"] if e["source"] == "&b" and e["target"] == "&c")
        assert edge_bc["port"] == "R"  # Destination input port


class TestPartiallyResolvedGraph:
    """Test graph_to_json on graphs with errors (stage=resolve or lower)."""

    def test_undefined_reference_json(self):
        """AC1.1: Undefined reference creates error but partial graph still renders."""
        source = """\
@system pe=1, sm=0
&a <| const, 5
&b <| add
&a |> &b:L
&b |> &undefined:R
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Should stop at resolve (error prevents further stages)
        assert result.stage == PipelineStage.RESOLVE
        assert json_out["stage"] == "resolve"

        # But we still get nodes that were lowered
        node_ids = {n["id"] for n in json_out["nodes"]}
        assert "&a" in node_ids
        assert "&b" in node_ids

        # Nodes should not have allocation data (stopped at resolve)
        # Note: pe is set by explicit placement, iram_offset and ctx are set by allocate
        for node in json_out["nodes"]:
            assert node["iram_offset"] is None
            assert node["ctx"] is None

        # Error list should be populated
        assert len(json_out["errors"]) > 0
        assert any("undefined" in e["message"].lower() for e in json_out["errors"])

    def test_error_nodes_flagged(self):
        """Nodes on error lines are flagged with has_error: true."""
        source = """\
@system pe=1, sm=0
&a <| const, 5
&b <| add
&a |> &b:L
&b |> &undefined:R
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # The edge referencing undefined will have an error
        # which means the destination node (if it exists) or source gets flagged
        assert len(json_out["errors"]) > 0

        # Check if any node on the error line is flagged
        # (The edge error is at the line with &b |> &undefined...)
        error_flagged = any(n["has_error"] for n in json_out["nodes"])
        assert error_flagged or any(e["has_error"] for e in json_out["edges"])


class TestFunctionRegions:
    """Test function region serialization."""

    def test_function_regions_json(self):
        """Function regions appear in JSON with correct tag, kind, and node_ids."""
        source = """\
@system pe=2, sm=0

$func1 |> {
&a|pe0 <| const, 1
&b|pe0 <| add
&a|pe0 |> &b|pe0:L
}

$func2 |> {
&c|pe1 <| const, 2
}
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Check regions
        regions = json_out["regions"]
        assert len(regions) >= 2

        # Find function regions by tag
        func1_region = next((r for r in regions if r["tag"] == "$func1"), None)
        func2_region = next((r for r in regions if r["tag"] == "$func2"), None)

        assert func1_region is not None
        assert func1_region["kind"] == "function"
        assert set(func1_region["node_ids"]) == {"$func1.&a", "$func1.&b"}

        assert func2_region is not None
        assert func2_region["kind"] == "function"
        assert set(func2_region["node_ids"]) == {"$func2.&c"}


class TestParseErrorCase:
    """Test graph_to_json on parse failures."""

    def test_parse_error_json(self):
        """Parse error produces empty JSON with parse_error string."""
        source = """\
@system pe=1, sm=0
&invalid syntax [[[
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Should be in parse error stage
        assert result.stage == PipelineStage.PARSE_ERROR
        assert json_out["stage"] == "parse_error"

        # Should have no nodes/edges
        assert json_out["nodes"] == []
        assert json_out["edges"] == []
        assert json_out["regions"] == []

        # Should have parse_error string
        assert json_out["parse_error"] is not None
        assert len(json_out["parse_error"]) > 0

        # metadata should have zeros
        assert json_out["metadata"]["pe_count"] == 0
        assert json_out["metadata"]["sm_count"] == 0


class TestJsonStructure:
    """Test JSON output structure compliance."""

    def test_node_structure(self):
        """Each node has all required fields."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        node = json_out["nodes"][0]
        required_fields = {"id", "opcode", "category", "colour", "const", "pe",
                           "iram_offset", "ctx", "has_error", "loc"}
        assert set(node.keys()) >= required_fields

        # Location should have required fields
        loc = node["loc"]
        assert "line" in loc
        assert "column" in loc

    def test_edge_structure(self):
        """Each edge has all required fields."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
&b|pe0 <| add
&a|pe0 |> &b|pe0:L
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        edge = json_out["edges"][0]
        required_fields = {"source", "target", "port", "source_port", "has_error"}
        assert set(edge.keys()) >= required_fields

    def test_error_structure(self):
        """Each error has required fields."""
        source = """\
@system pe=1, sm=0
&a|pe0 <| const, 5
&b|pe0 |> &undefined|pe0:L
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        if len(json_out["errors"]) > 0:
            error = json_out["errors"][0]
            required_fields = {"line", "column", "category", "message", "suggestions"}
            assert set(error.keys()) >= required_fields

    def test_metadata_structure(self):
        """Metadata has all required fields."""
        source = "@system pe=2, sm=1"
        result = run_progressive(source)
        json_out = graph_to_json(result)

        metadata = json_out["metadata"]
        required_fields = {"stage", "pe_count", "sm_count"}
        assert set(metadata.keys()) >= required_fields
        assert metadata["pe_count"] == 2
        assert metadata["sm_count"] == 1

    def test_top_level_structure(self):
        """Top-level JSON has all required fields."""
        source = "@system pe=1, sm=0"
        result = run_progressive(source)
        json_out = graph_to_json(result)

        required_fields = {"type", "stage", "nodes", "edges", "regions",
                           "errors", "parse_error", "metadata"}
        assert set(json_out.keys()) >= required_fields
        assert json_out["type"] == "graph_update"


class TestEmptyProgram:
    """Test minimal/empty programs."""

    def test_system_only_json(self):
        """Program with only @system pragma produces valid JSON."""
        source = "@system pe=2, sm=1"
        result = run_progressive(source)
        json_out = graph_to_json(result)

        assert json_out["stage"] == "allocate"
        assert json_out["nodes"] == []
        assert json_out["edges"] == []
        assert json_out["metadata"]["pe_count"] == 2
        assert json_out["metadata"]["sm_count"] == 1


class TestColourMapping:
    """Test opcode-to-colour mapping."""

    def test_arithmetic_colour(self):
        """Arithmetic ops get correct colour."""
        source = """\
@system pe=1, sm=0
&add_node|pe0 <| add
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        node = next(n for n in json_out["nodes"] if n["id"] == "&add_node")
        assert node["category"] == "arithmetic"
        assert node["colour"] == "#4a90d9"  # arithmetic blue

    def test_memory_colour(self):
        """Memory ops get correct colour."""
        source = """\
@system pe=1, sm=0
&read_node|pe0 <| read
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        node = next(n for n in json_out["nodes"] if n["id"] == "&read_node")
        assert node["category"] == "memory"
        assert node["colour"] == "#ff5722"  # memory red

    def test_routing_colour(self):
        """Routing ops get correct colour."""
        source = """\
@system pe=1, sm=0
&pass_node|pe0 <| pass
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        node = next(n for n in json_out["nodes"] if n["id"] == "&pass_node")
        assert node["category"] == "routing"
        assert node["colour"] == "#9c27b0"  # routing purple


class TestMultipleRegions:
    """Test handling of nested and multiple regions."""

    def test_multiple_functions_and_locations(self):
        """Multiple function and location regions handled correctly."""
        source = """\
@system pe=2, sm=0

$func1 |> {
&a|pe0 <| const, 1
}

@loc1

$func2 |> {
&c|pe0 <| add
}
"""
        result = run_progressive(source)
        json_out = graph_to_json(result)

        # Only FUNCTION regions should appear in output
        regions = json_out["regions"]
        function_regions = [r for r in regions if r["kind"] == "function"]

        # Should have 2 function regions
        assert len(function_regions) == 2
        tags = {r["tag"] for r in function_regions}
        assert "$func1" in tags
        assert "$func2" in tags