A very experimental PLC implementation which uses BFT consensus for decentralization

Implement streaming export via websockets

gbl08ma.com 3a3acdb4 5ae01b9f

verified
+215 -43
+1 -1
README.md
··· 52 52 53 53 - **PLC HTTP API:** Full support for GET and POST operations (resolve DID docs, logs, audit logs, and submit new operations), with the detail that only sequence-based export pagination is supported 54 54 - didplcbft is already able to work as a decentralized, standard-compliant PLC implementation, independently from plc.directory 55 + - Supports **Websocket-based Export** equivalent to that of the official plc.directory service (described [here](https://github.com/did-method-plc/did-method-plc/pull/129)). 55 56 - **Validation:** On-chain validation of PLC operations to ensure history integrity. 56 57 - **Node-to-node fast syncing:** Support for snapshot-based sync, to quickly bring new replicas online, making use of the facilities offered by CometBFT. 57 58 - A custom compact serialization format is used, able to archive/transmit the entire directory using around 30 GB of space/data transfer as of January 2026. ··· 65 66 66 67 ### Planned 67 68 68 - - **Websocket-based Export** equivalent to that of the official plc.directory service. 69 69 - **Bi-directional Sync:** submitting operations observed on the didplcbft network back to the official plc.directory, while still deferring to operations served by the latter in case of conflict. 70 70 - **Spam Prevention:** developing a non-currency-based throttling mechanism. 71 71 - For example, by gossiping hashes of IP addresses and AS numbers across the network in order to limit how quickly spammers can create new identities in the PLC. The challenge is that certain entities (e.g. Bluesky's own official PDSs) will naturally need to create many more identities than others... maybe some sort of allowlisting mechanism would need to be implemented.
+3 -1
abciapp/range_challenge.go
··· 186 186 } 187 187 defer func() { 188 188 err := c.nodeEventBus.Unsubscribe(context.Background(), subscriber, cmttypes.EventQueryNewBlockHeader) 189 - _ = err 189 + if err != nil { 190 + c.logger.Error("failed to unsubscribe from new blocks", "error", stacktrace.Propagate(err)) 191 + } 190 192 }() 191 193 192 194 for {
+193 -26
httpapi/server.go
··· 14 14 "strings" 15 15 "sync" 16 16 "sync/atomic" 17 + "syscall" 17 18 "time" 18 19 19 20 "github.com/bluesky-social/indigo/atproto/atcrypto" 21 + "github.com/coder/websocket" 22 + "github.com/coder/websocket/wsjson" 20 23 "github.com/did-method-plc/go-didplc" 21 24 "github.com/gbl08ma/stacktrace" 25 + "github.com/google/uuid" 22 26 cbornode "github.com/ipfs/go-ipld-cbor" 23 27 "github.com/rs/cors" 24 28 "github.com/samber/lo" 25 29 30 + cmtlog "github.com/cometbft/cometbft/libs/log" 31 + "github.com/cometbft/cometbft/rpc/core" 32 + cmttypes "github.com/cometbft/cometbft/types" 26 33 "tangled.org/gbl08ma.com/didplcbft/abciapp" 27 34 "tangled.org/gbl08ma.com/didplcbft/plc" 28 35 "tangled.org/gbl08ma.com/didplcbft/transaction" ··· 31 38 32 39 // Server represents the HTTP server for the PLC directory. 33 40 type Server struct { 41 + logger cmtlog.Logger 34 42 txFactory *transaction.Factory 35 43 plc plc.ReadPLC 36 44 router *http.ServeMux 37 45 mempoolSubmitter types.MempoolSubmitter 46 + nodeEventBus *cmttypes.EventBus 38 47 srv http.Server 39 48 handlerTimeout time.Duration 40 49 proto string ··· 45 54 } 46 55 47 56 // NewServer creates a new instance of the Server. 
48 - func NewServer(txFactory *transaction.Factory, plc plc.ReadPLC, mempoolSubmitter types.MempoolSubmitter, listenAddr string, handlerTimeout time.Duration) (*Server, error) { 57 + func NewServer( 58 + logger cmtlog.Logger, 59 + txFactory *transaction.Factory, 60 + plc plc.ReadPLC, 61 + mempoolSubmitter types.MempoolSubmitter, 62 + nodeEventBus *cmttypes.EventBus, 63 + listenAddr string, 64 + handlerTimeout time.Duration) (*Server, error) { 49 65 s := &Server{ 66 + logger: logger, 50 67 txFactory: txFactory, 51 68 plc: plc, 52 69 router: http.NewServeMux(), 53 70 mempoolSubmitter: mempoolSubmitter, 71 + nodeEventBus: nodeEventBus, 54 72 srv: http.Server{Addr: listenAddr}, 55 73 handlerTimeout: handlerTimeout, 56 74 } ··· 58 76 59 77 handler := cors.Default().Handler(s.router) 60 78 61 - timeoutMsg, _ := json.Marshal(map[string]string{"message": "Internal server timeout"}) 62 - 63 - handler = http.TimeoutHandler(handler, s.handlerTimeout, string(timeoutMsg)) 64 - 65 79 s.srv.Handler = handler 66 80 67 81 parts := strings.SplitN(listenAddr, "://", 2) ··· 78 92 79 93 // setupRoutes configures the routes for the server. 
80 94 func (s *Server) setupRoutes() { 81 - s.router.HandleFunc("GET /{did}", s.makeDIDHandler(s.handleResolveDID)) 82 - s.router.HandleFunc("POST /{did}", s.makeDIDHandler(s.handleCreatePLC)) 83 - s.router.HandleFunc("GET /{did}/log", s.makeDIDHandler(s.handleGetPLCLog)) 84 - s.router.HandleFunc("GET /{did}/log/audit", s.makeDIDHandler(s.handleGetPLCAuditLog)) 85 - s.router.HandleFunc("GET /{did}/log/last", s.makeDIDHandler(s.handleGetLastOp)) 86 - s.router.HandleFunc("GET /{did}/data", s.makeDIDHandler(s.handleGetPLCData)) 87 - s.router.HandleFunc("GET /export", s.handleExport) 95 + wrapInTimeout := func(fn http.HandlerFunc) http.Handler { 96 + timeoutMsg, _ := json.Marshal(map[string]string{"message": "Internal server timeout"}) 97 + 98 + return http.TimeoutHandler(http.HandlerFunc(fn), s.handlerTimeout, string(timeoutMsg)) 99 + } 100 + s.router.Handle("GET /{did}", wrapInTimeout(s.makeDIDHandler(s.handleResolveDID))) 101 + s.router.Handle("POST /{did}", wrapInTimeout(s.makeDIDHandler(s.handleCreatePLC))) 102 + s.router.Handle("GET /{did}/log", wrapInTimeout(s.makeDIDHandler(s.handleGetPLCLog))) 103 + s.router.Handle("GET /{did}/log/audit", wrapInTimeout(s.makeDIDHandler(s.handleGetPLCAuditLog))) 104 + s.router.Handle("GET /{did}/log/last", wrapInTimeout(s.makeDIDHandler(s.handleGetLastOp))) 105 + s.router.Handle("GET /{did}/data", wrapInTimeout(s.makeDIDHandler(s.handleGetPLCData))) 106 + s.router.Handle("GET /export", wrapInTimeout(s.handleExport)) 107 + s.router.HandleFunc("/export/stream", s.handleExportStream) 88 108 89 109 // TODO expose pprof only if enabled in [plc] settings 90 110 s.router.HandleFunc("/debug/pprof/", pprof.Index) ··· 143 163 func (s *Server) handleResolveDID(w http.ResponseWriter, r *http.Request, did string) { 144 164 ctx := context.Background() 145 165 doc, err := s.plc.Resolve(ctx, s.txFactory.ReadCommitted(), did) 146 - if handlePLCError(w, err, did) { 166 + if s.handlePLCError(w, err, did) { 147 167 return 148 168 } 149 169 ··· 221 
241 } 222 242 223 243 txBytes, err := cbornode.DumpObject(tx) 224 - if handlePLCError(w, err, "") { 244 + if s.handlePLCError(w, err, "") { 225 245 return 226 246 } 227 247 ··· 229 249 // in practice we expect operations to be included in about one second 230 250 result, err := s.mempoolSubmitter.BroadcastTx(r.Context(), txBytes, true) 231 251 // TODO more robust error handling 232 - if handlePLCError(w, err, "") { 252 + if s.handlePLCError(w, err, "") { 233 253 return 234 254 } 235 255 ··· 249 269 // handleGetPLCLog handles the GET /{did}/log endpoint. 250 270 func (s *Server) handleGetPLCLog(w http.ResponseWriter, r *http.Request, did string) { 251 271 ops, err := s.plc.OperationLog(r.Context(), s.txFactory.ReadCommitted(), did) 252 - if handlePLCError(w, err, did) { 272 + if s.handlePLCError(w, err, did) { 253 273 return 254 274 } 255 275 ··· 260 280 // handleGetPLCAuditLog handles the GET /{did}/log/audit endpoint. 261 281 func (s *Server) handleGetPLCAuditLog(w http.ResponseWriter, r *http.Request, did string) { 262 282 entries, err := s.plc.AuditLog(r.Context(), s.txFactory.ReadCommitted(), did) 263 - if handlePLCError(w, err, did) { 283 + if s.handlePLCError(w, err, did) { 264 284 return 265 285 } 266 286 ··· 271 291 // handleGetLastOp handles the GET /{did}/log/last endpoint. 272 292 func (s *Server) handleGetLastOp(w http.ResponseWriter, r *http.Request, did string) { 273 293 op, err := s.plc.LastOperation(r.Context(), s.txFactory.ReadCommitted(), did) 274 - if handlePLCError(w, err, did) { 294 + if s.handlePLCError(w, err, did) { 275 295 return 276 296 } 277 297 ··· 282 302 // handleGetPLCData handles the GET /{did}/data endpoint. 
283 303 func (s *Server) handleGetPLCData(w http.ResponseWriter, r *http.Request, did string) { 284 304 data, err := s.plc.Data(r.Context(), s.txFactory.ReadCommitted(), did) 285 - if handlePLCError(w, err, did) { 305 + if s.handlePLCError(w, err, did) { 286 306 return 287 307 } 288 308 ··· 304 324 json.NewEncoder(w).Encode(resp) 305 325 } 306 326 327 + type jsonEntry struct { 328 + Seq uint64 `json:"seq"` 329 + Type string `json:"type"` 330 + *didplc.LogEntry 331 + } 332 + 307 333 // handleExport handles the GET /export endpoint. 308 334 func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) { 309 335 query := r.URL.Query() ··· 332 358 } 333 359 334 360 entries, err := s.plc.Export(r.Context(), s.txFactory.ReadCommitted(), after, count) 335 - if handlePLCError(w, err, "") { 361 + if s.handlePLCError(w, err, "") { 336 362 return 337 363 } 338 364 339 365 w.Header().Set("Content-Type", "application/jsonlines") 340 366 341 - type jsonEntry struct { 342 - Seq uint64 `json:"seq"` 343 - Type string `json:"type"` 344 - *didplc.LogEntry 345 - } 346 367 for _, entry := range entries { 347 368 json.NewEncoder(w).Encode(jsonEntry{ 348 369 Seq: entry.Seq, ··· 352 373 } 353 374 } 354 375 376 + func (s *Server) handleExportStream(w http.ResponseWriter, r *http.Request) { 377 + c, err := websocket.Accept(w, r, nil) 378 + if s.handlePLCError(w, err, "") { 379 + return 380 + } 381 + defer c.CloseNow() 382 + 383 + cursorStr := r.URL.Query().Get("cursor") 384 + cursor := uint64(0) 385 + operationCount, err := s.txFactory.ReadCommitted().CountOperations() 386 + if err != nil { 387 + s.logger.Error("Export stream failed to get operation count", "error", stacktrace.Propagate(err)) 388 + c.Close(websocket.StatusInternalError, "internal error") 389 + return 390 + } 391 + 392 + if cursorStr != "" { 393 + // the official implementation always uses status code 1000 (websocket.StatusNormalClosure) for these errors 394 + cursor, err = strconv.ParseUint(cursorStr, 10, 64) 395 + 
if err != nil { 396 + c.Close(websocket.StatusNormalClosure, "InvalidCursor") // not specified in the spec, but should be good enough 397 + return 398 + } 399 + 400 + // validate cursor against operationCount 401 + if cursor > operationCount { 402 + c.Close(websocket.StatusNormalClosure, "FutureCursor") // as in the spec 403 + return 404 + } 405 + } else { 406 + cursor = operationCount 407 + } 408 + 409 + uuid, err := uuid.NewRandom() 410 + if err != nil { 411 + s.logger.Error("Export stream failed to generate UUID", "error", stacktrace.Propagate(err)) 412 + c.Close(websocket.StatusInternalError, "internal error") 413 + return 414 + } 415 + subscriber := uuid.String() 416 + 417 + ctx, cancelCtx := context.WithCancel(r.Context()) 418 + 419 + // We do not expect to read anything from the websocket 420 + ctx = c.CloseRead(ctx) 421 + 422 + newBlockCh := make(chan struct{}, 1) 423 + wg := sync.WaitGroup{} 424 + wg.Go(func() { 425 + defer close(newBlockCh) 426 + 427 + subCtx, cancel := context.WithTimeout(ctx, core.SubscribeTimeout) 428 + defer cancel() 429 + 430 + blocksSub, err := s.nodeEventBus.Subscribe(subCtx, subscriber, cmttypes.EventQueryNewBlockHeader) 431 + if err != nil { 432 + s.logger.Error("Export stream failed to subscribe to new block headers", "error", stacktrace.Propagate(err)) 433 + c.Close(websocket.StatusInternalError, "internal error") 434 + return 435 + } 436 + defer func() { 437 + err := s.nodeEventBus.Unsubscribe(context.Background(), subscriber, cmttypes.EventQueryNewBlockHeader) 438 + if err != nil { 439 + s.logger.Error("Export stream failed to unsubscribe from new block headers", "error", stacktrace.Propagate(err)) 440 + } 441 + }() 442 + 443 + for { 444 + select { 445 + case <-ctx.Done(): 446 + return 447 + case <-blocksSub.Out(): 448 + // We can't block here! 
Otherwise our subscription will be terminated for not consuming messages fast enough 449 + select { 450 + case newBlockCh <- struct{}{}: 451 + default: 452 + } 453 + case <-blocksSub.Canceled(): 454 + err := blocksSub.Err() 455 + if err != nil { 456 + s.logger.Error("blocksSub was canceled with error", "error", stacktrace.Propagate(err)) 457 + } 458 + return 459 + } 460 + } 461 + }) 462 + 463 + defer wg.Wait() 464 + defer cancelCtx() 465 + 466 + const numEntriesPerBatch = 100 467 + for { 468 + entries, err := s.plc.Export(ctx, s.txFactory.ReadCommitted(), cursor, numEntriesPerBatch) 469 + if err != nil { 470 + s.logger.Error("Export stream failed to export entries", "error", stacktrace.Propagate(err)) 471 + c.Close(websocket.StatusInternalError, "internal error") 472 + return 473 + } 474 + 475 + for _, entry := range entries { 476 + // 1 week is the same as what the official implementation appears to allow 477 + // TODO make configurable (the official implementation has it configurable) 478 + // TODO consider whether we really need to implement this limitation, as much like what happens with slow consumers, 479 + // we probably don't have any problems dealing with old entries, unlike the official implementation's "outbox"? 
480 + if time.Since(entry.CreatedAt) > 7*24*time.Hour { 481 + c.Close(websocket.StatusNormalClosure, "OutdatedCursor") // as in the spec 482 + return 483 + } 484 + err := wsjson.Write(ctx, c, jsonEntry{ 485 + Seq: entry.Seq, 486 + Type: "sequenced_op", 487 + LogEntry: lo.ToPtr(entry.ToDIDPLCLogEntry()), 488 + }) 489 + if err != nil { 490 + if !errors.Is(err, context.Canceled) && !errors.Is(err, syscall.EPIPE) { 491 + s.logger.Error("Export stream failed to write entry", "error", stacktrace.Propagate(err)) 492 + } 493 + c.Close(websocket.StatusInternalError, "internal error") 494 + return 495 + } 496 + 497 + cursor = entry.Seq 498 + } 499 + 500 + if len(entries) == numEntriesPerBatch { 501 + // there's a chance there's already more available, so immediately try fetching and sending again 502 + continue 503 + } 504 + 505 + // TODO periodically check CountOperations and compare against cursor 506 + // to see if we need to close with reason ConsumerTooSlow 507 + // (the official implementation needs this because their "outbox" has some buffers, but our buffers are the same size regardless of "where" in the stream we are: do we really need to check for slow consumers?) 508 + // (does a slow consumer cause us to spend any more resources than a fast one, or one that resorts to using the non-HTTP endpoint? I don't think so) 509 + 510 + select { 511 + case <-ctx.Done(): 512 + return 513 + case _, ok := <-newBlockCh: 514 + if !ok { 515 + return 516 + } 517 + } 518 + } 519 + } 520 + 355 521 // handlePLCError handles errors from the PLC interface and sends the appropriate HTTP response. 
356 - func handlePLCError(w http.ResponseWriter, err error, did string) bool { 522 + func (s *Server) handlePLCError(w http.ResponseWriter, err error, did string) bool { 357 523 if err == nil { 358 524 return false 359 525 } ··· 363 529 case errors.Is(err, plc.ErrDIDGone): 364 530 sendErrorResponse(w, http.StatusGone, fmt.Sprintf("DID not available: %s", did)) 365 531 default: 532 + s.logger.Error("PLC server returning internal server error", "did", did, "error", stacktrace.Propagate(err)) 366 533 sendErrorResponse(w, http.StatusInternalServerError, "Internal server error") 367 534 } 368 535 return true
+17 -14
httpapi/server_test.go
··· 10 10 "testing" 11 11 "time" 12 12 13 + cmtlog "github.com/cometbft/cometbft/libs/log" 13 14 "github.com/did-method-plc/go-didplc" 14 15 "github.com/stretchr/testify/require" 15 16 "tangled.org/gbl08ma.com/didplcbft/plc" ··· 17 18 "tangled.org/gbl08ma.com/didplcbft/transaction" 18 19 "tangled.org/gbl08ma.com/didplcbft/types" 19 20 ) 21 + 22 + var testLogger = cmtlog.NewNopLogger() 20 23 21 24 // MockReadPLC is a mock implementation of the ReadPLC interface for testing. 22 25 type MockReadPLC struct { ··· 108 111 txFactory, _, _ := testutil.NewTestTxFactory(t) 109 112 110 113 t.Run("Test Resolve DID", func(t *testing.T) { 111 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 114 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 112 115 require.NoError(t, err) 113 116 114 117 req, err := http.NewRequest("GET", "/did:plc:test", nil) ··· 123 126 124 127 t.Run("Test Resolve DID Not Found", func(t *testing.T) { 125 128 mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "notfound"} 126 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 129 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 127 130 require.NoError(t, err) 128 131 129 132 req, err := http.NewRequest("GET", "/did:plc:test", nil) ··· 138 141 139 142 t.Run("Test Resolve DID Gone", func(t *testing.T) { 140 143 mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "gone"} 141 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 144 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 142 145 require.NoError(t, err) 143 146 144 147 req, err := http.NewRequest("GET", "/did:plc:test", nil) ··· 153 156 154 157 t.Run("Test Resolve DID Internal Error", func(t *testing.T) { 155 158 mockPLC := 
&MockReadPLC{shouldReturnError: true, errorType: "internal"} 156 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 159 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 157 160 require.NoError(t, err) 158 161 159 162 req, err := http.NewRequest("GET", "/did:plc:test", nil) ··· 167 170 }) 168 171 169 172 t.Run("Test Create PLC Operation", func(t *testing.T) { 170 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 173 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 171 174 require.NoError(t, err) 172 175 173 176 op := map[string]interface{}{ ··· 191 194 }) 192 195 193 196 t.Run("Test Get PLC Log", func(t *testing.T) { 194 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 197 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 195 198 require.NoError(t, err) 196 199 197 200 req, err := http.NewRequest("GET", "/did:plc:test/log", nil) ··· 205 208 206 209 t.Run("Test Get PLC Log Not Found", func(t *testing.T) { 207 210 mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "notfound"} 208 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 211 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 209 212 require.NoError(t, err) 210 213 211 214 req, err := http.NewRequest("GET", "/did:plc:test/log", nil) ··· 219 222 }) 220 223 221 224 t.Run("Test Get PLC Audit Log", func(t *testing.T) { 222 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 225 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 223 226 require.NoError(t, err) 224 227 225 228 req, err := http.NewRequest("GET", 
"/did:plc:test/log/audit", nil) ··· 232 235 }) 233 236 234 237 t.Run("Test Get Last Operation", func(t *testing.T) { 235 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 238 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 236 239 require.NoError(t, err) 237 240 238 241 req, err := http.NewRequest("GET", "/did:plc:test/log/last", nil) ··· 246 249 247 250 t.Run("Test Get Last Operation Internal Error", func(t *testing.T) { 248 251 mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "internal"} 249 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 252 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 250 253 require.NoError(t, err) 251 254 252 255 req, err := http.NewRequest("GET", "/did:plc:test/log/last", nil) ··· 260 263 }) 261 264 262 265 t.Run("Test Get PLC Data", func(t *testing.T) { 263 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 266 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 264 267 require.NoError(t, err) 265 268 266 269 req, err := http.NewRequest("GET", "/did:plc:test/data", nil) ··· 274 277 275 278 t.Run("Test Get PLC Data Not Found", func(t *testing.T) { 276 279 mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "notfound"} 277 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 280 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 278 281 require.NoError(t, err) 279 282 280 283 req, err := http.NewRequest("GET", "/did:plc:test/data", nil) ··· 288 291 }) 289 292 290 293 t.Run("Test Export", func(t *testing.T) { 291 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 294 + server, err := 
NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 292 295 require.NoError(t, err) 293 296 294 297 req, err := http.NewRequest("GET", "/export?count=10", nil) ··· 302 305 303 306 t.Run("Test Export Internal Error", func(t *testing.T) { 304 307 mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "internal"} 305 - server, err := NewServer(txFactory, mockPLC, nil, "tcp://127.0.0.1:8080", 15*time.Second) 308 + server, err := NewServer(testLogger, txFactory, mockPLC, nil, nil, "tcp://127.0.0.1:8080", 15*time.Second) 306 309 require.NoError(t, err) 307 310 308 311 req, err := http.NewRequest("GET", "/export?count=10", nil)
+1 -1
main.go
··· 179 179 }() 180 180 181 181 if config.PLC.ListenAddress != "" { 182 - plcAPIServer, err := httpapi.NewServer(txFactory, plc, mempoolSubmitter, config.PLC.ListenAddress, 30*time.Second) 182 + plcAPIServer, err := httpapi.NewServer(logger.With("module", "plcapi"), txFactory, plc, mempoolSubmitter, node.EventBus(), config.PLC.ListenAddress, 10*time.Second) 183 183 if err != nil { 184 184 log.Fatalf("Creating PLC API server: %v", err) 185 185 }