
Store rev properly like a good boy

lewis 94ae11e2 76f1d622

+28
.sqlx/query-1091e22985de9cd9d24f55975eaa8bb1ea40bbf5237b328a031ba9d3b3d5a5e0.json
+ {
+   "db_name": "PostgreSQL",
+   "query": "SELECT uk.key_bytes, uk.encryption_version\n FROM user_keys uk\n JOIN users u ON uk.user_id = u.id\n WHERE u.did = $1",
+   "describe": {
+     "columns": [
+       {
+         "ordinal": 0,
+         "name": "key_bytes",
+         "type_info": "Bytea"
+       },
+       {
+         "ordinal": 1,
+         "name": "encryption_version",
+         "type_info": "Int4"
+       }
+     ],
+     "parameters": {
+       "Left": [
+         "Text"
+       ]
+     },
+     "nullable": [
+       false,
+       true
+     ]
+   },
+   "hash": "1091e22985de9cd9d24f55975eaa8bb1ea40bbf5237b328a031ba9d3b3d5a5e0"
+ }
-26
.sqlx/query-1261d14a3763b98464b212d001c9a11da30a55869128320de6d62693415953f5.json
- {
-   "db_name": "PostgreSQL",
-   "query": "SELECT cid, data FROM blocks",
-   "describe": {
-     "columns": [
-       {
-         "ordinal": 0,
-         "name": "cid",
-         "type_info": "Bytea"
-       },
-       {
-         "ordinal": 1,
-         "name": "data",
-         "type_info": "Bytea"
-       }
-     ],
-     "parameters": {
-       "Left": []
-     },
-     "nullable": [
-       false,
-       false
-     ]
-   },
-   "hash": "1261d14a3763b98464b212d001c9a11da30a55869128320de6d62693415953f5"
- }
+8 -2
.sqlx/query-1bff90667ece9e1e44e20e3477df1473bafa06610e54f10d9b3cb2cc06469854.json → .sqlx/query-5c322bbdf9cecab9077c937bd322e49200ac2b8931da1dfe6e55d56087fc1d35.json
  {
    "db_name": "PostgreSQL",
-   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n LIMIT $2\n ",
+   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq > $1 AND seq < $2\n ORDER BY seq ASC\n ",
    "describe": {
      "columns": [
        {
···
          "ordinal": 12,
          "name": "status",
          "type_info": "Text"
+       },
+       {
+         "ordinal": 13,
+         "name": "rev",
+         "type_info": "Text"
        }
      ],
      "parameters": {
···
        true,
        true,
        true,
+       true,
        true
      ]
    },
-   "hash": "1bff90667ece9e1e44e20e3477df1473bafa06610e54f10d9b3cb2cc06469854"
+   "hash": "5c322bbdf9cecab9077c937bd322e49200ac2b8931da1dfe6e55d56087fc1d35"
  }
+8 -2
.sqlx/query-239555df14c147a09096beb28f2ff0b093523c27e9527cd1f623c9a87b05b532.json → .sqlx/query-130dd93754cc36188e01255166aa65603f909c2b181fa0caa7796c62d4bc60e1.json
  {
    "db_name": "PostgreSQL",
-   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n ",
+   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n ",
    "describe": {
      "columns": [
        {
···
          "ordinal": 12,
          "name": "status",
          "type_info": "Text"
+       },
+       {
+         "ordinal": 13,
+         "name": "rev",
+         "type_info": "Text"
        }
      ],
      "parameters": {
···
        true,
        true,
        true,
+       true,
        true
      ]
    },
-   "hash": "239555df14c147a09096beb28f2ff0b093523c27e9527cd1f623c9a87b05b532"
+   "hash": "130dd93754cc36188e01255166aa65603f909c2b181fa0caa7796c62d4bc60e1"
  }
+3 -2
.sqlx/query-3792c455a955b8cf2c70c8aa76635083354c87330101ea0ea69d30f2a5b4b960.json → .sqlx/query-48f2ff46677b5dc26a4ca9ac0b4e86ebdb3a9862d006aa0e21bfa2d7b25a8f71.json
  {
    "db_name": "PostgreSQL",
-   "query": "\n INSERT INTO repo_seq (did, event_type, commit_cid)\n VALUES ($1, 'sync', $2)\n RETURNING seq\n ",
+   "query": "\n INSERT INTO repo_seq (did, event_type, commit_cid, rev)\n VALUES ($1, 'sync', $2, $3)\n RETURNING seq\n ",
    "describe": {
      "columns": [
        {
···
      "parameters": {
        "Left": [
          "Text",
+         "Text",
          "Text"
        ]
      },
···
        false
      ]
    },
-   "hash": "3792c455a955b8cf2c70c8aa76635083354c87330101ea0ea69d30f2a5b4b960"
+   "hash": "48f2ff46677b5dc26a4ca9ac0b4e86ebdb3a9862d006aa0e21bfa2d7b25a8f71"
  }
+8 -2
.sqlx/query-44b78996f9799398f384d9aebb36d01c27738d4677b7cae7ea6697f3f5135388.json → .sqlx/query-7a4016fed3eb3a16d6eb267013751af47ad6e8c9595711fe6c9d41121f904cb4.json
  {
    "db_name": "PostgreSQL",
-   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq = $1\n ",
+   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq = $1\n ",
    "describe": {
      "columns": [
        {
···
          "ordinal": 12,
          "name": "status",
          "type_info": "Text"
+       },
+       {
+         "ordinal": 13,
+         "name": "rev",
+         "type_info": "Text"
        }
      ],
      "parameters": {
···
        true,
        true,
        true,
+       true,
        true
      ]
    },
-   "hash": "44b78996f9799398f384d9aebb36d01c27738d4677b7cae7ea6697f3f5135388"
+   "hash": "7a4016fed3eb3a16d6eb267013751af47ad6e8c9595711fe6c9d41121f904cb4"
  }
+8 -2
.sqlx/query-777386dcbf2aa2785a6c16abdccbd8f751893039fb6bc2363d9760ca0d8a8a56.json → .sqlx/query-cc28150d6e1e1823a918d6dcf7744209614ef7b8298c210ddd65ac44da5c551a.json
  {
    "db_name": "PostgreSQL",
-   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq > $1 AND seq < $2\n ORDER BY seq ASC\n ",
+   "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n LIMIT $2\n ",
    "describe": {
      "columns": [
        {
···
          "ordinal": 12,
          "name": "status",
          "type_info": "Text"
+       },
+       {
+         "ordinal": 13,
+         "name": "rev",
+         "type_info": "Text"
        }
      ],
      "parameters": {
···
        true,
        true,
        true,
+       true,
        true
      ]
    },
-   "hash": "777386dcbf2aa2785a6c16abdccbd8f751893039fb6bc2363d9760ca0d8a8a56"
+   "hash": "cc28150d6e1e1823a918d6dcf7744209614ef7b8298c210ddd65ac44da5c551a"
  }
+8 -7
Dockerfile
- # Stage 1: Build frontend with Deno
  FROM denoland/deno:alpine AS frontend-builder
  WORKDIR /frontend
  COPY frontend/ ./
  RUN deno task build
- # Stage 2: Build Rust backend
+
  FROM rust:1.92-alpine AS builder
- RUN apk add ca-certificates openssl openssl-dev pkgconfig
+ RUN apk add ca-certificates openssl openssl-dev openssl-libs-static pkgconfig musl-dev
  WORKDIR /app
  COPY Cargo.toml Cargo.lock ./
- RUN mkdir src && echo "fn main() {}" > src/main.rs && cargo build --release && rm -rf src
  COPY src ./src
  COPY tests ./tests
  COPY migrations ./migrations
  COPY .sqlx ./.sqlx
- RUN touch src/main.rs && cargo build --release
- # Stage 3: Final image
+ RUN --mount=type=cache,target=/usr/local/cargo/registry \
+     --mount=type=cache,target=/app/target \
+     cargo build --release && \
+     cp target/release/tranquil-pds /tmp/tranquil-pds
+
  FROM alpine:3.23
- COPY --from=builder /app/target/release/tranquil-pds /usr/local/bin/tranquil-pds
+ COPY --from=builder /tmp/tranquil-pds /usr/local/bin/tranquil-pds
  COPY --from=builder /app/migrations /app/migrations
  COPY --from=frontend-builder /frontend/dist /app/frontend/dist
  WORKDIR /app
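Note on the builder stage: the new RUN --mount=type=cache lines require BuildKit, so a legacy docker build on an older daemon will reject them. A hypothetical invocation for hosts where BuildKit is not already the default (image tag assumed):

    # Enable BuildKit for this build so the registry/target cache mounts work
    DOCKER_BUILDKIT=1 docker build -t tranquil-pds .

The binary is copied to /tmp inside the RUN step because the cached /app/target mount is not persisted into the image layer, so the final stage copies from /tmp instead of target/release.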
+1
migrations/20251238_add_rev_to_repo_seq.sql
+ ALTER TABLE repo_seq ADD COLUMN rev TEXT;
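The column is added without a backfill, so events sequenced before this migration keep a NULL rev; the firehose changes further down fall back to re-parsing the commit block in that case. A hypothetical spot-check, assuming psql access to the same database:

    -- Older rows show NULL, events sequenced after this commit carry a TID string
    SELECT seq, event_type, rev
    FROM repo_seq
    ORDER BY seq DESC
    LIMIT 5;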
+107 -3
src/api/repo/import.rs
  use crate::api::ApiError;
+ use crate::api::repo::record::create_signed_commit;
  use crate::state::AppState;
  use crate::sync::import::{ImportError, apply_import, parse_car};
  use crate::sync::verify::CarVerifier;
···
      http::StatusCode,
      response::{IntoResponse, Response},
  };
+ use jacquard::types::{integer::LimitedU32, string::Tid};
+ use jacquard_repo::storage::BlockStore;
+ use k256::ecdsa::SigningKey;
  use serde_json::json;
  use tracing::{debug, error, info, warn};
···
          .and_then(|s| s.parse().ok())
          .unwrap_or(DEFAULT_MAX_BLOCKS);
      match apply_import(&state.db, user_id, root, blocks, max_blocks).await {
-         Ok(records) => {
+         Ok(import_result) => {
              info!(
                  "Successfully imported {} records for user {}",
-                 records.len(),
+                 import_result.records.len(),
                  did
              );
+             let key_row = match sqlx::query!(
+                 r#"SELECT uk.key_bytes, uk.encryption_version
+                    FROM user_keys uk
+                    JOIN users u ON uk.user_id = u.id
+                    WHERE u.did = $1"#,
+                 did
+             )
+             .fetch_optional(&state.db)
+             .await
+             {
+                 Ok(Some(row)) => row,
+                 Ok(None) => {
+                     error!("No signing key found for user {}", did);
+                     return (
+                         StatusCode::INTERNAL_SERVER_ERROR,
+                         Json(json!({"error": "InternalError", "message": "Signing key not found"})),
+                     )
+                         .into_response();
+                 }
+                 Err(e) => {
+                     error!("DB error fetching signing key: {:?}", e);
+                     return (
+                         StatusCode::INTERNAL_SERVER_ERROR,
+                         Json(json!({"error": "InternalError"})),
+                     )
+                         .into_response();
+                 }
+             };
+             let key_bytes = match crate::config::decrypt_key(&key_row.key_bytes, key_row.encryption_version) {
+                 Ok(k) => k,
+                 Err(e) => {
+                     error!("Failed to decrypt signing key: {}", e);
+                     return (
+                         StatusCode::INTERNAL_SERVER_ERROR,
+                         Json(json!({"error": "InternalError"})),
+                     )
+                         .into_response();
+                 }
+             };
+             let signing_key = match SigningKey::from_slice(&key_bytes) {
+                 Ok(k) => k,
+                 Err(e) => {
+                     error!("Invalid signing key: {:?}", e);
+                     return (
+                         StatusCode::INTERNAL_SERVER_ERROR,
+                         Json(json!({"error": "InternalError"})),
+                     )
+                         .into_response();
+                 }
+             };
+             let new_rev = Tid::now(LimitedU32::MIN);
+             let new_rev_str = new_rev.to_string();
+             let (commit_bytes, _sig) = match create_signed_commit(
+                 did,
+                 import_result.data_cid,
+                 &new_rev_str,
+                 None,
+                 &signing_key,
+             ) {
+                 Ok(result) => result,
+                 Err(e) => {
+                     error!("Failed to create new commit: {}", e);
+                     return (
+                         StatusCode::INTERNAL_SERVER_ERROR,
+                         Json(json!({"error": "InternalError"})),
+                     )
+                         .into_response();
+                 }
+             };
+             let new_root_cid: cid::Cid = match state.block_store.put(&commit_bytes).await {
+                 Ok(cid) => cid,
+                 Err(e) => {
+                     error!("Failed to store new commit block: {:?}", e);
+                     return (
+                         StatusCode::INTERNAL_SERVER_ERROR,
+                         Json(json!({"error": "InternalError"})),
+                     )
+                         .into_response();
+                 }
+             };
+             let new_root_str = new_root_cid.to_string();
+             if let Err(e) = sqlx::query!(
+                 "UPDATE repos SET repo_root_cid = $1, updated_at = NOW() WHERE user_id = $2",
+                 new_root_str,
+                 user_id
+             )
+             .execute(&state.db)
+             .await
+             {
+                 error!("Failed to update repo root: {:?}", e);
+                 return (
+                     StatusCode::INTERNAL_SERVER_ERROR,
+                     Json(json!({"error": "InternalError"})),
+                 )
+                     .into_response();
+             }
+             info!(
+                 "Created new commit for imported repo: cid={}, rev={}",
+                 new_root_str, new_rev_str
+             );
              if !is_migration {
-                 if let Err(e) = sequence_import_event(&state, did, &root.to_string()).await {
+                 if let Err(e) = sequence_import_event(&state, did, &new_root_str).await {
                      warn!("Failed to sequence import event: {:?}", e);
                  }
              }
+5 -3
src/api/repo/record/utils.rs
      .await
      .map_err(|e| format!("Failed to commit transaction: {}", e))?;
  if is_account_active {
-     let _ = sequence_sync_event(state, did, &new_root_cid.to_string()).await;
+     let _ = sequence_sync_event(state, did, &new_root_cid.to_string(), Some(&rev_str)).await;
  }
  Ok(CommitResult {
      commit_cid: new_root_cid,
···
      state: &AppState,
      did: &str,
      commit_cid: &str,
+     rev: Option<&str>,
  ) -> Result<i64, String> {
      let seq_row = sqlx::query!(
          r#"
-         INSERT INTO repo_seq (did, event_type, commit_cid)
-         VALUES ($1, 'sync', $2)
+         INSERT INTO repo_seq (did, event_type, commit_cid, rev)
+         VALUES ($1, 'sync', $2, $3)
          RETURNING seq
          "#,
          did,
          commit_cid,
+         rev,
      )
      .fetch_one(&state.db)
      .await
+32 -3
src/api/server/account_status.rs
      response::{IntoResponse, Response},
  };
  use bcrypt::verify;
+ use cid::Cid;
  use chrono::{Duration, Utc};
+ use jacquard_repo::commit::Commit;
+ use jacquard_repo::storage::BlockStore;
  use k256::ecdsa::SigningKey;
  use serde::{Deserialize, Serialize};
  use serde_json::json;
+ use std::str::FromStr;
  use tracing::{error, info, warn};
  use uuid::Uuid;
···
      }
  } else if did.starts_with("did:web:") {
      let client = reqwest::Client::new();
-     let did_path = &did[8..];
-     let url = format!("https://{}/.well-known/did.json", did_path.replace(':', "/"));
+     let host_and_path = &did[8..];
+     let decoded = host_and_path.replace("%3A", ":");
+     let parts: Vec<&str> = decoded.split(':').collect();
+     let (host, path_parts) = if parts.len() > 1 && parts[1].chars().all(|c| c.is_ascii_digit()) {
+         (format!("{}:{}", parts[0], parts[1]), parts[2..].to_vec())
+     } else {
+         (parts[0].to_string(), parts[1..].to_vec())
+     };
+     let scheme = if host.starts_with("localhost") || host.starts_with("127.") || host.contains(':') {
+         "http"
+     } else {
+         "https"
+     };
+     let url = if path_parts.is_empty() {
+         format!("{}://{}/.well-known/did.json", scheme, host)
+     } else {
+         format!("{}://{}/{}/did.json", scheme, host, path_parts.join("/"))
+     };
      let resp = client.get(&url).send().await.map_err(|e| {
          warn!("Failed to fetch did:web document for {}: {:?}", did, e);
          (
···
      .ok()
      .flatten();
  if let Some(root_cid) = repo_root {
+     let rev = if let Ok(cid) = Cid::from_str(&root_cid) {
+         if let Ok(Some(block)) = state.block_store.get(&cid).await {
+             Commit::from_cbor(&block).ok().map(|c| c.rev().to_string())
+         } else {
+             None
+         }
+     } else {
+         None
+     };
      if let Err(e) =
-         crate::api::repo::record::sequence_sync_event(&state, &did, &root_cid).await
+         crate::api::repo::record::sequence_sync_event(&state, &did, &root_cid, rev.as_deref()).await
      {
          warn!("Failed to sequence sync event for activation: {}", e);
      }
+1
src/sync/firehose.rs
      pub handle: Option<String>,
      pub active: Option<bool>,
      pub status: Option<String>,
+     pub rev: Option<String>,
  }
+7 -10
src/sync/import.rs
      pub prev: Option<String>,
  }

+ pub struct ImportResult {
+     pub records: Vec<ImportedRecord>,
+     pub data_cid: Cid,
+ }
+
  fn extract_commit_info(commit: &Ipld) -> Result<(Cid, CommitInfo), ImportError> {
      let obj = match commit {
          Ipld::Map(m) => m,
···
      root: Cid,
      blocks: HashMap<Cid, Bytes>,
      max_blocks: usize,
- ) -> Result<Vec<ImportedRecord>, ImportError> {
+ ) -> Result<ImportResult, ImportError> {
      if blocks.len() > max_blocks {
          return Err(ImportError::SizeLimitExceeded);
      }
···
              .await?;
          }
      }
-     let root_str = root.to_string();
-     sqlx::query!(
-         "UPDATE repos SET repo_root_cid = $1, updated_at = NOW() WHERE user_id = $2",
-         root_str,
-         user_id
-     )
-     .execute(&mut *tx)
-     .await?;
      sqlx::query!("DELETE FROM records WHERE repo_id = $1", user_id)
          .execute(&mut *tx)
          .await?;
···
          blocks.len(),
          records.len()
      );
-     Ok(records)
+     Ok(ImportResult { records, data_cid })
  }

  #[cfg(test)]
+3 -3
src/sync/listener.rs
      let events = sqlx::query_as!(
          SequencedEvent,
          r#"
-         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
          FROM repo_seq
          WHERE seq > $1
          ORDER BY seq ASC
···
      let gap_events = sqlx::query_as!(
          SequencedEvent,
          r#"
-         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
          FROM repo_seq
          WHERE seq > $1 AND seq < $2
          ORDER BY seq ASC
···
      let event = sqlx::query_as!(
          SequencedEvent,
          r#"
-         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
          FROM repo_seq
          WHERE seq = $1
          "#,
+1 -1
src/sync/subscribe_repos.rs
      let events = sqlx::query_as!(
          SequencedEvent,
          r#"
-         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+         SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
          FROM repo_seq
          WHERE seq > $1
          ORDER BY seq ASC
+12 -4
src/sync/util.rs
          .get(&commit_cid)
          .await?
          .ok_or_else(|| anyhow::anyhow!("Commit block not found"))?;
-     let rev = extract_rev_from_commit_bytes(&commit_bytes)
-         .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?;
+     let rev = if let Some(ref stored_rev) = event.rev {
+         stored_rev.clone()
+     } else {
+         extract_rev_from_commit_bytes(&commit_bytes)
+             .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?
+     };
      let car_bytes = write_car_blocks(commit_cid, Some(commit_bytes), BTreeMap::new()).await?;
      let frame = SyncFrame {
          did: event.did.clone(),
···
      let commit_bytes = prefetched
          .get(&commit_cid)
          .ok_or_else(|| anyhow::anyhow!("Commit block not found in prefetched"))?;
-     let rev = extract_rev_from_commit_bytes(commit_bytes)
-         .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?;
+     let rev = if let Some(ref stored_rev) = event.rev {
+         stored_rev.clone()
+     } else {
+         extract_rev_from_commit_bytes(commit_bytes)
+             .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?
+     };
      let car_bytes = futures::executor::block_on(write_car_blocks(
          commit_cid,
          Some(commit_bytes.clone()),
+2 -2
tests/identity.rs
      assert!(also_known_as[0].as_str().unwrap().starts_with("at://"));
      assert!(body["verificationMethods"]["atproto"].is_string());
      assert_eq!(
-         body["services"]["atprotoPds"]["type"],
+         body["services"]["atproto_pds"]["type"],
          "AtprotoPersonalDataServer"
      );
-     assert!(body["services"]["atprotoPds"]["endpoint"].is_string());
+     assert!(body["services"]["atproto_pds"]["endpoint"].is_string());
  }

  #[tokio::test]
-1
tests/import_with_verification.rs
  use sha2::{Digest, Sha256};
  use sqlx::PgPool;
  use std::collections::BTreeMap;
- use std::str::FromStr;
  use wiremock::matchers::{method, path};
  use wiremock::{Mock, MockServer, ResponseTemplate};