+28  .sqlx/query-1091e22985de9cd9d24f55975eaa8bb1ea40bbf5237b328a031ba9d3b3d5a5e0.json
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT uk.key_bytes, uk.encryption_version\n FROM user_keys uk\n JOIN users u ON uk.user_id = u.id\n WHERE u.did = $1",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "key_bytes",
+        "type_info": "Bytea"
+      },
+      {
+        "ordinal": 1,
+        "name": "encryption_version",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Text"
+      ]
+    },
+    "nullable": [
+      false,
+      true
+    ]
+  },
+  "hash": "1091e22985de9cd9d24f55975eaa8bb1ea40bbf5237b328a031ba9d3b3d5a5e0"
+}
-26  .sqlx/query-1261d14a3763b98464b212d001c9a11da30a55869128320de6d62693415953f5.json
-{
-  "db_name": "PostgreSQL",
-  "query": "SELECT cid, data FROM blocks",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "cid",
-        "type_info": "Bytea"
-      },
-      {
-        "ordinal": 1,
-        "name": "data",
-        "type_info": "Bytea"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "1261d14a3763b98464b212d001c9a11da30a55869128320de6d62693415953f5"
-}
+8 -2  .sqlx/query-1bff90667ece9e1e44e20e3477df1473bafa06610e54f10d9b3cb2cc06469854.json → .sqlx/query-cc28150d6e1e1823a918d6dcf7744209614ef7b8298c210ddd65ac44da5c551a.json
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n LIMIT $2\n ",
+  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n LIMIT $2\n ",
   "describe": {
     "columns": [
       {
···
         "ordinal": 12,
         "name": "status",
         "type_info": "Text"
+      },
+      {
+        "ordinal": 13,
+        "name": "rev",
+        "type_info": "Text"
       }
     ],
     "parameters": {
···
       true,
       true,
       true,
+      true,
       true
     ]
   },
-  "hash": "1bff90667ece9e1e44e20e3477df1473bafa06610e54f10d9b3cb2cc06469854"
+  "hash": "cc28150d6e1e1823a918d6dcf7744209614ef7b8298c210ddd65ac44da5c551a"
 }
+8 -2  .sqlx/query-239555df14c147a09096beb28f2ff0b093523c27e9527cd1f623c9a87b05b532.json → .sqlx/query-130dd93754cc36188e01255166aa65603f909c2b181fa0caa7796c62d4bc60e1.json
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n ",
+  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq > $1\n ORDER BY seq ASC\n ",
   "describe": {
     "columns": [
       {
···
         "ordinal": 12,
         "name": "status",
         "type_info": "Text"
+      },
+      {
+        "ordinal": 13,
+        "name": "rev",
+        "type_info": "Text"
       }
     ],
     "parameters": {
···
       true,
       true,
       true,
+      true,
       true
     ]
   },
-  "hash": "239555df14c147a09096beb28f2ff0b093523c27e9527cd1f623c9a87b05b532"
+  "hash": "130dd93754cc36188e01255166aa65603f909c2b181fa0caa7796c62d4bc60e1"
 }
+3 -2  .sqlx/query-3792c455a955b8cf2c70c8aa76635083354c87330101ea0ea69d30f2a5b4b960.json → .sqlx/query-48f2ff46677b5dc26a4ca9ac0b4e86ebdb3a9862d006aa0e21bfa2d7b25a8f71.json
 {
   "db_name": "PostgreSQL",
-  "query": "\n INSERT INTO repo_seq (did, event_type, commit_cid)\n VALUES ($1, 'sync', $2)\n RETURNING seq\n ",
+  "query": "\n INSERT INTO repo_seq (did, event_type, commit_cid, rev)\n VALUES ($1, 'sync', $2, $3)\n RETURNING seq\n ",
   "describe": {
     "columns": [
       {
···
     "parameters": {
       "Left": [
         "Text",
+        "Text",
         "Text"
       ]
     },
···
       false
     ]
   },
-  "hash": "3792c455a955b8cf2c70c8aa76635083354c87330101ea0ea69d30f2a5b4b960"
+  "hash": "48f2ff46677b5dc26a4ca9ac0b4e86ebdb3a9862d006aa0e21bfa2d7b25a8f71"
 }
+8 -2  .sqlx/query-44b78996f9799398f384d9aebb36d01c27738d4677b7cae7ea6697f3f5135388.json → .sqlx/query-7a4016fed3eb3a16d6eb267013751af47ad6e8c9595711fe6c9d41121f904cb4.json
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq = $1\n ",
+  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq = $1\n ",
   "describe": {
     "columns": [
       {
···
         "ordinal": 12,
         "name": "status",
         "type_info": "Text"
+      },
+      {
+        "ordinal": 13,
+        "name": "rev",
+        "type_info": "Text"
       }
     ],
     "parameters": {
···
       true,
       true,
       true,
+      true,
       true
     ]
   },
-  "hash": "44b78996f9799398f384d9aebb36d01c27738d4677b7cae7ea6697f3f5135388"
+  "hash": "7a4016fed3eb3a16d6eb267013751af47ad6e8c9595711fe6c9d41121f904cb4"
 }
+8 -2  .sqlx/query-777386dcbf2aa2785a6c16abdccbd8f751893039fb6bc2363d9760ca0d8a8a56.json → .sqlx/query-5c322bbdf9cecab9077c937bd322e49200ac2b8931da1dfe6e55d56087fc1d35.json
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status\n FROM repo_seq\n WHERE seq > $1 AND seq < $2\n ORDER BY seq ASC\n ",
+  "query": "\n SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev\n FROM repo_seq\n WHERE seq > $1 AND seq < $2\n ORDER BY seq ASC\n ",
   "describe": {
     "columns": [
       {
···
         "ordinal": 12,
         "name": "status",
         "type_info": "Text"
+      },
+      {
+        "ordinal": 13,
+        "name": "rev",
+        "type_info": "Text"
       }
     ],
     "parameters": {
···
       true,
       true,
       true,
+      true,
       true
     ]
   },
-  "hash": "777386dcbf2aa2785a6c16abdccbd8f751893039fb6bc2363d9760ca0d8a8a56"
+  "hash": "5c322bbdf9cecab9077c937bd322e49200ac2b8931da1dfe6e55d56087fc1d35"
 }
+8 -7  Dockerfile
-# Stage 1: Build frontend with Deno
 FROM denoland/deno:alpine AS frontend-builder
 WORKDIR /frontend
 COPY frontend/ ./
 RUN deno task build
-# Stage 2: Build Rust backend
+
 FROM rust:1.92-alpine AS builder
-RUN apk add ca-certificates openssl openssl-dev pkgconfig
+RUN apk add ca-certificates openssl openssl-dev openssl-libs-static pkgconfig musl-dev
 WORKDIR /app
 COPY Cargo.toml Cargo.lock ./
-RUN mkdir src && echo "fn main() {}" > src/main.rs && cargo build --release && rm -rf src
 COPY src ./src
 COPY tests ./tests
 COPY migrations ./migrations
 COPY .sqlx ./.sqlx
-RUN touch src/main.rs && cargo build --release
-# Stage 3: Final image
+RUN --mount=type=cache,target=/usr/local/cargo/registry \
+    --mount=type=cache,target=/app/target \
+    cargo build --release && \
+    cp target/release/tranquil-pds /tmp/tranquil-pds
+
 FROM alpine:3.23
-COPY --from=builder /app/target/release/tranquil-pds /usr/local/bin/tranquil-pds
+COPY --from=builder /tmp/tranquil-pds /usr/local/bin/tranquil-pds
 COPY --from=builder /app/migrations /app/migrations
 COPY --from=frontend-builder /frontend/dist /app/frontend/dist
 WORKDIR /app
+1  migrations/20251238_add_rev_to_repo_seq.sql
+ALTER TABLE repo_seq ADD COLUMN rev TEXT;
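
Note: the migration adds `rev` with no default and no backfill, so every previously sequenced event keeps rev = NULL; this is why the regenerated .sqlx metadata marks the new column nullable and why the firehose code below falls back to parsing the commit block. A minimal sketch of what that means on the Rust side, assuming an already-connected PgPool (`fetch_rev` is a hypothetical helper, not part of this change):

    use sqlx::PgPool;

    // `rev` maps to Option<String> because the column is nullable;
    // rows sequenced before this migration come back as None.
    async fn fetch_rev(pool: &PgPool, seq: i64) -> Result<Option<String>, sqlx::Error> {
        let row = sqlx::query!("SELECT rev FROM repo_seq WHERE seq = $1", seq)
            .fetch_one(pool)
            .await?;
        Ok(row.rev)
    }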
+107 -3  src/api/repo/import.rs
 use crate::api::ApiError;
+use crate::api::repo::record::create_signed_commit;
 use crate::state::AppState;
 use crate::sync::import::{ImportError, apply_import, parse_car};
 use crate::sync::verify::CarVerifier;
···
     http::StatusCode,
     response::{IntoResponse, Response},
 };
+use jacquard::types::{integer::LimitedU32, string::Tid};
+use jacquard_repo::storage::BlockStore;
+use k256::ecdsa::SigningKey;
 use serde_json::json;
 use tracing::{debug, error, info, warn};
···
         .and_then(|s| s.parse().ok())
         .unwrap_or(DEFAULT_MAX_BLOCKS);
     match apply_import(&state.db, user_id, root, blocks, max_blocks).await {
-        Ok(records) => {
+        Ok(import_result) => {
             info!(
                 "Successfully imported {} records for user {}",
-                records.len(),
+                import_result.records.len(),
                 did
             );
+            let key_row = match sqlx::query!(
+                r#"SELECT uk.key_bytes, uk.encryption_version
+                   FROM user_keys uk
+                   JOIN users u ON uk.user_id = u.id
+                   WHERE u.did = $1"#,
+                did
+            )
+            .fetch_optional(&state.db)
+            .await
+            {
+                Ok(Some(row)) => row,
+                Ok(None) => {
+                    error!("No signing key found for user {}", did);
+                    return (
+                        StatusCode::INTERNAL_SERVER_ERROR,
+                        Json(json!({"error": "InternalError", "message": "Signing key not found"})),
+                    )
+                        .into_response();
+                }
+                Err(e) => {
+                    error!("DB error fetching signing key: {:?}", e);
+                    return (
+                        StatusCode::INTERNAL_SERVER_ERROR,
+                        Json(json!({"error": "InternalError"})),
+                    )
+                        .into_response();
+                }
+            };
+            let key_bytes = match crate::config::decrypt_key(&key_row.key_bytes, key_row.encryption_version) {
+                Ok(k) => k,
+                Err(e) => {
+                    error!("Failed to decrypt signing key: {}", e);
+                    return (
+                        StatusCode::INTERNAL_SERVER_ERROR,
+                        Json(json!({"error": "InternalError"})),
+                    )
+                        .into_response();
+                }
+            };
+            let signing_key = match SigningKey::from_slice(&key_bytes) {
+                Ok(k) => k,
+                Err(e) => {
+                    error!("Invalid signing key: {:?}", e);
+                    return (
+                        StatusCode::INTERNAL_SERVER_ERROR,
+                        Json(json!({"error": "InternalError"})),
+                    )
+                        .into_response();
+                }
+            };
+            let new_rev = Tid::now(LimitedU32::MIN);
+            let new_rev_str = new_rev.to_string();
+            let (commit_bytes, _sig) = match create_signed_commit(
+                did,
+                import_result.data_cid,
+                &new_rev_str,
+                None,
+                &signing_key,
+            ) {
+                Ok(result) => result,
+                Err(e) => {
+                    error!("Failed to create new commit: {}", e);
+                    return (
+                        StatusCode::INTERNAL_SERVER_ERROR,
+                        Json(json!({"error": "InternalError"})),
+                    )
+                        .into_response();
+                }
+            };
+            let new_root_cid: cid::Cid = match state.block_store.put(&commit_bytes).await {
+                Ok(cid) => cid,
+                Err(e) => {
+                    error!("Failed to store new commit block: {:?}", e);
+                    return (
+                        StatusCode::INTERNAL_SERVER_ERROR,
+                        Json(json!({"error": "InternalError"})),
+                    )
+                        .into_response();
+                }
+            };
+            let new_root_str = new_root_cid.to_string();
+            if let Err(e) = sqlx::query!(
+                "UPDATE repos SET repo_root_cid = $1, updated_at = NOW() WHERE user_id = $2",
+                new_root_str,
+                user_id
+            )
+            .execute(&state.db)
+            .await
+            {
+                error!("Failed to update repo root: {:?}", e);
+                return (
+                    StatusCode::INTERNAL_SERVER_ERROR,
+                    Json(json!({"error": "InternalError"})),
+                )
+                    .into_response();
+            }
+            info!(
+                "Created new commit for imported repo: cid={}, rev={}",
+                new_root_str, new_rev_str
+            );
             if !is_migration {
-                if let Err(e) = sequence_import_event(&state, did, &root.to_string()).await {
+                if let Err(e) = sequence_import_event(&state, did, &new_root_str).await {
                     warn!("Failed to sequence import event: {:?}", e);
                 }
             }
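
Note: the import handler now signs a fresh commit over the imported MST root instead of reusing the root from the CAR, so repos.repo_root_cid and the sequenced event always point at a commit this PDS created and whose rev it chose. A short sketch of the rev generation, using the same jacquard call as above (the argument is presumably a clock identifier; TIDs are timestamp-derived and sort lexicographically, so under sane clocks the new rev orders after the revs in the imported history):

    use jacquard::types::{integer::LimitedU32, string::Tid};

    // Same call as in the handler above: a fresh TID, stringified,
    // becomes the rev stored on the newly signed commit.
    fn new_rev() -> String {
        Tid::now(LimitedU32::MIN).to_string()
    }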
+5 -3  src/api/repo/record/utils.rs
···
         .await
         .map_err(|e| format!("Failed to commit transaction: {}", e))?;
     if is_account_active {
-        let _ = sequence_sync_event(state, did, &new_root_cid.to_string()).await;
+        let _ = sequence_sync_event(state, did, &new_root_cid.to_string(), Some(&rev_str)).await;
     }
     Ok(CommitResult {
         commit_cid: new_root_cid,
···
     state: &AppState,
     did: &str,
     commit_cid: &str,
+    rev: Option<&str>,
 ) -> Result<i64, String> {
     let seq_row = sqlx::query!(
         r#"
-        INSERT INTO repo_seq (did, event_type, commit_cid)
-        VALUES ($1, 'sync', $2)
+        INSERT INTO repo_seq (did, event_type, commit_cid, rev)
+        VALUES ($1, 'sync', $2, $3)
         RETURNING seq
         "#,
         did,
         commit_cid,
+        rev,
     )
     .fetch_one(&state.db)
     .await
+32 -3  src/api/server/account_status.rs
···
     response::{IntoResponse, Response},
 };
 use bcrypt::verify;
+use cid::Cid;
 use chrono::{Duration, Utc};
+use jacquard_repo::commit::Commit;
+use jacquard_repo::storage::BlockStore;
 use k256::ecdsa::SigningKey;
 use serde::{Deserialize, Serialize};
 use serde_json::json;
+use std::str::FromStr;
 use tracing::{error, info, warn};
 use uuid::Uuid;
···
         }
     } else if did.starts_with("did:web:") {
         let client = reqwest::Client::new();
-        let did_path = &did[8..];
-        let url = format!("https://{}/.well-known/did.json", did_path.replace(':', "/"));
+        let host_and_path = &did[8..];
+        let decoded = host_and_path.replace("%3A", ":");
+        let parts: Vec<&str> = decoded.split(':').collect();
+        let (host, path_parts) = if parts.len() > 1 && parts[1].chars().all(|c| c.is_ascii_digit()) {
+            (format!("{}:{}", parts[0], parts[1]), parts[2..].to_vec())
+        } else {
+            (parts[0].to_string(), parts[1..].to_vec())
+        };
+        let scheme = if host.starts_with("localhost") || host.starts_with("127.") || host.contains(':') {
+            "http"
+        } else {
+            "https"
+        };
+        let url = if path_parts.is_empty() {
+            format!("{}://{}/.well-known/did.json", scheme, host)
+        } else {
+            format!("{}://{}/{}/did.json", scheme, host, path_parts.join("/"))
+        };
         let resp = client.get(&url).send().await.map_err(|e| {
             warn!("Failed to fetch did:web document for {}: {:?}", did, e);
             (
···
         .ok()
         .flatten();
     if let Some(root_cid) = repo_root {
+        let rev = if let Ok(cid) = Cid::from_str(&root_cid) {
+            if let Ok(Some(block)) = state.block_store.get(&cid).await {
+                Commit::from_cbor(&block).ok().map(|c| c.rev().to_string())
+            } else {
+                None
+            }
+        } else {
+            None
+        };
         if let Err(e) =
-            crate::api::repo::record::sequence_sync_event(&state, &did, &root_cid).await
+            crate::api::repo::record::sequence_sync_event(&state, &did, &root_cid, rev.as_deref()).await
         {
             warn!("Failed to sequence sync event for activation: {}", e);
         }
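
Note: the rewritten did:web branch is worth a worked example. Below is a self-contained sketch of the same derivation rules, ported from the branch above for illustration (`did_web_url` is a hypothetical free function; the %3A port decoding and the http-for-localhost heuristic mirror this diff, not the full did:web spec):

    // Port of the URL derivation in account_status.rs, for illustration only.
    fn did_web_url(did: &str) -> String {
        let decoded = did.trim_start_matches("did:web:").replace("%3A", ":");
        let parts: Vec<&str> = decoded.split(':').collect();
        // A purely numeric second segment is treated as a port on the host.
        let (host, path_parts) = if parts.len() > 1 && parts[1].chars().all(|c| c.is_ascii_digit()) {
            (format!("{}:{}", parts[0], parts[1]), parts[2..].to_vec())
        } else {
            (parts[0].to_string(), parts[1..].to_vec())
        };
        // Local or port-qualified hosts are assumed to be plain http.
        let scheme = if host.starts_with("localhost") || host.starts_with("127.") || host.contains(':') {
            "http"
        } else {
            "https"
        };
        if path_parts.is_empty() {
            format!("{}://{}/.well-known/did.json", scheme, host)
        } else {
            format!("{}://{}/{}/did.json", scheme, host, path_parts.join("/"))
        }
    }

    fn main() {
        assert_eq!(did_web_url("did:web:example.com"), "https://example.com/.well-known/did.json");
        assert_eq!(did_web_url("did:web:localhost%3A8080"), "http://localhost:8080/.well-known/did.json");
        assert_eq!(did_web_url("did:web:example.com:u:alice"), "https://example.com/u/alice/did.json");
    }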
+1  src/sync/firehose.rs
+7 -10  src/sync/import.rs
···
     pub prev: Option<String>,
 }
 
+pub struct ImportResult {
+    pub records: Vec<ImportedRecord>,
+    pub data_cid: Cid,
+}
+
 fn extract_commit_info(commit: &Ipld) -> Result<(Cid, CommitInfo), ImportError> {
     let obj = match commit {
         Ipld::Map(m) => m,
···
     root: Cid,
     blocks: HashMap<Cid, Bytes>,
     max_blocks: usize,
-) -> Result<Vec<ImportedRecord>, ImportError> {
+) -> Result<ImportResult, ImportError> {
     if blocks.len() > max_blocks {
         return Err(ImportError::SizeLimitExceeded);
     }
···
             .await?;
         }
     }
-    let root_str = root.to_string();
-    sqlx::query!(
-        "UPDATE repos SET repo_root_cid = $1, updated_at = NOW() WHERE user_id = $2",
-        root_str,
-        user_id
-    )
-    .execute(&mut *tx)
-    .await?;
     sqlx::query!("DELETE FROM records WHERE repo_id = $1", user_id)
         .execute(&mut *tx)
         .await?;
···
         blocks.len(),
         records.len()
     );
-    Ok(records)
+    Ok(ImportResult { records, data_cid })
 }
 
 #[cfg(test)]
+3 -3  src/sync/listener.rs
···
     let events = sqlx::query_as!(
         SequencedEvent,
         r#"
-        SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+        SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
         FROM repo_seq
         WHERE seq > $1
         ORDER BY seq ASC
···
     let gap_events = sqlx::query_as!(
         SequencedEvent,
         r#"
-        SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+        SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
         FROM repo_seq
         WHERE seq > $1 AND seq < $2
         ORDER BY seq ASC
···
     let event = sqlx::query_as!(
         SequencedEvent,
         r#"
-        SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status
+        SELECT seq, did, created_at, event_type, commit_cid, prev_cid, prev_data_cid, ops, blobs, blocks_cids, handle, active, status, rev
         FROM repo_seq
         WHERE seq = $1
         "#,
+1 -1  src/sync/subscribe_repos.rs
+12 -4  src/sync/util.rs
···
         .get(&commit_cid)
         .await?
         .ok_or_else(|| anyhow::anyhow!("Commit block not found"))?;
-    let rev = extract_rev_from_commit_bytes(&commit_bytes)
-        .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?;
+    let rev = if let Some(ref stored_rev) = event.rev {
+        stored_rev.clone()
+    } else {
+        extract_rev_from_commit_bytes(&commit_bytes)
+            .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?
+    };
     let car_bytes = write_car_blocks(commit_cid, Some(commit_bytes), BTreeMap::new()).await?;
     let frame = SyncFrame {
         did: event.did.clone(),
···
     let commit_bytes = prefetched
         .get(&commit_cid)
         .ok_or_else(|| anyhow::anyhow!("Commit block not found in prefetched"))?;
-    let rev = extract_rev_from_commit_bytes(commit_bytes)
-        .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?;
+    let rev = if let Some(ref stored_rev) = event.rev {
+        stored_rev.clone()
+    } else {
+        extract_rev_from_commit_bytes(commit_bytes)
+            .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit"))?
+    };
     let car_bytes = futures::executor::block_on(write_car_blocks(
         commit_cid,
         Some(commit_bytes.clone()),
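
Note: both fallback sites in util.rs implement the same rule: prefer the rev stored on the sequenced event, and only parse the commit CBOR for legacy events written before the migration (whose rev is NULL). Condensed into a sketch, with `parse_rev` standing in for extract_rev_from_commit_bytes:

    // Shared shape of the two fallbacks above; `parse_rev` is a stand-in
    // for extract_rev_from_commit_bytes, not a reimplementation of it.
    fn resolve_rev(
        stored: Option<&str>,
        commit_bytes: &[u8],
        parse_rev: impl Fn(&[u8]) -> Option<String>,
    ) -> anyhow::Result<String> {
        match stored {
            // Fast path: post-migration events carry the rev directly.
            Some(rev) => Ok(rev.to_string()),
            // Legacy path: recover the rev from the commit block itself.
            None => parse_rev(commit_bytes)
                .ok_or_else(|| anyhow::anyhow!("Could not extract rev from commit")),
        }
    }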
+2 -2  tests/identity.rs
···
     assert!(also_known_as[0].as_str().unwrap().starts_with("at://"));
     assert!(body["verificationMethods"]["atproto"].is_string());
     assert_eq!(
-        body["services"]["atprotoPds"]["type"],
+        body["services"]["atproto_pds"]["type"],
         "AtprotoPersonalDataServer"
     );
-    assert!(body["services"]["atprotoPds"]["endpoint"].is_string());
+    assert!(body["services"]["atproto_pds"]["endpoint"].is_string());
 }
 
 #[tokio::test]
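
Note: the assertion change tracks the snake_case service key. A sketch of the JSON shape the updated test now expects (the endpoint value is a placeholder, not from this diff):

    use serde_json::json;

    // Shape asserted by the updated test: services keyed by "atproto_pds",
    // not "atprotoPds".
    fn expected_services() -> serde_json::Value {
        json!({
            "services": {
                "atproto_pds": {
                    "type": "AtprotoPersonalDataServer",
                    "endpoint": "https://pds.example.com"
                }
            }
        })
    }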
-1  tests/import_with_verification.rs