src/api/admin/account/delete.rs  (+1 -1)

src/api/admin/account/update.rs  (+4 -3)
···
 use crate::api::error::ApiError;
 use crate::auth::BearerAuthAdmin;
 use crate::state::AppState;
-use crate::types::{Did, PlainPassword};
+use crate::types::{Did, Handle, PlainPassword};
 use axum::{
     Json,
     extract::State,
···
         let _ = state.cache.delete(&format!("handle:{}", old)).await;
     }
     let _ = state.cache.delete(&format!("handle:{}", handle)).await;
+    let handle_typed = Handle::new_unchecked(&handle);
     if let Err(e) = crate::api::repo::record::sequence_identity_event(
         &state,
-        did.as_str(),
-        Some(&handle),
+        did,
+        Some(&handle_typed),
     )
     .await
     {
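
Note: this change (and most of the files below) replaces raw `&str` DIDs and handles with newtype wrappers from `crate::types`. The wrappers themselves are not part of this diff; the sketch below shows only the minimal shape the `new_unchecked` / `as_str` call sites assume, using `Did` as the example (the `Handle`, `Nsid`, and `Rkey` wrappers are presumably analogous).

// Hypothetical sketch only: the real crate::types::Did is not shown in this diff.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Did(String);

impl Did {
    /// Wraps a string without validating it, mirroring the `new_unchecked` calls in the diff.
    pub fn new_unchecked(s: &str) -> Self {
        Did(s.to_string())
    }

    /// Borrows the inner string, e.g. for sqlx bind parameters.
    pub fn as_str(&self) -> &str {
        &self.0
    }
}

impl std::fmt::Display for Did {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.0)
    }
}

Keeping `as_str` as the only way back to a plain string is what lets the sqlx bind sites below stay unchanged apart from the accessor call.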
src/api/admin/status.rs  (+10 -8)
···
 use crate::api::error::ApiError;
 use crate::auth::BearerAuthAdmin;
 use crate::state::AppState;
+use crate::types::Did;
 use axum::{
     Json,
     extract::{Query, State},
···
     let subject_type = input.subject.get("$type").and_then(|t| t.as_str());
     match subject_type {
         Some("com.atproto.admin.defs#repoRef") => {
-            let did = input.subject.get("did").and_then(|d| d.as_str());
-            if let Some(did) = did {
+            let did_str = input.subject.get("did").and_then(|d| d.as_str());
+            if let Some(did_str) = did_str {
+                let did = Did::new_unchecked(did_str);
                 let mut tx = match state.db.begin().await {
                     Ok(tx) => tx,
                     Err(e) => {
···
                 if let Err(e) = sqlx::query!(
                     "UPDATE users SET takedown_ref = $1 WHERE did = $2",
                     takedown_ref,
-                    did
+                    did.as_str()
                 )
                 .execute(&mut *tx)
                 .await
···
                 let result = if deactivated.applied {
                     sqlx::query!(
                         "UPDATE users SET deactivated_at = NOW() WHERE did = $1",
-                        did
+                        did.as_str()
                     )
                     .execute(&mut *tx)
                     .await
                 } else {
-                    sqlx::query!("UPDATE users SET deactivated_at = NULL WHERE did = $1", did)
+                    sqlx::query!("UPDATE users SET deactivated_at = NULL WHERE did = $1", did.as_str())
                         .execute(&mut *tx)
                         .await
                 };
···
                 };
                 if let Err(e) = crate::api::repo::record::sequence_account_event(
                     &state,
-                    did,
+                    &did,
                     !takedown.applied,
                     status,
                 )
···
                 };
                 if let Err(e) = crate::api::repo::record::sequence_account_event(
                     &state,
-                    did,
+                    &did,
                     !deactivated.applied,
                     status,
                 )
···
                     }
                 }
                 if let Ok(Some(handle)) =
-                    sqlx::query_scalar!("SELECT handle FROM users WHERE did = $1", did)
+                    sqlx::query_scalar!("SELECT handle FROM users WHERE did = $1", did.as_str())
                         .fetch_optional(&state.db)
                         .await
                 {
src/api/delegation.rs  (+12 -9)
···
 use crate::delegation::{self, DelegationActionType};
 use crate::oauth::db as oauth_db;
 use crate::state::{AppState, RateLimitKind};
-use crate::types::{Did, Handle};
+use crate::types::{Did, Handle, Nsid, Rkey};
 use crate::util::extract_client_ip;
 use axum::{
     Json,
···
             .into_response();
     }

-    let did = genesis_result.did;
+    let did = Did::new_unchecked(&genesis_result.did);
+    let handle = Handle::new_unchecked(&handle);
     info!(did = %did, handle = %handle, controller = %&auth.0.did, "Created DID for delegated account");

     let mut tx = match state.db.begin().await {
···
             account_type, preferred_comms_channel
         ) VALUES ($1, $2, $3, NULL, FALSE, 'delegated'::account_type, 'email'::comms_channel) RETURNING id"#,
     )
-    .bind(&handle)
+    .bind(handle.as_str())
     .bind(&email)
-    .bind(&did)
+    .bind(did.as_str())
     .fetch_one(&mut *tx)
     .await;

···
     if let Err(e) = sqlx::query!(
         r#"INSERT INTO account_delegations (delegated_did, controller_did, granted_scopes, granted_by)
            VALUES ($1, $2, $3, $4)"#,
-        did,
-        &auth.0.did,
+        did.as_str(),
+        auth.0.did.as_str(),
         input.controller_scopes,
-        &auth.0.did
+        auth.0.did.as_str()
     )
     .execute(&mut *tx)
     .await
···
         "$type": "app.bsky.actor.profile",
         "displayName": handle
     });
+    let profile_collection = Nsid::new_unchecked("app.bsky.actor.profile");
+    let profile_rkey = Rkey::new_unchecked("self");
     if let Err(e) = crate::api::repo::record::create_record_internal(
         &state,
         &did,
-        "app.bsky.actor.profile",
-        "self",
+        &profile_collection,
+        &profile_rkey,
         &profile_record,
     )
     .await
src/api/identity/account.rs  (+14 -9)
···
 use crate::auth::{ServiceTokenVerifier, is_service_token};
 use crate::plc::{PlcClient, create_genesis_operation, signing_key_to_did_key};
 use crate::state::{AppState, RateLimitKind};
-use crate::types::{Did, Handle, PlainPassword};
+use crate::types::{Did, Handle, Nsid, PlainPassword, Rkey};
 use crate::validation::validate_password;
 use axum::{
     Json,
···
         }
     };
     let rev = Tid::now(LimitedU32::MIN);
+    let did_for_commit = Did::new_unchecked(&did);
     let (commit_bytes, _sig) =
-        match create_signed_commit(&did, mst_root, rev.as_ref(), None, &signing_key) {
+        match create_signed_commit(&did_for_commit, mst_root, rev.as_ref(), None, &signing_key) {
             Ok(result) => result,
             Err(e) => {
                 error!("Error creating genesis commit: {:?}", e);
···
         return ApiError::InternalError(None).into_response();
     }
     if !is_migration && !is_did_web_byod {
+        let did_typed = Did::new_unchecked(&did);
+        let handle_typed = Handle::new_unchecked(&handle);
         if let Err(e) =
-            crate::api::repo::record::sequence_identity_event(&state, &did, Some(&handle)).await
+            crate::api::repo::record::sequence_identity_event(&state, &did_typed, Some(&handle_typed)).await
         {
             warn!("Failed to sequence identity event for {}: {}", did, e);
         }
         if let Err(e) =
-            crate::api::repo::record::sequence_account_event(&state, &did, true, None).await
+            crate::api::repo::record::sequence_account_event(&state, &did_typed, true, None).await
         {
             warn!("Failed to sequence account event for {}: {}", did, e);
         }
         if let Err(e) = crate::api::repo::record::sequence_genesis_commit(
             &state,
-            &did,
+            &did_typed,
             &commit_cid,
             &mst_root,
             &rev_str,
···
         }
         if let Err(e) = crate::api::repo::record::sequence_sync_event(
             &state,
-            &did,
+            &did_typed,
             &commit_cid_str,
             Some(rev.as_ref()),
         )
···
             "$type": "app.bsky.actor.profile",
             "displayName": input.handle
         });
+        let profile_collection = Nsid::new_unchecked("app.bsky.actor.profile");
+        let profile_rkey = Rkey::new_unchecked("self");
         if let Err(e) = crate::api::repo::record::create_record_internal(
             &state,
-            &did,
-            "app.bsky.actor.profile",
-            "self",
+            &did_typed,
+            &profile_collection,
+            &profile_rkey,
             &profile_record,
         )
         .await
src/api/identity/did.rs  (+7 -3)
···
 use crate::auth::BearerAuthAllowDeactivated;
 use crate::plc::signing_key_to_did_key;
 use crate::state::AppState;
+use crate::types::Handle;
 use axum::{
     Json,
     extract::{Path, Query, State},
···
         format!("{}.{}", new_handle, hostname)
     };
     if full_handle == current_handle {
+        let handle_typed = Handle::new_unchecked(&full_handle);
         if let Err(e) =
-            crate::api::repo::record::sequence_identity_event(&state, &did, Some(&full_handle))
+            crate::api::repo::record::sequence_identity_event(&state, &did, Some(&handle_typed))
                 .await
         {
             warn!("Failed to sequence identity event for handle update: {}", e);
···
         full_handle
     } else {
         if new_handle == current_handle {
+            let handle_typed = Handle::new_unchecked(&new_handle);
             if let Err(e) =
-                crate::api::repo::record::sequence_identity_event(&state, &did, Some(&new_handle))
+                crate::api::repo::record::sequence_identity_event(&state, &did, Some(&handle_typed))
                     .await
             {
                 warn!("Failed to sequence identity event for handle update: {}", e);
···
             .await;
     }
     let _ = state.cache.delete(&format!("handle:{}", handle)).await;
+    let handle_typed = Handle::new_unchecked(&handle);
     if let Err(e) =
-        crate::api::repo::record::sequence_identity_event(&state, &did, Some(&handle)).await
+        crate::api::repo::record::sequence_identity_event(&state, &did, Some(&handle_typed)).await
     {
         warn!("Failed to sequence identity event for handle update: {}", e);
     }
src/api/repo/record/batch.rs  (+190 -139)
···
 use crate::delegation::{self, DelegationActionType};
 use crate::repo::tracking::TrackingBlockStore;
 use crate::state::AppState;
-use crate::types::{AtIdentifier, AtUri, Nsid, Rkey};
+use crate::types::{AtIdentifier, AtUri, Did, Nsid, Rkey};
 use axum::{
     Json,
     extract::State,
···
 use tracing::{error, info};

 const MAX_BATCH_WRITES: usize = 200;
+
+struct WriteAccumulator {
+    mst: Mst<TrackingBlockStore>,
+    results: Vec<WriteResult>,
+    ops: Vec<RecordOp>,
+    modified_keys: Vec<String>,
+    all_blob_cids: Vec<String>,
+}
+
+async fn process_single_write(
+    write: &WriteOp,
+    acc: WriteAccumulator,
+    did: &Did,
+    validate: Option<bool>,
+    tracking_store: &TrackingBlockStore,
+) -> Result<WriteAccumulator, Response> {
+    let WriteAccumulator {
+        mst,
+        mut results,
+        mut ops,
+        mut modified_keys,
+        mut all_blob_cids,
+    } = acc;
+
+    match write {
+        WriteOp::Create {
+            collection,
+            rkey,
+            value,
+        } => {
+            let validation_status = match validate {
+                Some(false) => None,
+                _ => {
+                    let require_lexicon = validate == Some(true);
+                    match validate_record_with_status(
+                        value,
+                        collection,
+                        rkey.as_ref(),
+                        require_lexicon,
+                    ) {
+                        Ok(status) => Some(status),
+                        Err(err_response) => return Err(*err_response),
+                    }
+                }
+            };
+            all_blob_cids.extend(extract_blob_cids(value));
+            let rkey = rkey.clone().unwrap_or_else(Rkey::generate);
+            let record_ipld = crate::util::json_to_ipld(value);
+            let record_bytes = serde_ipld_dagcbor::to_vec(&record_ipld).map_err(|_| {
+                ApiError::InvalidRecord("Failed to serialize record".into()).into_response()
+            })?;
+            let record_cid = tracking_store.put(&record_bytes).await.map_err(|_| {
+                ApiError::InternalError(Some("Failed to store record".into())).into_response()
+            })?;
+            let key = format!("{}/{}", collection, rkey);
+            modified_keys.push(key.clone());
+            let new_mst = mst.add(&key, record_cid).await.map_err(|_| {
+                ApiError::InternalError(Some("Failed to add to MST".into())).into_response()
+            })?;
+            let uri = AtUri::from_parts(did, collection, &rkey);
+            results.push(WriteResult::CreateResult {
+                uri,
+                cid: record_cid.to_string(),
+                validation_status: validation_status.map(|s| s.to_string()),
+            });
+            ops.push(RecordOp::Create {
+                collection: collection.clone(),
+                rkey: rkey.clone(),
+                cid: record_cid,
+            });
+            Ok(WriteAccumulator {
+                mst: new_mst,
+                results,
+                ops,
+                modified_keys,
+                all_blob_cids,
+            })
+        }
+        WriteOp::Update {
+            collection,
+            rkey,
+            value,
+        } => {
+            let validation_status = match validate {
+                Some(false) => None,
+                _ => {
+                    let require_lexicon = validate == Some(true);
+                    match validate_record_with_status(
+                        value,
+                        collection,
+                        Some(rkey),
+                        require_lexicon,
+                    ) {
+                        Ok(status) => Some(status),
+                        Err(err_response) => return Err(*err_response),
+                    }
+                }
+            };
+            all_blob_cids.extend(extract_blob_cids(value));
+            let record_ipld = crate::util::json_to_ipld(value);
+            let record_bytes = serde_ipld_dagcbor::to_vec(&record_ipld).map_err(|_| {
+                ApiError::InvalidRecord("Failed to serialize record".into()).into_response()
+            })?;
+            let record_cid = tracking_store.put(&record_bytes).await.map_err(|_| {
+                ApiError::InternalError(Some("Failed to store record".into())).into_response()
+            })?;
+            let key = format!("{}/{}", collection, rkey);
+            modified_keys.push(key.clone());
+            let prev_record_cid = mst.get(&key).await.ok().flatten();
+            let new_mst = mst.update(&key, record_cid).await.map_err(|_| {
+                ApiError::InternalError(Some("Failed to update MST".into())).into_response()
+            })?;
+            let uri = AtUri::from_parts(did, collection, rkey);
+            results.push(WriteResult::UpdateResult {
+                uri,
+                cid: record_cid.to_string(),
+                validation_status: validation_status.map(|s| s.to_string()),
+            });
+            ops.push(RecordOp::Update {
+                collection: collection.clone(),
+                rkey: rkey.clone(),
+                cid: record_cid,
+                prev: prev_record_cid,
+            });
+            Ok(WriteAccumulator {
+                mst: new_mst,
+                results,
+                ops,
+                modified_keys,
+                all_blob_cids,
+            })
+        }
+        WriteOp::Delete { collection, rkey } => {
+            let key = format!("{}/{}", collection, rkey);
+            modified_keys.push(key.clone());
+            let prev_record_cid = mst.get(&key).await.ok().flatten();
+            let new_mst = mst.delete(&key).await.map_err(|_| {
+                ApiError::InternalError(Some("Failed to delete from MST".into())).into_response()
+            })?;
+            results.push(WriteResult::DeleteResult {});
+            ops.push(RecordOp::Delete {
+                collection: collection.clone(),
+                rkey: rkey.clone(),
+                prev: prev_record_cid,
+            });
+            Ok(WriteAccumulator {
+                mst: new_mst,
+                results,
+                ops,
+                modified_keys,
+                all_blob_cids,
+            })
+        }
+    }
+}
+
+async fn process_writes(
+    writes: &[WriteOp],
+    initial_mst: Mst<TrackingBlockStore>,
+    did: &Did,
+    validate: Option<bool>,
+    tracking_store: &TrackingBlockStore,
+) -> Result<WriteAccumulator, Response> {
+    use futures::stream::{self, TryStreamExt};
+    let initial_acc = WriteAccumulator {
+        mst: initial_mst,
+        results: Vec::new(),
+        ops: Vec::new(),
+        modified_keys: Vec::new(),
+        all_blob_cids: Vec::new(),
+    };
+    stream::iter(writes.iter().map(Ok::<_, Response>))
+        .try_fold(initial_acc, |acc, write| async move {
+            process_single_write(write, acc, did, validate, tracking_store).await
+        })
+        .await
+}

 #[derive(Deserialize)]
 #[serde(tag = "$type")]
···
         _ => return ApiError::InternalError(Some("Failed to parse commit".into())).into_response(),
     };
     let original_mst = Mst::load(Arc::new(tracking_store.clone()), commit.data, None);
-    let mut mst = Mst::load(Arc::new(tracking_store.clone()), commit.data, None);
-    let mut results: Vec<WriteResult> = Vec::new();
-    let mut ops: Vec<RecordOp> = Vec::new();
-    let mut modified_keys: Vec<String> = Vec::new();
-    let mut all_blob_cids: Vec<String> = Vec::new();
-    for write in &input.writes {
-        match write {
-            WriteOp::Create {
-                collection,
-                rkey,
-                value,
-            } => {
-                let validation_status = if input.validate == Some(false) {
-                    None
-                } else {
-                    let require_lexicon = input.validate == Some(true);
-                    match validate_record_with_status(
-                        value,
-                        collection,
-                        rkey.as_ref().map(|r| r.as_str()),
-                        require_lexicon,
-                    ) {
-                        Ok(status) => Some(status),
-                        Err(err_response) => return *err_response,
-                    }
-                };
-                all_blob_cids.extend(extract_blob_cids(value));
-                let rkey = rkey.clone().unwrap_or_else(Rkey::generate);
-                let record_ipld = crate::util::json_to_ipld(value);
-                let mut record_bytes = Vec::new();
-                if serde_ipld_dagcbor::to_writer(&mut record_bytes, &record_ipld).is_err() {
-                    return ApiError::InvalidRecord("Failed to serialize record".into())
-                        .into_response();
-                }
-                let record_cid = match tracking_store.put(&record_bytes).await {
-                    Ok(c) => c,
-                    Err(_) => {
-                        return ApiError::InternalError(Some("Failed to store record".into()))
-                            .into_response();
-                    }
-                };
-                let key = format!("{}/{}", collection, rkey);
-                modified_keys.push(key.clone());
-                mst = match mst.add(&key, record_cid).await {
-                    Ok(m) => m,
-                    Err(_) => {
-                        return ApiError::InternalError(Some("Failed to add to MST".into()))
-                            .into_response();
-                    }
-                };
-                let uri = AtUri::from_parts(&did, collection, &rkey);
-                results.push(WriteResult::CreateResult {
-                    uri,
-                    cid: record_cid.to_string(),
-                    validation_status: validation_status.map(|s| s.to_string()),
-                });
-                ops.push(RecordOp::Create {
-                    collection: collection.to_string(),
-                    rkey: rkey.to_string(),
-                    cid: record_cid,
-                });
-            }
-            WriteOp::Update {
-                collection,
-                rkey,
-                value,
-            } => {
-                let validation_status = if input.validate == Some(false) {
-                    None
-                } else {
-                    let require_lexicon = input.validate == Some(true);
-                    match validate_record_with_status(
-                        value,
-                        collection,
-                        Some(rkey.as_str()),
-                        require_lexicon,
-                    ) {
-                        Ok(status) => Some(status),
-                        Err(err_response) => return *err_response,
-                    }
-                };
-                all_blob_cids.extend(extract_blob_cids(value));
-                let record_ipld = crate::util::json_to_ipld(value);
-                let mut record_bytes = Vec::new();
-                if serde_ipld_dagcbor::to_writer(&mut record_bytes, &record_ipld).is_err() {
-                    return ApiError::InvalidRecord("Failed to serialize record".into())
-                        .into_response();
-                }
-                let record_cid = match tracking_store.put(&record_bytes).await {
-                    Ok(c) => c,
-                    Err(_) => {
-                        return ApiError::InternalError(Some("Failed to store record".into()))
-                            .into_response();
-                    }
-                };
-                let key = format!("{}/{}", collection, rkey);
-                modified_keys.push(key.clone());
-                let prev_record_cid = mst.get(&key).await.ok().flatten();
-                mst = match mst.update(&key, record_cid).await {
-                    Ok(m) => m,
-                    Err(_) => {
-                        return ApiError::InternalError(Some("Failed to update MST".into()))
-                            .into_response();
-                    }
-                };
-                let uri = AtUri::from_parts(&did, collection, rkey);
-                results.push(WriteResult::UpdateResult {
-                    uri,
-                    cid: record_cid.to_string(),
-                    validation_status: validation_status.map(|s| s.to_string()),
-                });
-                ops.push(RecordOp::Update {
-                    collection: collection.to_string(),
-                    rkey: rkey.to_string(),
-                    cid: record_cid,
-                    prev: prev_record_cid,
-                });
-            }
-            WriteOp::Delete { collection, rkey } => {
-                let key = format!("{}/{}", collection, rkey);
-                modified_keys.push(key.clone());
-                let prev_record_cid = mst.get(&key).await.ok().flatten();
-                mst = match mst.delete(&key).await {
-                    Ok(m) => m,
-                    Err(_) => {
-                        return ApiError::InternalError(Some("Failed to delete from MST".into()))
-                            .into_response();
-                    }
-                };
-                results.push(WriteResult::DeleteResult {});
-                ops.push(RecordOp::Delete {
-                    collection: collection.to_string(),
-                    rkey: rkey.to_string(),
-                    prev: prev_record_cid,
-                });
-            }
-        }
-    }
+    let initial_mst = Mst::load(Arc::new(tracking_store.clone()), commit.data, None);
+    let WriteAccumulator {
+        mst,
+        results,
+        ops,
+        modified_keys,
+        all_blob_cids,
+    } = match process_writes(&input.writes, initial_mst, &did, input.validate, &tracking_store).await
+    {
+        Ok(acc) => acc,
+        Err(response) => return response,
+    };
     let new_mst_root = match mst.persist().await {
         Ok(c) => c,
         Err(_) => {
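
The rewrite above replaces the imperative for-loop over `input.writes` with `process_writes`, which folds every write through a `WriteAccumulator` and lets the first error abort the whole batch. A standalone sketch of that fold shape, with hypothetical `Acc` / `apply` / `process` names standing in for the real types:

use futures::stream::{self, TryStreamExt};

// Simplified accumulator standing in for WriteAccumulator.
#[derive(Default, Debug)]
struct Acc {
    results: Vec<String>,
}

// Stand-in for process_single_write: consume the accumulator and return it, or an error.
async fn apply(mut acc: Acc, write: &str) -> Result<Acc, String> {
    if write.is_empty() {
        return Err("empty write".to_string());
    }
    acc.results.push(format!("applied {}", write));
    Ok(acc)
}

// Stand-in for process_writes: fold every write through the accumulator,
// short-circuiting on the first Err.
async fn process(writes: &[&str]) -> Result<Acc, String> {
    stream::iter(writes.iter().map(Ok::<_, String>))
        .try_fold(Acc::default(), |acc, write| async move { apply(acc, write).await })
        .await
}

The `async move` block captures the accumulator by value, which is why the real `process_single_write` consumes and returns a `WriteAccumulator` instead of mutating it in place.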
src/api/repo/record/delete.rs  (+2 -9)
···
         return e;
     }

-    if crate::util::is_account_migrated(&state.db, &auth.did)
-        .await
-        .unwrap_or(false)
-    {
-        return ApiError::AccountMigrated.into_response();
-    }
-
     let did = auth.did;
     let user_id = auth.user_id;
     let current_root_cid = auth.current_root_cid;
···
     let collection_for_audit = input.collection.to_string();
     let rkey_for_audit = input.rkey.to_string();
     let op = RecordOp::Delete {
-        collection: input.collection.to_string(),
-        rkey: rkey_for_audit.clone(),
+        collection: input.collection.clone(),
+        rkey: input.rkey.clone(),
         prev: prev_record_cid,
     };
     let mut new_mst_blocks = std::collections::BTreeMap::new();
src/api/repo/record/read.rs  (+24 -23)
···
 use jacquard_repo::storage::BlockStore;
 use serde::{Deserialize, Serialize};
 use serde_json::{Map, Value, json};
-use std::collections::HashMap;
 use std::str::FromStr;
 use tracing::error;

···
         }
     };
     let last_rkey = rows.last().map(|(rkey, _)| rkey.clone());
-    let mut cid_to_rkey: HashMap<Cid, (String, String)> = HashMap::new();
-    let mut cids: Vec<Cid> = Vec::with_capacity(rows.len());
-    for (rkey, cid_str) in &rows {
-        if let Ok(cid) = Cid::from_str(cid_str) {
-            cid_to_rkey.insert(cid, (rkey.clone(), cid_str.clone()));
-            cids.push(cid);
-        }
-    }
+    let parsed_rows: Vec<(Cid, String, String)> = rows
+        .iter()
+        .filter_map(|(rkey, cid_str)| {
+            Cid::from_str(cid_str)
+                .ok()
+                .map(|cid| (cid, rkey.clone(), cid_str.clone()))
+        })
+        .collect();
+    let cids: Vec<Cid> = parsed_rows.iter().map(|(cid, _, _)| *cid).collect();
     let blocks = match state.block_store.get_many(&cids).await {
         Ok(b) => b,
         Err(e) => {
···
             return ApiError::InternalError(None).into_response();
         }
     };
-    let mut records = Vec::new();
-    for (cid, block_opt) in cids.iter().zip(blocks.into_iter()) {
-        if let Some(block) = block_opt
-            && let Some((rkey, cid_str)) = cid_to_rkey.get(cid)
-            && let Ok(ipld) = serde_ipld_dagcbor::from_slice::<Ipld>(&block)
-        {
-            let value = ipld_to_json(ipld);
-            records.push(json!({
-                "uri": format!("at://{}/{}/{}", input.repo, input.collection, rkey),
-                "cid": cid_str,
-                "value": value
-            }));
-        }
-    }
+    let records: Vec<Value> = parsed_rows
+        .iter()
+        .zip(blocks.into_iter())
+        .filter_map(|((_, rkey, cid_str), block_opt)| {
+            block_opt.and_then(|block| {
+                serde_ipld_dagcbor::from_slice::<Ipld>(&block).ok().map(|ipld| {
+                    json!({
+                        "uri": format!("at://{}/{}/{}", input.repo, input.collection, rkey),
+                        "cid": cid_str,
+                        "value": ipld_to_json(ipld)
+                    })
+                })
+            })
+        })
+        .collect();
     Json(ListRecordsOutput {
         cursor: last_rkey,
         records,
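
listRecords above drops the `HashMap` keyed by CID and instead parses the rows once, then pairs each parsed row with its fetched block by position. A minimal illustration of that zip + filter_map shape, with a hypothetical `pair_rows` helper and simplified types (the real code builds JSON records from IPLD blocks):

// Hypothetical helper mirroring the shape above: rows are (rkey, cid) pairs and the
// fetched blocks arrive in the same order, so zipping keeps the association without a map.
fn pair_rows(rows: &[(String, String)], blocks: Vec<Option<Vec<u8>>>) -> Vec<String> {
    rows.iter()
        .zip(blocks.into_iter())
        .filter_map(|((rkey, cid), block)| {
            block.map(|bytes| format!("{}: {} ({} bytes)", rkey, cid, bytes.len()))
        })
        .collect()
}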
src/api/repo/record/utils.rs  (+104 -63)
···
 use crate::state::AppState;
+use crate::types::{Did, Handle, Nsid, Rkey};
 use bytes::Bytes;
 use cid::Cid;
 use jacquard::types::{integer::LimitedU32, string::Tid};
···
 }

 pub fn create_signed_commit(
-    did: &str,
+    did: &Did,
     data: Cid,
     rev: &str,
     prev: Option<Cid>,
     signing_key: &SigningKey,
 ) -> Result<(Vec<u8>, Bytes), String> {
-    let did =
-        jacquard::types::string::Did::new(did).map_err(|e| format!("Invalid DID: {:?}", e))?;
+    let did = jacquard::types::string::Did::new(did.as_str())
+        .map_err(|e| format!("Invalid DID: {:?}", e))?;
     let rev =
         jacquard::types::string::Tid::from_str(rev).map_err(|e| format!("Invalid TID: {:?}", e))?;
     let unsigned = Commit::new_unsigned(did, data, rev, prev);
···

 pub enum RecordOp {
     Create {
-        collection: String,
-        rkey: String,
+        collection: Nsid,
+        rkey: Rkey,
         cid: Cid,
     },
     Update {
-        collection: String,
-        rkey: String,
+        collection: Nsid,
+        rkey: Rkey,
         cid: Cid,
         prev: Option<Cid>,
     },
     Delete {
-        collection: String,
-        rkey: String,
+        collection: Nsid,
+        rkey: Rkey,
         prev: Option<Cid>,
     },
 }
···
 }

 pub struct CommitParams<'a> {
-    pub did: &'a str,
+    pub did: &'a Did,
     pub user_id: Uuid,
     pub current_root_cid: Option<Cid>,
     pub prev_data_cid: Option<Cid>,
···
         .await
         .map_err(|e| format!("DB Error (user_blocks delete obsolete): {}", e))?;
     }
-    let mut upsert_collections: Vec<String> = Vec::new();
-    let mut upsert_rkeys: Vec<String> = Vec::new();
-    let mut upsert_cids: Vec<String> = Vec::new();
-    let mut delete_collections: Vec<String> = Vec::new();
-    let mut delete_rkeys: Vec<String> = Vec::new();
-    for op in &ops {
-        match op {
-            RecordOp::Create {
-                collection,
-                rkey,
-                cid,
-            }
-            | RecordOp::Update {
-                collection,
-                rkey,
-                cid,
-                ..
-            } => {
-                upsert_collections.push(collection.clone());
-                upsert_rkeys.push(rkey.clone());
-                upsert_cids.push(cid.to_string());
-            }
+    let (upserts, deletes): (Vec<_>, Vec<_>) = ops.iter().partition(|op| {
+        matches!(op, RecordOp::Create { .. } | RecordOp::Update { .. })
+    });
+    let (upsert_collections, upsert_rkeys, upsert_cids): (Vec<String>, Vec<String>, Vec<String>) =
+        upserts
+            .into_iter()
+            .filter_map(|op| match op {
+                RecordOp::Create {
+                    collection,
+                    rkey,
+                    cid,
+                }
+                | RecordOp::Update {
+                    collection,
+                    rkey,
+                    cid,
+                    ..
+                } => Some((collection.to_string(), rkey.to_string(), cid.to_string())),
+                _ => None,
+            })
+            .fold(
+                (Vec::new(), Vec::new(), Vec::new()),
+                |(mut cols, mut rkeys, mut cids), (c, r, ci)| {
+                    cols.push(c);
+                    rkeys.push(r);
+                    cids.push(ci);
+                    (cols, rkeys, cids)
+                },
+            );
+    let (delete_collections, delete_rkeys): (Vec<String>, Vec<String>) = deletes
+        .into_iter()
+        .filter_map(|op| match op {
             RecordOp::Delete {
                 collection, rkey, ..
-            } => {
-                delete_collections.push(collection.clone());
-                delete_rkeys.push(rkey.clone());
-            }
-        }
-    }
+            } => Some((collection.to_string(), rkey.to_string())),
+            _ => None,
+        })
+        .unzip();
     if !upsert_collections.is_empty() {
         sqlx::query!(
             r#"
···
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
         RETURNING seq
         "#,
-        did,
+        did.as_str(),
         event_type,
         new_root_cid.to_string(),
         prev_cid_str,
···
 }
 pub async fn create_record_internal(
     state: &AppState,
-    did: &str,
-    collection: &str,
-    rkey: &str,
+    did: &Did,
+    collection: &Nsid,
+    rkey: &Rkey,
     record: &serde_json::Value,
 ) -> Result<(String, Cid), String> {
     use crate::repo::tracking::TrackingBlockStore;
     use jacquard_repo::mst::Mst;
     use std::sync::Arc;
-    let user_id: Uuid = sqlx::query_scalar!("SELECT id FROM users WHERE did = $1", did)
+    let user_id: Uuid = sqlx::query_scalar!("SELECT id FROM users WHERE did = $1", did.as_str())
         .fetch_optional(&state.db)
         .await
         .map_err(|e| format!("DB error: {}", e))?
···
         .await
         .map_err(|e| format!("Failed to persist MST: {:?}", e))?;
     let op = RecordOp::Create {
-        collection: collection.to_string(),
-        rkey: rkey.to_string(),
+        collection: collection.clone(),
+        rkey: rkey.clone(),
         cid: record_cid,
     };
     let mut new_mst_blocks = std::collections::BTreeMap::new();
···

 pub async fn sequence_identity_event(
     state: &AppState,
-    did: &str,
-    handle: Option<&str>,
+    did: &Did,
+    handle: Option<&Handle>,
 ) -> Result<i64, String> {
+    let mut tx = state
+        .db
+        .begin()
+        .await
+        .map_err(|e| format!("Failed to begin transaction: {}", e))?;
     let seq_row = sqlx::query!(
         r#"
         INSERT INTO repo_seq (did, event_type, handle)
         VALUES ($1, 'identity', $2)
         RETURNING seq
         "#,
-        did,
-        handle,
+        did.as_str(),
+        handle.map(|h| h.as_str()),
     )
-    .fetch_one(&state.db)
+    .fetch_one(&mut *tx)
     .await
     .map_err(|e| format!("DB Error (repo_seq identity): {}", e))?;
     sqlx::query(&format!("NOTIFY repo_updates, '{}'", seq_row.seq))
-        .execute(&state.db)
+        .execute(&mut *tx)
         .await
         .map_err(|e| format!("DB Error (notify): {}", e))?;
+    tx.commit()
+        .await
+        .map_err(|e| format!("Failed to commit transaction: {}", e))?;
     Ok(seq_row.seq)
 }
 pub async fn sequence_account_event(
     state: &AppState,
-    did: &str,
+    did: &Did,
     active: bool,
     status: Option<&str>,
 ) -> Result<i64, String> {
+    let mut tx = state
+        .db
+        .begin()
+        .await
+        .map_err(|e| format!("Failed to begin transaction: {}", e))?;
     let seq_row = sqlx::query!(
         r#"
         INSERT INTO repo_seq (did, event_type, active, status)
         VALUES ($1, 'account', $2, $3)
         RETURNING seq
         "#,
-        did,
+        did.as_str(),
         active,
         status,
     )
-    .fetch_one(&state.db)
+    .fetch_one(&mut *tx)
     .await
     .map_err(|e| format!("DB Error (repo_seq account): {}", e))?;
     sqlx::query(&format!("NOTIFY repo_updates, '{}'", seq_row.seq))
-        .execute(&state.db)
+        .execute(&mut *tx)
         .await
         .map_err(|e| format!("DB Error (notify): {}", e))?;
+    tx.commit()
+        .await
+        .map_err(|e| format!("Failed to commit transaction: {}", e))?;
     Ok(seq_row.seq)
 }
 pub async fn sequence_sync_event(
     state: &AppState,
-    did: &str,
+    did: &Did,
     commit_cid: &str,
     rev: Option<&str>,
 ) -> Result<i64, String> {
+    let mut tx = state
+        .db
+        .begin()
+        .await
+        .map_err(|e| format!("Failed to begin transaction: {}", e))?;
     let seq_row = sqlx::query!(
         r#"
         INSERT INTO repo_seq (did, event_type, commit_cid, rev)
         VALUES ($1, 'sync', $2, $3)
         RETURNING seq
         "#,
-        did,
+        did.as_str(),
         commit_cid,
         rev,
     )
-    .fetch_one(&state.db)
+    .fetch_one(&mut *tx)
     .await
     .map_err(|e| format!("DB Error (repo_seq sync): {}", e))?;
     sqlx::query(&format!("NOTIFY repo_updates, '{}'", seq_row.seq))
-        .execute(&state.db)
+        .execute(&mut *tx)
         .await
         .map_err(|e| format!("DB Error (notify): {}", e))?;
+    tx.commit()
+        .await
+        .map_err(|e| format!("Failed to commit transaction: {}", e))?;
     Ok(seq_row.seq)
 }

 pub async fn sequence_genesis_commit(
     state: &AppState,
-    did: &str,
+    did: &Did,
     commit_cid: &Cid,
     mst_root_cid: &Cid,
     rev: &str,
···
     let blocks_cids: Vec<String> = vec![mst_root_cid.to_string(), commit_cid.to_string()];
     let prev_cid: Option<&str> = None;
     let commit_cid_str = commit_cid.to_string();
+    let mut tx = state
+        .db
+        .begin()
+        .await
+        .map_err(|e| format!("Failed to begin transaction: {}", e))?;
     let seq_row = sqlx::query!(
         r#"
         INSERT INTO repo_seq (did, event_type, commit_cid, prev_cid, ops, blobs, blocks_cids, rev)
         VALUES ($1, 'commit', $2, $3::TEXT, $4, $5, $6, $7)
         RETURNING seq
         "#,
-        did,
+        did.as_str(),
         commit_cid_str,
         prev_cid,
         ops,
···
         &blocks_cids,
         rev
     )
-    .fetch_one(&state.db)
+    .fetch_one(&mut *tx)
     .await
     .map_err(|e| format!("DB Error (repo_seq genesis commit): {}", e))?;
     sqlx::query(&format!("NOTIFY repo_updates, '{}'", seq_row.seq))
-        .execute(&state.db)
+        .execute(&mut *tx)
         .await
         .map_err(|e| format!("DB Error (notify): {}", e))?;
+    tx.commit()
+        .await
+        .map_err(|e| format!("Failed to commit transaction: {}", e))?;
     Ok(seq_row.seq)
 }
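
The sequencing helpers above now issue the `INSERT INTO repo_seq ...` and the follow-up `NOTIFY` on one transaction, so listeners only ever observe sequence numbers that are actually committed. A condensed sketch of that pattern, using the non-macro sqlx API and a simplified table (assumption: the real helpers use `sqlx::query!` against the full `repo_seq` schema, as shown in the diff):

use sqlx::PgPool;

/// Sketch of the sequencing pattern above: the INSERT and the NOTIFY run on the
/// same transaction, so a listener never sees a seq that was rolled back.
async fn sequence_event(db: &PgPool, did: &str, event_type: &str) -> Result<i64, String> {
    let mut tx = db
        .begin()
        .await
        .map_err(|e| format!("Failed to begin transaction: {}", e))?;
    let seq: i64 = sqlx::query_scalar(
        "INSERT INTO repo_seq (did, event_type) VALUES ($1, $2) RETURNING seq",
    )
    .bind(did)
    .bind(event_type)
    .fetch_one(&mut *tx)
    .await
    .map_err(|e| format!("DB Error (repo_seq): {}", e))?;
    sqlx::query(&format!("NOTIFY repo_updates, '{}'", seq))
        .execute(&mut *tx)
        .await
        .map_err(|e| format!("DB Error (notify): {}", e))?;
    tx.commit()
        .await
        .map_err(|e| format!("Failed to commit transaction: {}", e))?;
    Ok(seq)
}

Because PostgreSQL delivers NOTIFY payloads only when the issuing transaction commits, the notification and the row it refers to now become visible together.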
src/api/repo/record/validation.rs  (+12 -7)
···
 use crate::api::error::ApiError;
+use crate::types::{Nsid, Rkey};
 use crate::validation::{RecordValidator, ValidationError, ValidationStatus};
 use axum::response::Response;

-pub fn validate_record(record: &serde_json::Value, collection: &str) -> Result<(), Box<Response>> {
+pub fn validate_record(record: &serde_json::Value, collection: &Nsid) -> Result<(), Box<Response>> {
     validate_record_with_rkey(record, collection, None)
 }

 pub fn validate_record_with_rkey(
     record: &serde_json::Value,
-    collection: &str,
-    rkey: Option<&str>,
+    collection: &Nsid,
+    rkey: Option<&Rkey>,
 ) -> Result<(), Box<Response>> {
     let validator = RecordValidator::new();
-    validation_error_to_response(validator.validate_with_rkey(record, collection, rkey))
+    validation_error_to_response(validator.validate_with_rkey(
+        record,
+        collection.as_str(),
+        rkey.map(|r| r.as_str()),
+    ))
 }

 pub fn validate_record_with_status(
     record: &serde_json::Value,
-    collection: &str,
-    rkey: Option<&str>,
+    collection: &Nsid,
+    rkey: Option<&Rkey>,
     require_lexicon: bool,
 ) -> Result<ValidationStatus, Box<Response>> {
     let validator = RecordValidator::new().require_lexicon(require_lexicon);
-    match validator.validate_with_rkey(record, collection, rkey) {
+    match validator.validate_with_rkey(record, collection.as_str(), rkey.map(|r| r.as_str())) {
         Ok(status) => Ok(status),
         Err(e) => Err(validation_error_to_box_response(e)),
     }
src/api/repo/record/write.rs  (+12 -12)
···
 use tracing::error;
 use uuid::Uuid;

-pub async fn has_verified_comms_channel(db: &PgPool, did: &str) -> Result<bool, sqlx::Error> {
+pub async fn has_verified_comms_channel(db: &PgPool, did: &Did) -> Result<bool, sqlx::Error> {
     let row = sqlx::query(
         r#"
         SELECT
···
         WHERE did = $1
         "#,
     )
-    .bind(did)
+    .bind(did.as_str())
     .fetch_optional(db)
     .await?;
     match row {
···
 pub async fn prepare_repo_write(
     state: &AppState,
     headers: &HeaderMap,
-    repo_did: &str,
+    repo: &AtIdentifier,
     http_method: &str,
     http_uri: &str,
 ) -> Result<RepoWriteAuth, Response> {
···
         }
         response
     })?;
-    if repo_did != auth_user.did {
+    if repo.as_str() != auth_user.did.as_str() {
         return Err(
             ApiError::InvalidRepo("Repo does not match authenticated user".into()).into_response(),
         );
···
         match validate_record_with_status(
             &input.record,
             &input.collection,
-            input.rkey.as_ref().map(|r| r.as_str()),
+            input.rkey.as_ref(),
             require_lexicon,
         ) {
             Ok(status) => Some(status),
···
         _ => return ApiError::InternalError(Some("Failed to persist MST".into())).into_response(),
     };
     let op = RecordOp::Create {
-        collection: input.collection.to_string(),
-        rkey: rkey.to_string(),
+        collection: input.collection.clone(),
+        rkey: rkey.clone(),
         cid: record_cid,
     };
     let mut new_mst_blocks = std::collections::BTreeMap::new();
···
         match validate_record_with_status(
             &input.record,
             &input.collection,
-            Some(input.rkey.as_str()),
+            Some(&input.rkey),
             require_lexicon,
         ) {
             Ok(status) => Some(status),
···
     };
     let op = if existing_cid.is_some() {
         RecordOp::Update {
-            collection: input.collection.to_string(),
-            rkey: input.rkey.to_string(),
+            collection: input.collection.clone(),
+            rkey: input.rkey.clone(),
             cid: record_cid,
             prev: existing_cid,
         }
     } else {
         RecordOp::Create {
-            collection: input.collection.to_string(),
-            rkey: input.rkey.to_string(),
+            collection: input.collection.clone(),
+            rkey: input.rkey.clone(),
             cid: record_cid,
         }
     };
src/api/server/account_status.rs  (+7 -6)
···
 use crate::cache::Cache;
 use crate::plc::PlcClient;
 use crate::state::AppState;
-use crate::types::PlainPassword;
+use crate::types::{Handle, PlainPassword};
 use axum::{
     Json,
     extract::State,
···
         did
     );
     if let Err(e) =
-        crate::api::repo::record::sequence_account_event(&state, did.as_str(), true, None)
+        crate::api::repo::record::sequence_account_event(&state, &did, true, None)
             .await
     {
         warn!(
···
         "[MIGRATION] activateAccount: Sequencing identity event for did={} handle={:?}",
         did, handle
     );
+    let handle_typed = handle.as_ref().map(|h| Handle::new_unchecked(h));
     if let Err(e) = crate::api::repo::record::sequence_identity_event(
         &state,
-        did.as_str(),
-        handle.as_deref(),
+        &did,
+        handle_typed.as_ref(),
     )
     .await
     {
···
     };
     if let Err(e) = crate::api::repo::record::sequence_sync_event(
         &state,
-        did.as_str(),
+        &did,
         &root_cid,
         rev.as_deref(),
     )
···
     }
     if let Err(e) = crate::api::repo::record::sequence_account_event(
         &state,
-        did.as_str(),
+        &did,
         false,
         Some("deactivated"),
     )
src/api/server/passkey_account.rs  (+11 -7)
···
 use crate::api::repo::record::utils::create_signed_commit;
 use crate::auth::{ServiceTokenVerifier, is_service_token};
 use crate::state::{AppState, RateLimitKind};
-use crate::types::{Did, Handle, PlainPassword};
+use crate::types::{Did, Handle, Nsid, PlainPassword, Rkey};
 use crate::validation::validate_password;

 fn extract_client_ip(headers: &HeaderMap) -> String {
···
         }
     };
     let rev = Tid::now(LimitedU32::MIN);
+    let did_typed = Did::new_unchecked(&did);
     let (commit_bytes, _sig) =
-        match create_signed_commit(&did, mst_root, rev.as_ref(), None, &secret_key) {
+        match create_signed_commit(&did_typed, mst_root, rev.as_ref(), None, &secret_key) {
             Ok(result) => result,
             Err(e) => {
                 error!("Error creating genesis commit: {:?}", e);
···
     }

     if !is_byod_did_web {
+        let handle_typed = Handle::new_unchecked(&handle);
         if let Err(e) =
-            crate::api::repo::record::sequence_identity_event(&state, &did, Some(&handle)).await
+            crate::api::repo::record::sequence_identity_event(&state, &did_typed, Some(&handle_typed)).await
         {
             warn!("Failed to sequence identity event for {}: {}", did, e);
         }
         if let Err(e) =
-            crate::api::repo::record::sequence_account_event(&state, &did, true, None).await
+            crate::api::repo::record::sequence_account_event(&state, &did_typed, true, None).await
         {
             warn!("Failed to sequence account event for {}: {}", did, e);
         }
···
             "$type": "app.bsky.actor.profile",
             "displayName": handle
         });
+        let profile_collection = Nsid::new_unchecked("app.bsky.actor.profile");
+        let profile_rkey = Rkey::new_unchecked("self");
         if let Err(e) = crate::api::repo::record::create_record_internal(
             &state,
-            &did,
-            "app.bsky.actor.profile",
-            "self",
+            &did_typed,
+            &profile_collection,
+            &profile_rkey,
             &profile_record,
         )
         .await
tests/commit_signing.rs  (+3 -2)
···
 use jacquard_repo::commit::Commit;
 use k256::ecdsa::SigningKey;
 use std::str::FromStr;
+use tranquil_pds::Did;

 #[test]
 fn test_commit_signing_produces_valid_signature() {
···
     use tranquil_pds::api::repo::record::utils::create_signed_commit;

     let signing_key = SigningKey::random(&mut rand::thread_rng());
-    let did = "did:plc:testuser123456789abcdef";
+    let did = Did::new_unchecked("did:plc:testuser123456789abcdef");
     let data_cid =
         Cid::from_str("bafyreib2rxk3ryblouj3fxza5jvx6psmwewwessc4m6g6e7pqhhkwqomfi").unwrap();
     let rev = Tid::now(LimitedU32::MIN).to_string();

-    let (signed_bytes, sig) = create_signed_commit(did, data_cid, &rev, None, &signing_key)
+    let (signed_bytes, sig) = create_signed_commit(&did, data_cid, &rev, None, &signing_key)
         .expect("signing should succeed");

     assert!(!signed_bytes.is_empty());