Server tools to backfill, tail, mirror, and verify PLC logs

add audit command; implemented for fjall only, for now

ptr.pet ccedaa32 206eed9f

verified
+400 -86
+9
src/bin/allegedly.rs
··· 5 5 use tokio::fs::create_dir_all; 6 6 use tokio::sync::mpsc; 7 7 8 + mod audit; 8 9 mod backfill; 9 10 mod mirror; 10 11 ··· 56 57 Wrap { 57 58 #[command(flatten)] 58 59 args: mirror::Args, 60 + #[command(flatten)] 61 + instrumentation: InstrumentationArgs, 62 + }, 63 + /// Audit a plc database for correctness 64 + Audit { 65 + #[command(flatten)] 66 + args: audit::Args, 59 67 #[command(flatten)] 60 68 instrumentation: InstrumentationArgs, 61 69 }, ··· 118 126 } 119 127 Commands::Mirror { args, .. } => mirror::run(globals, args, true).await?, 120 128 Commands::Wrap { args, .. } => mirror::run(globals, args, false).await?, 129 + Commands::Audit { args, .. } => audit::run(args).await?, 121 130 Commands::Tail { after } => { 122 131 let mut url = globals.upstream; 123 132 url.set_path("/export");
+77
src/bin/audit.rs
··· 1 + use allegedly::{ 2 + FjallDb, audit_fjall, 3 + bin::{InstrumentationArgs, bin_init}, 4 + drop_invalid_ops_fjall, file_to_invalid_ops, invalid_ops_to_stdout, logo, 5 + }; 6 + use clap::Parser; 7 + use std::path::PathBuf; 8 + use tokio::task::JoinSet; 9 + 10 + #[derive(Debug, clap::Args)] 11 + pub struct Args { 12 + /// path to a local fjall database directory 13 + #[arg(long, env = "ALLEGEDLY_FJALL")] 14 + fjall: Option<PathBuf>, 15 + /// path to a file containing invalid ops to fix 16 + #[arg(long, env = "ALLEGEDLY_FIX")] 17 + fix: Option<PathBuf>, 18 + } 19 + 20 + pub async fn run(Args { fjall, fix }: Args) -> anyhow::Result<()> { 21 + let mut tasks = JoinSet::new(); 22 + 23 + if let Some(fjall) = fjall { 24 + let (invalid_ops_tx, invalid_ops_rx) = tokio::sync::mpsc::channel(128); 25 + let db = FjallDb::open(&fjall)?; 26 + 27 + if let Some(fix) = fix { 28 + tasks.spawn(file_to_invalid_ops(fix, invalid_ops_tx)); 29 + tasks.spawn(drop_invalid_ops_fjall(db, invalid_ops_rx)); 30 + } else { 31 + tasks.spawn(audit_fjall(db, invalid_ops_tx)); 32 + tasks.spawn(invalid_ops_to_stdout(invalid_ops_rx)); 33 + } 34 + } else { 35 + anyhow::bail!("no audit target provided"); 36 + } 37 + 38 + while let Some(next) = tasks.join_next().await { 39 + match next { 40 + Err(e) if e.is_panic() => { 41 + log::error!("a joinset task panicked: {e}. bailing now. (should we panic?)"); 42 + return Err(e.into()); 43 + } 44 + Err(e) => { 45 + log::error!("a joinset task failed to join: {e}"); 46 + return Err(e.into()); 47 + } 48 + Ok(Err(e)) => { 49 + log::error!("a joinset task completed with error: {e}"); 50 + return Err(e); 51 + } 52 + Ok(Ok(name)) => { 53 + log::trace!("a task completed: {name:?}. 
{} left", tasks.len()); 54 + } 55 + } 56 + } 57 + 58 + Ok(()) 59 + } 60 + 61 + #[derive(Debug, Parser)] 62 + struct CliArgs { 63 + #[command(flatten)] 64 + instrumentation: InstrumentationArgs, 65 + #[command(flatten)] 66 + args: Args, 67 + } 68 + 69 + #[allow(dead_code)] 70 + #[tokio::main] 71 + async fn main() -> anyhow::Result<()> { 72 + let args = CliArgs::parse(); 73 + bin_init(args.instrumentation.enable_opentelemetry); 74 + log::info!("{}", logo("audit")); 75 + run(args.args).await?; 76 + Ok(()) 77 + }
+7 -18
src/bin/backfill.rs
··· 44 44 /// Cert for postgres (if needed) 45 45 #[arg(long)] 46 46 postgres_cert: Option<PathBuf>, 47 - /// Delete all operations from the postgres db before starting 47 + /// Delete all operations from the db before starting 48 48 /// 49 - /// only used if `--to-postgres` is present 49 + /// only used if `--to-postgres` or `--to-fjall` is present 50 50 #[arg(long, action)] 51 - postgres_reset: bool, 51 + reset: bool, 52 52 /// Bulk load into a local fjall embedded database 53 53 /// 54 54 /// Pass a directory path for the fjall database 55 - #[arg(long, conflicts_with_all = ["to_postgres", "postgres_cert", "postgres_reset"])] 55 + #[arg(long, conflicts_with_all = ["to_postgres", "postgres_cert"])] 56 56 to_fjall: Option<PathBuf>, 57 - /// Delete all operations from the fjall db before starting 58 - /// 59 - /// only used if `--to-fjall` is present 60 - #[arg(long, action, requires = "to_fjall")] 61 - fjall_reset: bool, 62 57 /// Stop at the week ending before this date 63 58 #[arg(long)] 64 59 until: Option<Dt>, ··· 80 75 source_workers, 81 76 to_postgres, 82 77 postgres_cert, 83 - postgres_reset, 78 + reset, 84 79 to_fjall, 85 - fjall_reset, 86 80 until, 87 81 catch_up, 88 82 }: Args, ··· 180 174 181 175 tasks.spawn(backfill_to_fjall( 182 176 db.clone(), 183 - fjall_reset, 177 + reset, 184 178 bulk_out, 185 179 found_last_tx, 186 180 )); ··· 192 186 let db = Db::new(pg_url.as_str(), postgres_cert).await?; 193 187 log::trace!("connected to postgres"); 194 188 195 - tasks.spawn(backfill_to_pg( 196 - db.clone(), 197 - postgres_reset, 198 - bulk_out, 199 - found_last_tx, 200 - )); 189 + tasks.spawn(backfill_to_pg(db.clone(), reset, bulk_out, found_last_tx)); 201 190 if catch_up { 202 191 tasks.spawn(pages_to_pg(db, full_out)); 203 192 }
+37 -1
src/lib.rs
··· 20 20 pub use cached_value::{CachedValue, Fetcher}; 21 21 pub use client::{CLIENT, UA}; 22 22 pub use mirror::{ExperimentalConf, ListenConf, serve, serve_fjall}; 23 - pub use plc_fjall::{FjallDb, backfill_to_fjall, pages_to_fjall}; 23 + pub use plc_fjall::{FjallDb, audit as audit_fjall, backfill_to_fjall, pages_to_fjall, drop_invalid_ops as drop_invalid_ops_fjall}; 24 24 pub use plc_pg::{Db, backfill_to_pg, pages_to_pg}; 25 25 pub use poll::{PageBoundaryState, get_page, poll_upstream}; 26 26 pub use ratelimit::{CreatePlcOpLimiter, GovernorMiddleware, IpLimiters}; ··· 136 136 }; 137 137 } 138 138 Ok("pages_to_stdout") 139 + } 140 + 141 + pub async fn invalid_ops_to_stdout( 142 + mut rx: mpsc::Receiver<(String, Dt, String)>, 143 + ) -> anyhow::Result<&'static str> { 144 + while let Some((did, at, cid)) = rx.recv().await { 145 + let val = serde_json::json!({ 146 + "did": did, 147 + "at": at, 148 + "cid": cid, 149 + }); 150 + println!("{val}"); 151 + } 152 + Ok("invalid_ops_to_stdout") 153 + } 154 + 155 + pub async fn file_to_invalid_ops( 156 + path: impl AsRef<std::path::Path>, 157 + tx: mpsc::Sender<(String, Dt, String)>, 158 + ) -> anyhow::Result<&'static str> { 159 + let file = tokio::fs::File::open(path).await?; 160 + 161 + use tokio::io::AsyncBufReadExt; 162 + let mut lines = tokio::io::BufReader::new(file).lines(); 163 + while let Some(line) = lines.next_line().await? { 164 + #[derive(serde::Deserialize)] 165 + struct Op { 166 + did: String, 167 + at: Dt, 168 + cid: String, 169 + } 170 + let op: Op = serde_json::from_str(&line)?; 171 + tx.send((op.did, op.at, op.cid)).await?; 172 + } 173 + 174 + Ok("invalid_ops_to_stdout") 139 175 } 140 176 141 177 pub fn logo(name: &str) -> String {
+270 -67
src/plc_fjall.rs
··· 1 1 use crate::{ 2 2 BundleSource, Dt, ExportPage, Op as CommonOp, PageBoundaryState, Week, 3 - crypto::{DidKey, Signature, assure_valid_sig}, 3 + crypto::{AssuranceResults, DidKey, Signature, assure_valid_sig}, 4 4 }; 5 5 use anyhow::Context; 6 6 use data_encoding::BASE32_NOPAD; ··· 797 797 } 798 798 } 799 799 800 + fn verify_op_sig(op: &StoredOp, prev: Option<&StoredOp>) -> anyhow::Result<AssuranceResults> { 801 + let keys: Vec<&DidKey> = match &op.prev { 802 + None => op.get_keys(), 803 + Some(_) => match prev { 804 + None => anyhow::bail!("prev cid exists but the op for that cid is missing"), 805 + Some(p) => p.get_keys(), 806 + }, 807 + }; 808 + 809 + if keys.is_empty() { 810 + anyhow::bail!("no keys found for genesis op or prev op"); 811 + } 812 + 813 + let data = { 814 + let serde_json::Value::Object(mut data) = op.to_json_value() else { 815 + unreachable!("we know op is valid, because it comes from StoredOp") 816 + }; 817 + data.remove("sig"); 818 + serde_json::Value::Object(data) 819 + }; 820 + 821 + let results = assure_valid_sig(keys, &op.sig, &data) 822 + .expect("that our op is an object and we removed sig field"); 823 + Ok(results) 824 + } 825 + 800 826 // this is basically Op, but without the cid and created_at fields 801 827 // since we have them in the key already 802 828 #[derive(Debug, Deserialize, Serialize)] ··· 941 967 .transpose()? 
942 968 .flatten(); 943 969 944 - let keys: Vec<&DidKey> = match &operation.prev { 945 - None => operation.get_keys(), 946 - Some(_) => match &prev_op { 947 - None => { 948 - log::error!( 949 - "op {} {} has prev but the prev op is not found", 950 - op.did, 951 - op.cid 952 - ); 970 + let prev_stored = prev_op.as_ref().map(|(_, _, p)| &p.operation); 971 + 972 + match verify_op_sig(&operation, prev_stored) { 973 + Ok(results) => { 974 + if !results.valid { 975 + let msg = results 976 + .errors 977 + .iter() 978 + .map(|e| e.to_string()) 979 + .collect::<Vec<_>>() 980 + .join("\n"); 981 + log::warn!("invalid op {} {}:\n{msg}", op.did, op.cid); 953 982 return Ok(0); 954 983 } 955 - Some((_, _, prev)) => prev.operation.get_keys(), 956 - }, 957 - }; 958 - 959 - if keys.is_empty() { 960 - log::warn!("no keys for op {} {}", op.did, op.cid); 961 - return Ok(0); 962 - } 963 - 964 - let data = { 965 - let serde_json::Value::Object(mut data) = operation.to_json_value() else { 966 - unreachable!("we checked if operation is valid already") 967 - }; 968 - data.remove("sig"); 969 - serde_json::Value::Object(data) 970 - }; 971 - let results = assure_valid_sig(keys, &operation.sig, &data)?; 972 - if !results.valid { 973 - for err in results.errors { 974 - log::warn!("invalid signature for op {} {}: {err}", op.did, op.cid); 984 + } 985 + Err(e) => { 986 + log::warn!("invalid op {} {}: {e}", op.did, op.cid); 987 + return Ok(0); 975 988 } 976 - return Ok(0); 977 989 } 990 + log::debug!("verified op {} {}", op.did, op.cid); 978 991 } 979 992 980 993 let db_op = DbOp { ··· 1004 1017 Ok(1) 1005 1018 } 1006 1019 1020 + fn decode_by_did_entry( 1021 + &self, 1022 + by_did_key: &[u8], 1023 + prefix_len: usize, 1024 + ) -> anyhow::Result<(Dt, PlcCid, DbOp)> { 1025 + let key_rest = by_did_key 1026 + .get(prefix_len..) 
1027 + .ok_or_else(|| anyhow::anyhow!("invalid by_did key {by_did_key:?}"))?; 1028 + 1029 + let ts_bytes = key_rest 1030 + .get(..8) 1031 + .ok_or_else(|| anyhow::anyhow!("invalid length: {key_rest:?}"))?; 1032 + let cid_suffix = key_rest 1033 + .get(9..) 1034 + .ok_or_else(|| anyhow::anyhow!("invalid length: {key_rest:?}"))?; 1035 + 1036 + let op_key = [ts_bytes, &[SEP][..], cid_suffix].concat(); 1037 + let ts = decode_timestamp(ts_bytes)?; 1038 + 1039 + let value = self 1040 + .inner 1041 + .ops 1042 + .get(&op_key)? 1043 + .ok_or_else(|| anyhow::anyhow!("op not found: {op_key:?}"))?; 1044 + 1045 + let op: DbOp = rmp_serde::from_slice(&value)?; 1046 + let mut full_cid = op.cid_prefix.clone(); 1047 + full_cid.extend_from_slice(cid_suffix); 1048 + 1049 + Ok((ts, PlcCid(full_cid), op)) 1050 + } 1051 + 1007 1052 fn _ops_for_did( 1008 1053 &self, 1009 1054 did: &str, ··· 1015 1060 let (by_did_key, _) = guard 1016 1061 .into_inner() 1017 1062 .map_err(|e| anyhow::anyhow!("fjall read error: {e}"))?; 1018 - 1019 - let key_rest = by_did_key 1020 - .get(prefix.len()..) 1021 - .ok_or_else(|| anyhow::anyhow!("invalid by_did key {by_did_key:?}"))?; 1022 - 1023 - let ts_bytes = key_rest 1024 - .get(..8) 1025 - .ok_or_else(|| anyhow::anyhow!("invalid length: {key_rest:?}"))?; 1026 - let cid_suffix = key_rest 1027 - .get(9..) 1028 - .ok_or_else(|| anyhow::anyhow!("invalid length: {key_rest:?}"))?; 1029 - 1030 - let op_key = [ts_bytes, &[SEP][..], cid_suffix].concat(); 1031 - let ts = decode_timestamp(ts_bytes)?; 1032 - 1033 - let value = self 1034 - .inner 1035 - .ops 1036 - .get(&op_key)? 
1037 - .ok_or_else(|| anyhow::anyhow!("op not found: {op_key:?}"))?; 1038 - 1039 - let op: DbOp = rmp_serde::from_slice(&value)?; 1040 - let mut full_cid_bytes = op.cid_prefix.clone(); 1041 - full_cid_bytes.extend_from_slice(cid_suffix); 1042 - 1043 - let cid = PlcCid(full_cid_bytes); 1044 - 1045 - Ok((ts, cid, op)) 1063 + self.decode_by_did_entry(&by_did_key, prefix.len()) 1046 1064 })) 1047 1065 } 1048 1066 ··· 1111 1129 })) 1112 1130 } 1113 1131 1114 - pub fn export_ops_week( 1132 + pub fn drop_op(&self, did_str: &str, created_at: &Dt, cid: &str) -> anyhow::Result<()> { 1133 + let cid = decode_cid_str(cid)?; 1134 + let cid_suffix = &cid[30..]; 1135 + 1136 + let op_key = op_key(created_at, cid_suffix); 1137 + let by_did_key = by_did_key(did_str, created_at, cid_suffix)?; 1138 + 1139 + let mut batch = self.inner.db.batch(); 1140 + batch.remove(&self.inner.ops, op_key); 1141 + batch.remove(&self.inner.by_did, by_did_key); 1142 + batch.commit()?; 1143 + 1144 + Ok(()) 1145 + } 1146 + 1147 + pub fn audit( 1115 1148 &self, 1116 - week: Week, 1117 - ) -> anyhow::Result<impl Iterator<Item = anyhow::Result<Op>> + '_> { 1118 - let after: Dt = week.into(); 1119 - let before: Dt = week.next().into(); 1149 + invalid_ops_tx: mpsc::Sender<(String, Dt, String)>, 1150 + ) -> anyhow::Result<(usize, usize)> { 1151 + use std::sync::mpsc; 1152 + 1153 + let ops = self.inner.by_did.len()?; 1154 + 1155 + let workers = std::thread::available_parallelism() 1156 + .map(|n| n.get()) 1157 + .unwrap_or(4); 1158 + 1159 + type Batch = (Vec<u8>, Vec<(Dt, PlcCid, DbOp)>); 1160 + let (result_tx, result_rx) = mpsc::sync_channel::<anyhow::Result<(usize, usize)>>(workers); 1161 + 1162 + let channels: Vec<_> = (0..workers) 1163 + .map(|_| mpsc::sync_channel::<Batch>(512)) 1164 + .collect(); 1165 + let senders: Vec<_> = channels.iter().map(|(tx, _)| tx.clone()).collect(); 1166 + 1167 + std::thread::scope(|s| { 1168 + for (_, rx) in channels { 1169 + let result_tx = result_tx.clone(); 1170 + let 
invalid_ops_tx = invalid_ops_tx.clone(); 1171 + s.spawn(move || { 1172 + let mut checked: usize = 0; 1173 + let mut failed: usize = 0; 1174 + while let Ok((did_prefix, ops)) = rx.recv() { 1175 + let did = decode_did(&did_prefix[..did_prefix.len() - 1]); 1176 + for (ts, cid, op) in &ops { 1177 + checked += 1; 1178 + let prev_op = op.operation.prev.as_ref().and_then(|expected| { 1179 + ops.iter().find(|(_, c, _)| c == expected) 1180 + }); 1181 + let prev_cid_ok = op.operation.prev.is_none() || prev_op.is_some(); 1182 + if !prev_cid_ok { 1183 + log::error!("audit: op {did} {cid} prev cid mismatch or missing predecessor, is db corrupted?"); 1184 + failed += 1; 1185 + let _ = invalid_ops_tx.blocking_send((did.clone(), ts.clone(), cid.to_string())); 1186 + continue; 1187 + } 1188 + let prev_stored = prev_op.map(|(_, _, p)| &p.operation); 1189 + match verify_op_sig(&op.operation, prev_stored) { 1190 + Ok(results) => { 1191 + if !results.valid { 1192 + let msg = results 1193 + .errors 1194 + .iter() 1195 + .map(|e| e.to_string()) 1196 + .collect::<Vec<_>>() 1197 + .join("\n "); 1198 + log::warn!("audit: invalid op {} {}:\n {msg}", did, cid); 1199 + failed += 1; 1200 + let _ = invalid_ops_tx.blocking_send((did.clone(), ts.clone(), cid.to_string())); 1201 + } 1202 + } 1203 + Err(e) => { 1204 + log::warn!("audit: invalid op {} {}: {e}", did, cid); 1205 + failed += 1; 1206 + let _ = invalid_ops_tx.blocking_send((did.clone(), ts.clone(), cid.to_string())); 1207 + } 1208 + } 1209 + } 1210 + } 1211 + let _ = result_tx.send(Ok((checked, failed))); 1212 + }); 1213 + } 1214 + drop(result_tx); 1215 + 1216 + // todo: probably dont use a macro... 1217 + macro_rules! 
spawn_scan_thread { 1218 + ($iter_method:ident, $start_idx:expr, $reverse:expr, $limit:expr) => {{ 1219 + let senders = senders.clone(); 1220 + let mut iter = self.inner.by_did.iter(); 1221 + 1222 + s.spawn(move || -> anyhow::Result<()> { 1223 + let mut current_prefix: Option<[u8; 16]> = None; 1224 + let mut did_ops: Vec<(Dt, PlcCid, DbOp)> = Vec::new(); 1225 + let mut idx = $start_idx; 1226 + let mut processed_ops: usize = 0; 1227 + 1228 + while let Some(guard) = iter.$iter_method() { 1229 + let (by_did_key, _) = guard 1230 + .into_inner() 1231 + .map_err(|e| anyhow::anyhow!("fjall read error: {e}"))?; 1232 + 1233 + let mut prefix_array = [0u8; 16]; 1234 + prefix_array.copy_from_slice(by_did_key.get(..16).ok_or_else( 1235 + || anyhow::anyhow!("by_did key too short: {by_did_key:?}"), 1236 + )?); 1237 + 1238 + let op = self.decode_by_did_entry(&by_did_key, 16)?; 1239 + 1240 + if current_prefix.map_or(true, |cp| cp != prefix_array) { 1241 + // new did, push the ops 1242 + if let Some(prefix) = current_prefix.take() { 1243 + if $reverse { 1244 + did_ops.reverse(); 1245 + } 1246 + senders[idx % workers] 1247 + .send((prefix.to_vec(), std::mem::take(&mut did_ops))) 1248 + .ok(); 1249 + idx += 1; 1250 + 1251 + if processed_ops >= $limit { 1252 + break; 1253 + } 1254 + } 1255 + current_prefix = Some(prefix_array); 1256 + } 1257 + 1258 + did_ops.push(op); 1259 + processed_ops += 1; 1260 + } 1261 + 1262 + if let Some(prefix) = current_prefix { 1263 + if $reverse { 1264 + did_ops.reverse(); 1265 + } 1266 + senders[idx % workers].send((prefix.to_vec(), did_ops)).ok(); 1267 + } 1268 + 1269 + Ok(()) 1270 + }) 1271 + }}; 1272 + } 1273 + 1274 + // we can start two threads, one for forward iteration and one for reverse iteration 1275 + // this way we have two scans in parallel which should be faster! 
1276 + let f_handle = spawn_scan_thread!(next, 0, false, ops / 2); 1277 + let b_handle = spawn_scan_thread!(next_back, workers / 2, true, ops - (ops / 2)); 1278 + 1279 + f_handle.join().unwrap()?; 1280 + b_handle.join().unwrap()?; 1281 + 1282 + drop(senders); 1283 + 1284 + let mut total_checked: usize = 0; 1285 + let mut total_failed: usize = 0; 1286 + for res in result_rx { 1287 + let (c, f) = res?; 1288 + total_checked += c; 1289 + total_failed += f; 1290 + } 1120 1291 1121 - self.export_ops(after..before) 1292 + Ok((total_checked, total_failed)) 1293 + }) 1122 1294 } 1123 1295 } 1124 1296 ··· 1130 1302 let db = self.clone(); 1131 1303 1132 1304 async move { 1133 - let (mut tx, rx) = tokio::io::duplex(1024 * 1024 * 64); 1305 + let (mut tx, rx) = tokio::io::duplex(1024 * 1024 * 16); 1134 1306 1135 1307 tokio::task::spawn_blocking(move || -> anyhow::Result<()> { 1136 - let iter = db.export_ops_week(week)?; 1308 + let after: Dt = week.into(); 1309 + let before: Dt = week.next().into(); 1310 + 1311 + let iter = db.export_ops(after..before)?; 1137 1312 1138 1313 let rt = tokio::runtime::Handle::current(); 1139 1314 ··· 1184 1359 1185 1360 loop { 1186 1361 let pages_finished = pages.is_closed(); 1362 + // we can stop if we have no more pages and all the insert tasks are finished 1187 1363 if pages_finished && insert_tasks.is_empty() { 1188 1364 break; 1189 1365 } ··· 1266 1442 t0.elapsed() 1267 1443 ); 1268 1444 Ok("pages_to_fjall") 1445 + } 1446 + 1447 + pub async fn audit( 1448 + db: FjallDb, 1449 + invalid_ops_tx: mpsc::Sender<(String, Dt, String)>, 1450 + ) -> anyhow::Result<&'static str> { 1451 + log::info!("starting fjall audit..."); 1452 + let t0 = std::time::Instant::now(); 1453 + let (checked, failed) = tokio::task::spawn_blocking(move || db.audit(invalid_ops_tx)).await??; 1454 + log::info!( 1455 + "fjall audit complete in {:?}, {checked} ops checked", 1456 + t0.elapsed() 1457 + ); 1458 + if failed > 0 { 1459 + anyhow::bail!("audit found {failed} invalid 
operations"); 1460 + } 1461 + Ok("audit_fjall") 1462 + } 1463 + 1464 + pub async fn drop_invalid_ops( 1465 + db: FjallDb, 1466 + mut invalid_ops_rx: mpsc::Receiver<(String, Dt, String)>, 1467 + ) -> anyhow::Result<&'static str> { 1468 + while let Some((did, at, cid)) = invalid_ops_rx.recv().await { 1469 + db.drop_op(&did, &at, &cid)?; 1470 + } 1471 + Ok("drop_invalid_ops") 1269 1472 } 1270 1473 1271 1474 #[cfg(test)]