tangled
alpha
login
or
join now
ptr.pet
/
nsid-tracker
3
fork
atom
tracks lexicons and how many times they appeared on the jetstream
3
fork
atom
overview
issues
pulls
pipelines
add more logging for syncing and migrating
ptr.pet
7 months ago
fddb212c
aebca039
verified
This commit was signed with the committer's known signature.
ptr.pet
SSH Key Fingerprint:
SHA256:Abmvag+juovVufZTxyWY8KcVgrznxvBjQpJesv071Aw=
+19
-5
2 changed files
expand all
collapse all
unified
split
server
src
db
mod.rs
main.rs
+17
-5
server/src/db/mod.rs
···
139
139
let _guard = scc::ebr::Guard::new();
140
140
for (_, handle) in self.hits.iter(&_guard) {
141
141
let mut nsid_data = Vec::with_capacity(2);
142
142
+
let mut total_count = 0;
142
143
let is_too_old = handle.since_last_activity() > self.max_last_activity;
143
144
// if we disconnect for a long time, we want to sync all of what we
144
145
// have to avoid having many small blocks (even if we run compaction
···
154
155
if count > 0 && (all || data_count > 0 || is_too_old) {
155
156
for i in 0..data_count {
156
157
nsid_data.push((i, handle.clone(), block_size));
158
158
+
total_count += block_size;
157
159
}
158
160
// only sync remainder if we haven't met block size
159
161
let remainder = count % block_size;
160
162
if (all || data_count == 0) && remainder > 0 {
161
163
nsid_data.push((data_count, handle.clone(), remainder));
164
164
+
total_count += remainder;
162
165
}
163
166
}
167
167
+
tracing::info!(
168
168
+
"{}: will sync {} blocks ({} count)",
169
169
+
handle.nsid(),
170
170
+
nsid_data.len(),
171
171
+
total_count,
172
172
+
);
164
173
data.push(nsid_data);
165
174
}
166
175
drop(_guard);
···
174
183
.map(|(i, handle, max_block_size)| {
175
184
handle
176
185
.encode_block(max_block_size)
186
186
+
.inspect(|block| {
187
187
+
tracing::info!(
188
188
+
"{}: encoded block with {} items",
189
189
+
handle.nsid(),
190
190
+
block.written,
191
191
+
)
192
192
+
})
177
193
.map(|block| (i, block, handle))
178
194
})
179
195
.collect::<Result<Vec<_>, _>>()
···
187
203
self.sync_pool
188
204
.execute(move || match handle.insert(block.key, block.data) {
189
205
Ok(_) => {
190
190
-
tracing::info!(
191
191
-
"[{i}] synced {} of {} to db",
192
192
-
block.written,
193
193
-
handle.nsid()
194
194
-
)
206
206
+
tracing::info!("{}: [{i}] synced {}", block.written, handle.nsid())
195
207
}
196
208
Err(err) => tracing::error!("failed to sync block: {}", err),
197
209
});
+2
server/src/main.rs
···
293
293
}
294
294
let read_time = start.elapsed();
295
295
let read_per_second = total_count as f64 / read_time.as_secs_f64();
296
296
+
drop(from);
297
297
+
tracing::info!("starting sync!!!");
296
298
to.sync(true).expect("cant sync");
297
299
let total_time = start.elapsed();
298
300
let write_per_second = total_count as f64 / (total_time - read_time).as_secs_f64();