just playing with tangled
at globpattern 739 lines 28 kB view raw
// Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! A persistent table of fixed-size keys to variable-size values.
//!
//! The keys are stored in sorted order, with each key followed by an
//! integer offset into the list of values. The values are
//! concatenated after the keys. A file may have a parent file, and
//! the parent may have its own parent, and so on. The child file then
//! represents the union of the entries.
22 23#![allow(missing_docs)] 24 25use std::cmp::Ordering; 26use std::collections::BTreeMap; 27use std::collections::HashMap; 28use std::fs::File; 29use std::io; 30use std::io::Read; 31use std::io::Write as _; 32use std::path::PathBuf; 33use std::sync::Arc; 34use std::sync::RwLock; 35 36use blake2::Blake2b512; 37use blake2::Digest as _; 38use tempfile::NamedTempFile; 39use thiserror::Error; 40 41use crate::file_util::persist_content_addressed_temp_file; 42use crate::lock::FileLock; 43use crate::lock::FileLockError; 44 45pub trait TableSegment { 46 fn segment_num_entries(&self) -> usize; 47 fn segment_parent_file(&self) -> Option<&Arc<ReadonlyTable>>; 48 fn segment_get_value(&self, key: &[u8]) -> Option<&[u8]>; 49 fn segment_add_entries_to(&self, mut_table: &mut MutableTable); 50 51 fn num_entries(&self) -> usize { 52 if let Some(parent_file) = self.segment_parent_file() { 53 parent_file.num_entries() + self.segment_num_entries() 54 } else { 55 self.segment_num_entries() 56 } 57 } 58 59 fn get_value<'a>(&'a self, key: &[u8]) -> Option<&'a [u8]> { 60 self.segment_get_value(key) 61 .or_else(|| self.segment_parent_file()?.get_value(key)) 62 } 63} 64 65pub struct ReadonlyTable { 66 key_size: usize, 67 parent_file: Option<Arc<ReadonlyTable>>, 68 name: String, 69 // Number of entries not counting the parent file 70 num_local_entries: usize, 71 // The file's entries in the raw format they're stored in on disk. 
72 index: Vec<u8>, 73 values: Vec<u8>, 74} 75 76impl ReadonlyTable { 77 fn load_from( 78 file: &mut dyn Read, 79 store: &TableStore, 80 name: String, 81 key_size: usize, 82 ) -> TableStoreResult<Arc<ReadonlyTable>> { 83 let to_load_err = |err| TableStoreError::LoadSegment { 84 name: name.clone(), 85 err, 86 }; 87 let read_u32 = |file: &mut dyn Read| -> TableStoreResult<u32> { 88 let mut buf = [0; 4]; 89 file.read_exact(&mut buf).map_err(to_load_err)?; 90 Ok(u32::from_le_bytes(buf)) 91 }; 92 let parent_filename_len = read_u32(file)?; 93 let maybe_parent_file = if parent_filename_len > 0 { 94 let mut parent_filename_bytes = vec![0; parent_filename_len as usize]; 95 file.read_exact(&mut parent_filename_bytes) 96 .map_err(to_load_err)?; 97 let parent_filename = String::from_utf8(parent_filename_bytes).unwrap(); 98 let parent_file = store.load_table(parent_filename)?; 99 Some(parent_file) 100 } else { 101 None 102 }; 103 let num_local_entries = read_u32(file)? as usize; 104 let index_size = num_local_entries * ReadonlyTableIndexEntry::size(key_size); 105 let mut data = vec![]; 106 file.read_to_end(&mut data).map_err(to_load_err)?; 107 let values = data.split_off(index_size); 108 let index = data; 109 Ok(Arc::new(ReadonlyTable { 110 key_size, 111 parent_file: maybe_parent_file, 112 name, 113 num_local_entries, 114 index, 115 values, 116 })) 117 } 118 119 pub fn start_mutation(self: &Arc<Self>) -> MutableTable { 120 MutableTable::incremental(self.clone()) 121 } 122 123 fn segment_value_offset_by_pos(&self, pos: usize) -> usize { 124 if pos == self.num_local_entries { 125 self.values.len() 126 } else { 127 ReadonlyTableIndexEntry::new(self, pos).value_offset() 128 } 129 } 130 131 fn segment_value_by_pos(&self, pos: usize) -> &[u8] { 132 &self.values 133 [self.segment_value_offset_by_pos(pos)..self.segment_value_offset_by_pos(pos + 1)] 134 } 135} 136 137impl TableSegment for ReadonlyTable { 138 fn segment_num_entries(&self) -> usize { 139 self.num_local_entries 140 } 141 
142 fn segment_parent_file(&self) -> Option<&Arc<ReadonlyTable>> { 143 self.parent_file.as_ref() 144 } 145 146 fn segment_get_value(&self, key: &[u8]) -> Option<&[u8]> { 147 let mut low_pos = 0; 148 let mut high_pos = self.num_local_entries; 149 loop { 150 if high_pos == low_pos { 151 return None; 152 } 153 let mid_pos = (low_pos + high_pos) / 2; 154 let mid_entry = ReadonlyTableIndexEntry::new(self, mid_pos); 155 match key.cmp(mid_entry.key()) { 156 Ordering::Less => { 157 high_pos = mid_pos; 158 } 159 Ordering::Equal => { 160 return Some(self.segment_value_by_pos(mid_pos)); 161 } 162 Ordering::Greater => { 163 low_pos = mid_pos + 1; 164 } 165 } 166 } 167 } 168 169 fn segment_add_entries_to(&self, mut_table: &mut MutableTable) { 170 for pos in 0..self.num_local_entries { 171 let entry = ReadonlyTableIndexEntry::new(self, pos); 172 mut_table.add_entry( 173 entry.key().to_vec(), 174 self.segment_value_by_pos(pos).to_vec(), 175 ); 176 } 177 } 178} 179 180struct ReadonlyTableIndexEntry<'table> { 181 data: &'table [u8], 182} 183 184impl<'table> ReadonlyTableIndexEntry<'table> { 185 fn new(table: &'table ReadonlyTable, pos: usize) -> Self { 186 let entry_size = ReadonlyTableIndexEntry::size(table.key_size); 187 let offset = entry_size * pos; 188 let data = &table.index[offset..][..entry_size]; 189 ReadonlyTableIndexEntry { data } 190 } 191 192 fn size(key_size: usize) -> usize { 193 key_size + 4 194 } 195 196 fn key(&self) -> &'table [u8] { 197 &self.data[0..self.data.len() - 4] 198 } 199 200 fn value_offset(&self) -> usize { 201 u32::from_le_bytes(self.data[self.data.len() - 4..].try_into().unwrap()) as usize 202 } 203} 204 205pub struct MutableTable { 206 key_size: usize, 207 parent_file: Option<Arc<ReadonlyTable>>, 208 entries: BTreeMap<Vec<u8>, Vec<u8>>, 209} 210 211impl MutableTable { 212 fn full(key_size: usize) -> Self { 213 Self { 214 key_size, 215 parent_file: None, 216 entries: BTreeMap::new(), 217 } 218 } 219 220 fn incremental(parent_file: 
Arc<ReadonlyTable>) -> Self { 221 let key_size = parent_file.key_size; 222 Self { 223 key_size, 224 parent_file: Some(parent_file), 225 entries: BTreeMap::new(), 226 } 227 } 228 229 pub fn add_entry(&mut self, key: Vec<u8>, value: Vec<u8>) { 230 assert_eq!(key.len(), self.key_size); 231 self.entries.insert(key, value); 232 } 233 234 fn add_entries_from(&mut self, other: &dyn TableSegment) { 235 other.segment_add_entries_to(self); 236 } 237 238 fn merge_in(&mut self, other: &Arc<ReadonlyTable>) { 239 let mut maybe_own_ancestor = self.parent_file.clone(); 240 let mut maybe_other_ancestor = Some(other.clone()); 241 let mut files_to_add = vec![]; 242 loop { 243 if maybe_other_ancestor.is_none() { 244 break; 245 } 246 let other_ancestor = maybe_other_ancestor.as_ref().unwrap(); 247 if maybe_own_ancestor.is_none() { 248 files_to_add.push(other_ancestor.clone()); 249 maybe_other_ancestor = other_ancestor.parent_file.clone(); 250 continue; 251 } 252 let own_ancestor = maybe_own_ancestor.as_ref().unwrap(); 253 if own_ancestor.name == other_ancestor.name { 254 break; 255 } 256 if own_ancestor.num_entries() < other_ancestor.num_entries() { 257 files_to_add.push(other_ancestor.clone()); 258 maybe_other_ancestor = other_ancestor.parent_file.clone(); 259 } else { 260 maybe_own_ancestor = own_ancestor.parent_file.clone(); 261 } 262 } 263 264 for file in files_to_add.iter().rev() { 265 self.add_entries_from(file.as_ref()); 266 } 267 } 268 269 fn serialize(self) -> Vec<u8> { 270 let mut buf = vec![]; 271 272 if let Some(parent_file) = &self.parent_file { 273 buf.extend(u32::try_from(parent_file.name.len()).unwrap().to_le_bytes()); 274 buf.extend_from_slice(parent_file.name.as_bytes()); 275 } else { 276 buf.extend(0_u32.to_le_bytes()); 277 } 278 279 buf.extend(u32::try_from(self.entries.len()).unwrap().to_le_bytes()); 280 281 let mut value_offset = 0_u32; 282 for (key, value) in &self.entries { 283 buf.extend_from_slice(key); 284 buf.extend(value_offset.to_le_bytes()); 285 
value_offset += u32::try_from(value.len()).unwrap(); 286 } 287 for value in self.entries.values() { 288 buf.extend_from_slice(value); 289 } 290 buf 291 } 292 293 /// If the MutableTable has more than half the entries of its parent 294 /// ReadonlyTable, return MutableTable with the commits from both. This 295 /// is done recursively, so the stack of index files has O(log n) files. 296 #[expect(clippy::assigning_clones)] 297 fn maybe_squash_with_ancestors(self) -> MutableTable { 298 let mut num_new_entries = self.entries.len(); 299 let mut files_to_squash = vec![]; 300 let mut maybe_parent_file = self.parent_file.clone(); 301 let mut squashed; 302 loop { 303 match maybe_parent_file { 304 Some(parent_file) => { 305 // TODO: We should probably also squash if the parent file has less than N 306 // commits, regardless of how many (few) are in `self`. 307 if 2 * num_new_entries < parent_file.num_local_entries { 308 squashed = MutableTable::incremental(parent_file); 309 break; 310 } 311 num_new_entries += parent_file.num_local_entries; 312 files_to_squash.push(parent_file.clone()); 313 maybe_parent_file = parent_file.parent_file.clone(); 314 } 315 None => { 316 squashed = MutableTable::full(self.key_size); 317 break; 318 } 319 } 320 } 321 322 if files_to_squash.is_empty() { 323 return self; 324 } 325 326 for parent_file in files_to_squash.iter().rev() { 327 squashed.add_entries_from(parent_file.as_ref()); 328 } 329 squashed.add_entries_from(&self); 330 squashed 331 } 332 333 fn save_in(self, store: &TableStore) -> TableStoreResult<Arc<ReadonlyTable>> { 334 if self.entries.is_empty() && self.parent_file.is_some() { 335 return Ok(self.parent_file.unwrap()); 336 } 337 338 let buf = self.maybe_squash_with_ancestors().serialize(); 339 let mut hasher = Blake2b512::new(); 340 hasher.update(&buf); 341 let file_id_hex = hex::encode(hasher.finalize()); 342 let file_path = store.dir.join(&file_id_hex); 343 let to_save_err = |err| TableStoreError::SaveSegment { 344 name: 
file_id_hex.clone(), 345 err, 346 }; 347 348 let mut temp_file = NamedTempFile::new_in(&store.dir).map_err(to_save_err)?; 349 let file = temp_file.as_file_mut(); 350 file.write_all(&buf).map_err(to_save_err)?; 351 persist_content_addressed_temp_file(temp_file, file_path).map_err(to_save_err)?; 352 353 ReadonlyTable::load_from(&mut buf.as_slice(), store, file_id_hex, store.key_size) 354 } 355} 356 357impl TableSegment for MutableTable { 358 fn segment_num_entries(&self) -> usize { 359 self.entries.len() 360 } 361 362 fn segment_parent_file(&self) -> Option<&Arc<ReadonlyTable>> { 363 self.parent_file.as_ref() 364 } 365 366 fn segment_get_value(&self, key: &[u8]) -> Option<&[u8]> { 367 self.entries.get(key).map(Vec::as_slice) 368 } 369 370 fn segment_add_entries_to(&self, mut_table: &mut MutableTable) { 371 for (key, value) in &self.entries { 372 mut_table.add_entry(key.clone(), value.clone()); 373 } 374 } 375} 376 377#[derive(Debug, Error)] 378pub enum TableStoreError { 379 #[error("Failed to load table heads")] 380 LoadHeads(#[source] io::Error), 381 #[error("Failed to save table heads")] 382 SaveHeads(#[source] io::Error), 383 #[error("Failed to load table segment '{name}'")] 384 LoadSegment { 385 name: String, 386 #[source] 387 err: io::Error, 388 }, 389 #[error("Failed to save table segment '{name}'")] 390 SaveSegment { 391 name: String, 392 #[source] 393 err: io::Error, 394 }, 395 #[error("Failed to lock table store")] 396 Lock(#[source] FileLockError), 397} 398 399pub type TableStoreResult<T> = Result<T, TableStoreError>; 400 401pub struct TableStore { 402 dir: PathBuf, 403 key_size: usize, 404 cached_tables: RwLock<HashMap<String, Arc<ReadonlyTable>>>, 405} 406 407impl TableStore { 408 pub fn init(dir: PathBuf, key_size: usize) -> Self { 409 std::fs::create_dir(dir.join("heads")).unwrap(); 410 TableStore { 411 dir, 412 key_size, 413 cached_tables: Default::default(), 414 } 415 } 416 417 pub fn reinit(&self) { 418 
std::fs::remove_dir_all(self.dir.join("heads")).unwrap(); 419 TableStore::init(self.dir.clone(), self.key_size); 420 } 421 422 pub fn key_size(&self) -> usize { 423 self.key_size 424 } 425 426 pub fn load(dir: PathBuf, key_size: usize) -> Self { 427 TableStore { 428 dir, 429 key_size, 430 cached_tables: Default::default(), 431 } 432 } 433 434 pub fn save_table(&self, mut_table: MutableTable) -> TableStoreResult<Arc<ReadonlyTable>> { 435 let maybe_parent_table = mut_table.parent_file.clone(); 436 let table = mut_table.save_in(self)?; 437 self.add_head(&table)?; 438 if let Some(parent_table) = maybe_parent_table { 439 if parent_table.name != table.name { 440 self.remove_head(&parent_table); 441 } 442 } 443 { 444 let mut locked_cache = self.cached_tables.write().unwrap(); 445 locked_cache.insert(table.name.clone(), table.clone()); 446 } 447 Ok(table) 448 } 449 450 fn add_head(&self, table: &Arc<ReadonlyTable>) -> TableStoreResult<()> { 451 std::fs::write(self.dir.join("heads").join(&table.name), "") 452 .map_err(TableStoreError::SaveHeads) 453 } 454 455 fn remove_head(&self, table: &Arc<ReadonlyTable>) { 456 // It's fine if the old head was not found. It probably means 457 // that we're on a distributed file system where the locking 458 // doesn't work. We'll probably end up with two current 459 // heads. We'll detect that next time we load the table. 
460 std::fs::remove_file(self.dir.join("heads").join(&table.name)).ok(); 461 } 462 463 fn lock(&self) -> TableStoreResult<FileLock> { 464 FileLock::lock(self.dir.join("lock")).map_err(TableStoreError::Lock) 465 } 466 467 fn load_table(&self, name: String) -> TableStoreResult<Arc<ReadonlyTable>> { 468 { 469 let read_locked_cached = self.cached_tables.read().unwrap(); 470 if let Some(table) = read_locked_cached.get(&name).cloned() { 471 return Ok(table); 472 } 473 } 474 let to_load_err = |err| TableStoreError::LoadSegment { 475 name: name.clone(), 476 err, 477 }; 478 let table_file_path = self.dir.join(&name); 479 let mut table_file = File::open(table_file_path).map_err(to_load_err)?; 480 let table = ReadonlyTable::load_from(&mut table_file, self, name, self.key_size)?; 481 { 482 let mut write_locked_cache = self.cached_tables.write().unwrap(); 483 write_locked_cache.insert(table.name.clone(), table.clone()); 484 } 485 Ok(table) 486 } 487 488 fn get_head_tables(&self) -> TableStoreResult<Vec<Arc<ReadonlyTable>>> { 489 let mut tables = vec![]; 490 for head_entry in 491 std::fs::read_dir(self.dir.join("heads")).map_err(TableStoreError::LoadHeads)? 492 { 493 let head_file_name = head_entry.map_err(TableStoreError::LoadHeads)?.file_name(); 494 let table = self.load_table(head_file_name.to_str().unwrap().to_string())?; 495 tables.push(table); 496 } 497 Ok(tables) 498 } 499 500 pub fn get_head(&self) -> TableStoreResult<Arc<ReadonlyTable>> { 501 let mut tables = self.get_head_tables()?; 502 503 if tables.is_empty() { 504 let empty_table = MutableTable::full(self.key_size); 505 self.save_table(empty_table) 506 } else if tables.len() == 1 { 507 Ok(tables.pop().unwrap()) 508 } else { 509 // There are multiple heads. We take a lock, then check if there are still 510 // multiple heads (it's likely that another process was in the process of 511 // deleting on of them). If there are still multiple heads, we attempt to 512 // merge all the tables into one. 
We then save that table and record the new 513 // head. Note that the locking isn't necessary for correctness; we 514 // take the lock only to avoid other concurrent processes from doing 515 // the same work (and producing another set of divergent heads). 516 let (table, _) = self.get_head_locked()?; 517 Ok(table) 518 } 519 } 520 521 pub fn get_head_locked(&self) -> TableStoreResult<(Arc<ReadonlyTable>, FileLock)> { 522 let lock = self.lock()?; 523 let mut tables = self.get_head_tables()?; 524 525 if tables.is_empty() { 526 let empty_table = MutableTable::full(self.key_size); 527 let table = self.save_table(empty_table)?; 528 return Ok((table, lock)); 529 } 530 531 if tables.len() == 1 { 532 // Return early so we don't write a table with no changes compared to its parent 533 return Ok((tables.pop().unwrap(), lock)); 534 } 535 536 let mut merged_table = MutableTable::incremental(tables[0].clone()); 537 for other in &tables[1..] { 538 merged_table.merge_in(other); 539 } 540 let merged_table = self.save_table(merged_table)?; 541 for table in &tables[1..] 
{ 542 self.remove_head(table); 543 } 544 Ok((merged_table, lock)) 545 } 546} 547 548#[cfg(test)] 549mod tests { 550 use test_case::test_case; 551 552 use super::*; 553 use crate::tests::new_temp_dir; 554 555 #[test_case(false; "memory")] 556 #[test_case(true; "file")] 557 fn stacked_table_empty(on_disk: bool) { 558 let temp_dir = new_temp_dir(); 559 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 560 let mut_table = store.get_head().unwrap().start_mutation(); 561 let mut _saved_table = None; 562 let table: &dyn TableSegment = if on_disk { 563 _saved_table = Some(store.save_table(mut_table).unwrap()); 564 _saved_table.as_ref().unwrap().as_ref() 565 } else { 566 &mut_table 567 }; 568 569 // Cannot find any keys 570 assert_eq!(table.get_value(b"\0\0\0"), None); 571 assert_eq!(table.get_value(b"aaa"), None); 572 assert_eq!(table.get_value(b"\xff\xff\xff"), None); 573 } 574 575 #[test_case(false; "memory")] 576 #[test_case(true; "file")] 577 fn stacked_table_single_key(on_disk: bool) { 578 let temp_dir = new_temp_dir(); 579 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 580 let mut mut_table = store.get_head().unwrap().start_mutation(); 581 mut_table.add_entry(b"abc".to_vec(), b"value".to_vec()); 582 let mut _saved_table = None; 583 let table: &dyn TableSegment = if on_disk { 584 _saved_table = Some(store.save_table(mut_table).unwrap()); 585 _saved_table.as_ref().unwrap().as_ref() 586 } else { 587 &mut_table 588 }; 589 590 // Can find expected keys 591 assert_eq!(table.get_value(b"\0\0\0"), None); 592 assert_eq!(table.get_value(b"abc"), Some(b"value".as_slice())); 593 assert_eq!(table.get_value(b"\xff\xff\xff"), None); 594 } 595 596 #[test_case(false; "memory")] 597 #[test_case(true; "file")] 598 fn stacked_table_multiple_keys(on_disk: bool) { 599 let temp_dir = new_temp_dir(); 600 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 601 let mut mut_table = store.get_head().unwrap().start_mutation(); 602 
mut_table.add_entry(b"zzz".to_vec(), b"val3".to_vec()); 603 mut_table.add_entry(b"abc".to_vec(), b"value1".to_vec()); 604 mut_table.add_entry(b"abd".to_vec(), b"value 2".to_vec()); 605 let mut _saved_table = None; 606 let table: &dyn TableSegment = if on_disk { 607 _saved_table = Some(store.save_table(mut_table).unwrap()); 608 _saved_table.as_ref().unwrap().as_ref() 609 } else { 610 &mut_table 611 }; 612 613 // Can find expected keys 614 assert_eq!(table.get_value(b"\0\0\0"), None); 615 assert_eq!(table.get_value(b"abb"), None); 616 assert_eq!(table.get_value(b"abc"), Some(b"value1".as_slice())); 617 assert_eq!(table.get_value(b"abd"), Some(b"value 2".as_slice())); 618 assert_eq!(table.get_value(b"abe"), None); 619 assert_eq!(table.get_value(b"zzz"), Some(b"val3".as_slice())); 620 assert_eq!(table.get_value(b"\xff\xff\xff"), None); 621 } 622 623 #[test] 624 fn stacked_table_multiple_keys_with_parent_file() { 625 let temp_dir = new_temp_dir(); 626 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 627 let mut mut_table = store.get_head().unwrap().start_mutation(); 628 mut_table.add_entry(b"abd".to_vec(), b"value 2".to_vec()); 629 mut_table.add_entry(b"abc".to_vec(), b"value1".to_vec()); 630 mut_table.add_entry(b"zzz".to_vec(), b"val3".to_vec()); 631 for round in 0..10 { 632 for i in 0..10 { 633 mut_table.add_entry( 634 format!("x{i}{round}").into_bytes(), 635 format!("value {i}{round}").into_bytes(), 636 ); 637 } 638 let saved_table = store.save_table(mut_table).unwrap(); 639 mut_table = MutableTable::incremental(saved_table); 640 } 641 642 // Can find expected keys 643 assert_eq!(mut_table.get_value(b"\0\0\0"), None); 644 assert_eq!(mut_table.get_value(b"x.."), None); 645 assert_eq!(mut_table.get_value(b"x14"), Some(b"value 14".as_slice())); 646 assert_eq!(mut_table.get_value(b"x41"), Some(b"value 41".as_slice())); 647 assert_eq!(mut_table.get_value(b"x49"), Some(b"value 49".as_slice())); 648 assert_eq!(mut_table.get_value(b"x94"), Some(b"value 
94".as_slice())); 649 assert_eq!(mut_table.get_value(b"xAA"), None); 650 assert_eq!(mut_table.get_value(b"\xff\xff\xff"), None); 651 } 652 653 #[test] 654 fn stacked_table_merge() { 655 let temp_dir = new_temp_dir(); 656 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 657 let mut mut_base_table = store.get_head().unwrap().start_mutation(); 658 mut_base_table.add_entry(b"abc".to_vec(), b"value1".to_vec()); 659 let base_table = store.save_table(mut_base_table).unwrap(); 660 661 let mut mut_table1 = MutableTable::incremental(base_table.clone()); 662 mut_table1.add_entry(b"abd".to_vec(), b"value 2".to_vec()); 663 mut_table1.add_entry(b"zzz".to_vec(), b"val3".to_vec()); 664 mut_table1.add_entry(b"mmm".to_vec(), b"side 1".to_vec()); 665 let table1 = store.save_table(mut_table1).unwrap(); 666 let mut mut_table2 = MutableTable::incremental(base_table); 667 mut_table2.add_entry(b"yyy".to_vec(), b"val5".to_vec()); 668 mut_table2.add_entry(b"mmm".to_vec(), b"side 2".to_vec()); 669 mut_table2.add_entry(b"abe".to_vec(), b"value 4".to_vec()); 670 mut_table2.merge_in(&table1); 671 672 // Can find expected keys 673 assert_eq!(mut_table2.get_value(b"\0\0\0"), None); 674 assert_eq!(mut_table2.get_value(b"abc"), Some(b"value1".as_slice())); 675 assert_eq!(mut_table2.get_value(b"abd"), Some(b"value 2".as_slice())); 676 assert_eq!(mut_table2.get_value(b"abe"), Some(b"value 4".as_slice())); 677 // The caller shouldn't write two values for the same key, so it's undefined 678 // which wins, but let's test how it currently behaves. 
679 assert_eq!(mut_table2.get_value(b"mmm"), Some(b"side 1".as_slice())); 680 assert_eq!(mut_table2.get_value(b"yyy"), Some(b"val5".as_slice())); 681 assert_eq!(mut_table2.get_value(b"zzz"), Some(b"val3".as_slice())); 682 assert_eq!(mut_table2.get_value(b"\xff\xff\xff"), None); 683 } 684 685 #[test] 686 fn stacked_table_automatic_merge() { 687 // Same test as above, but here we let the store do the merging on load 688 let temp_dir = new_temp_dir(); 689 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 690 let mut mut_base_table = store.get_head().unwrap().start_mutation(); 691 mut_base_table.add_entry(b"abc".to_vec(), b"value1".to_vec()); 692 let base_table = store.save_table(mut_base_table).unwrap(); 693 694 let mut mut_table1 = MutableTable::incremental(base_table.clone()); 695 mut_table1.add_entry(b"abd".to_vec(), b"value 2".to_vec()); 696 mut_table1.add_entry(b"zzz".to_vec(), b"val3".to_vec()); 697 mut_table1.add_entry(b"mmm".to_vec(), b"side 1".to_vec()); 698 store.save_table(mut_table1).unwrap(); 699 let mut mut_table2 = MutableTable::incremental(base_table); 700 mut_table2.add_entry(b"yyy".to_vec(), b"val5".to_vec()); 701 mut_table2.add_entry(b"mmm".to_vec(), b"side 2".to_vec()); 702 mut_table2.add_entry(b"abe".to_vec(), b"value 4".to_vec()); 703 let table2 = store.save_table(mut_table2).unwrap(); 704 705 // The saved table does not have the keys from table1 706 assert_eq!(table2.get_value(b"abd"), None); 707 708 // Can find expected keys in the merged table we get from get_head() 709 let merged_table = store.get_head().unwrap(); 710 assert_eq!(merged_table.get_value(b"\0\0\0"), None); 711 assert_eq!(merged_table.get_value(b"abc"), Some(b"value1".as_slice())); 712 assert_eq!(merged_table.get_value(b"abd"), Some(b"value 2".as_slice())); 713 assert_eq!(merged_table.get_value(b"abe"), Some(b"value 4".as_slice())); 714 // The caller shouldn't write two values for the same key, so it's undefined 715 // which wins. 
716 let value_mmm = merged_table.get_value(b"mmm"); 717 assert!(value_mmm == Some(b"side 1".as_slice()) || value_mmm == Some(b"side 2".as_slice())); 718 assert_eq!(merged_table.get_value(b"yyy"), Some(b"val5".as_slice())); 719 assert_eq!(merged_table.get_value(b"zzz"), Some(b"val3".as_slice())); 720 assert_eq!(merged_table.get_value(b"\xff\xff\xff"), None); 721 } 722 723 #[test] 724 fn stacked_table_store_save_empty() { 725 let temp_dir = new_temp_dir(); 726 let store = TableStore::init(temp_dir.path().to_path_buf(), 3); 727 728 let mut mut_table = store.get_head().unwrap().start_mutation(); 729 mut_table.add_entry(b"abc".to_vec(), b"value".to_vec()); 730 store.save_table(mut_table).unwrap(); 731 732 let mut_table = store.get_head().unwrap().start_mutation(); 733 store.save_table(mut_table).unwrap(); 734 735 // Table head shouldn't be removed on empty save 736 let table = store.get_head().unwrap(); 737 assert_eq!(table.get_value(b"abc"), Some(b"value".as_slice())); 738 } 739}