diff --git a/crates/core/src/db/relational_db.rs b/crates/core/src/db/relational_db.rs index 4cf13096c96..422e1feab42 100644 --- a/crates/core/src/db/relational_db.rs +++ b/crates/core/src/db/relational_db.rs @@ -50,7 +50,7 @@ use spacetimedb_sats::{AlgebraicType, AlgebraicValue, ProductType, ProductValue} use spacetimedb_schema::def::{ModuleDef, TableDef, ViewDef}; use spacetimedb_schema::reducer_name::ReducerName; use spacetimedb_schema::schema::{ - ColumnSchema, IndexSchema, RowLevelSecuritySchema, Schema, SequenceSchema, TableSchema, + ColumnSchema, ConstraintSchema, IndexSchema, RowLevelSecuritySchema, Schema, SequenceSchema, TableSchema, }; use spacetimedb_schema::table_name::TableName; use spacetimedb_snapshot::{ReconstructedSnapshot, SnapshotError, SnapshotRepository}; @@ -1482,6 +1482,15 @@ impl RelationalDB { Ok(self.inner.drop_sequence_mut_tx(tx, seq_id)?) } + /// Creates a constraint, making the corresponding index unique if applicable. + pub fn create_constraint( + &self, + tx: &mut MutTx, + constraint: ConstraintSchema, + ) -> Result<ConstraintId, DBError> { + Ok(self.inner.create_constraint_mut_tx(tx, constraint)?) + } + ///Removes the [Constraints] from database instance pub fn drop_constraint(&self, tx: &mut MutTx, constraint_id: ConstraintId) -> Result<(), DBError> { Ok(self.inner.drop_constraint_mut_tx(tx, constraint_id)?) 
diff --git a/crates/core/src/db/update.rs b/crates/core/src/db/update.rs index 896a1325e83..141eb12dfd0 100644 --- a/crates/core/src/db/update.rs +++ b/crates/core/src/db/update.rs @@ -6,6 +6,7 @@ use spacetimedb_lib::db::auth::StTableType; use spacetimedb_lib::identity::AuthCtx; use spacetimedb_lib::AlgebraicValue; use spacetimedb_primitives::{ColSet, TableId}; +use spacetimedb_schema::schema::ConstraintSchema; use spacetimedb_schema::auto_migrate::{AutoMigratePlan, ManualMigratePlan, MigratePlan}; use spacetimedb_schema::def::{TableDef, ViewDef}; use spacetimedb_schema::schema::{column_schemas_from_defs, IndexSchema, Schema, SequenceSchema, TableSchema}; @@ -220,6 +221,24 @@ fn auto_migrate_database( ); stdb.drop_constraint(tx, constraint_schema.constraint_id)?; } + spacetimedb_schema::auto_migrate::AutoMigrateStep::AddConstraint(constraint_name) => { + let table_def = plan.new.stored_in_table_def(constraint_name).unwrap(); + let constraint_def = &table_def.constraints[constraint_name]; + let table_id = stdb.table_id_from_name_mut(tx, &table_def.name)?.unwrap(); + let constraint_schema = ConstraintSchema::from_module_def( + plan.new, + constraint_def, + table_id, + spacetimedb_primitives::ConstraintId::SENTINEL, + ); + log!( + logger, + "Adding constraint `{}` on table `{}`", + constraint_name, + table_def.name + ); + stdb.create_constraint(tx, constraint_schema)?; + } spacetimedb_schema::auto_migrate::AutoMigrateStep::AddSequence(sequence_name) => { let table_def = plan.new.stored_in_table_def(sequence_name).unwrap(); let sequence_def = table_def.sequences.get(sequence_name).unwrap(); diff --git a/crates/datastore/src/locking_tx_datastore/committed_state.rs b/crates/datastore/src/locking_tx_datastore/committed_state.rs index fc38f821225..9e95d78ba70 100644 --- a/crates/datastore/src/locking_tx_datastore/committed_state.rs +++ b/crates/datastore/src/locking_tx_datastore/committed_state.rs @@ -149,7 +149,7 @@ impl CommittedState { } /// Returns the views that 
perform a full scan of this table - pub(super) fn views_for_table_scan(&self, table_id: &TableId) -> impl Iterator + use<'_> { + pub(super) fn views_for_table_scan(&self, table_id: &TableId) -> impl Iterator { self.read_sets.views_for_table_scan(table_id) } @@ -158,7 +158,7 @@ impl CommittedState { &'a self, table_id: &TableId, row_ref: RowRef<'a>, - ) -> impl Iterator + use<'a> { + ) -> impl Iterator { self.read_sets.views_for_index_seek(table_id, row_ref) } } @@ -1324,14 +1324,30 @@ impl CommittedState { .unwrap_or_else(|e| match e {}); } // A constraint was removed. Add it back. - ConstraintRemoved(table_id, constraint_schema) => { + ConstraintRemoved(table_id, constraint_schema, index_id) => { let table = self.tables.get_mut(&table_id)?; table.with_mut_schema(|s| s.update_constraint(constraint_schema)); + // If the constraint had a unique index, make it unique again. + if let Some(index_id) = index_id { + if let Some(idx) = table.indexes.get_mut(&index_id) { + idx.make_unique().expect("rollback: index should have no duplicates"); + } + } } // A constraint was added. Remove it. - ConstraintAdded(table_id, constraint_id) => { + ConstraintAdded(table_id, constraint_id, index_id, pointer_map) => { let table = self.tables.get_mut(&table_id)?; table.with_mut_schema(|s| s.remove_constraint(constraint_id)); + // If the constraint made an index unique, revert it to non-unique. + if let Some(index_id) = index_id { + if let Some(idx) = table.indexes.get_mut(&index_id) { + idx.make_non_unique(); + } + } + // Restore the pointer map if it was taken. + if let Some(pm) = pointer_map { + table.restore_pointer_map(pm); + } } // A sequence was removed. Add it back. 
SequenceRemoved(table_id, seq, schema) => { diff --git a/crates/datastore/src/locking_tx_datastore/datastore.rs b/crates/datastore/src/locking_tx_datastore/datastore.rs index a08b6386b36..e534f23adf4 100644 --- a/crates/datastore/src/locking_tx_datastore/datastore.rs +++ b/crates/datastore/src/locking_tx_datastore/datastore.rs @@ -42,7 +42,7 @@ use spacetimedb_sats::{memory_usage::MemoryUsage, Deserialize}; use spacetimedb_schema::table_name::TableName; use spacetimedb_schema::{ reducer_name::ReducerName, - schema::{ColumnSchema, IndexSchema, SequenceSchema, TableSchema}, + schema::{ColumnSchema, ConstraintSchema, IndexSchema, SequenceSchema, TableSchema}, }; use spacetimedb_snapshot::{ReconstructedSnapshot, SnapshotRepository}; use spacetimedb_table::{ @@ -575,6 +575,14 @@ impl MutTxDatastore for Locking { tx.sequence_id_from_name(sequence_name) } + fn create_constraint_mut_tx( + &self, + tx: &mut Self::MutTx, + constraint: ConstraintSchema, + ) -> Result { + tx.create_constraint(constraint) + } + fn drop_constraint_mut_tx(&self, tx: &mut Self::MutTx, constraint_id: ConstraintId) -> Result<()> { tx.drop_constraint(constraint_id) } @@ -1206,13 +1214,14 @@ impl spacetimedb_commitlog::payload::txdata::Visitor for ReplayVi // TODO: avoid clone Ok(schema) => schema.table_name.clone(), - Err(_) => match self.dropped_table_names.remove(&table_id) { - Some(name) => name, - _ => { + Err(_) => { + if let Some(name) = self.dropped_table_names.remove(&table_id) { + name + } else { return self .process_error(anyhow!("Error looking up name for truncated table {table_id:?}").into()); } - }, + } }; if let Err(e) = self.committed_state.replay_truncate(table_id).with_context(|| { @@ -1300,7 +1309,7 @@ mod tests { use spacetimedb_lib::error::ResultTest; use spacetimedb_lib::st_var::StVarValue; use spacetimedb_lib::{resolved_type_via_v9, ScheduleAt, TimeDuration}; - use spacetimedb_primitives::{col_list, ArgId, ColId, ScheduleId, ViewId}; + use spacetimedb_primitives::{col_list, 
ArgId, ColId, ColSet, ScheduleId, ViewId}; use spacetimedb_sats::algebraic_value::ser::value_serialize; use spacetimedb_sats::bsatn::ToBsatn; use spacetimedb_sats::layout::RowTypeLayout; @@ -3975,4 +3984,197 @@ mod tests { ); Ok(()) } + + /// Helper: create a table with a non-unique btree index on `col_pos` but no constraints. + fn table_with_non_unique_index(col_pos: u16) -> TableSchema { + let indices = vec![IndexSchema::for_test( + "Foo_idx_btree", + BTreeAlgorithm::from(col_pos), + )]; + basic_table_schema_with_indices(indices, Vec::::new()) + } + + /// Helper: create a table with a non-unique btree index on multiple columns but no constraints. + fn table_with_non_unique_multi_col_index(cols: impl Into) -> TableSchema { + let indices = vec![IndexSchema::for_test( + "Foo_multi_idx_btree", + BTreeAlgorithm { columns: cols.into() }, + )]; + basic_table_schema_with_indices(indices, Vec::::new()) + } + + #[test] + fn test_create_constraint_makes_index_unique() -> ResultTest<()> { + let datastore = get_datastore()?; + + // TX1: create table with non-unique index on col 0. + let mut tx = begin_mut_tx(&datastore); + let schema = table_with_non_unique_index(0); + let table_id = datastore.create_table_mut_tx(&mut tx, schema)?; + commit(&datastore, tx)?; + + // TX2: insert unique rows and commit. + let mut tx = begin_mut_tx(&datastore); + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?; + insert(&datastore, &mut tx, table_id, &u32_str_u32(2, "Bob", 25))?; + commit(&datastore, tx)?; + + // TX3: add unique constraint — should succeed since data is unique. + let mut tx = begin_mut_tx(&datastore); + let mut constraint = ConstraintSchema::unique_for_test("Foo_id_unique", 0u16); + constraint.table_id = table_id; + datastore.create_constraint_mut_tx(&mut tx, constraint)?; + + // Inserting a duplicate should now fail (index is unique). 
+ let dup_result = insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Charlie", 20)); + assert!(dup_result.is_err(), "duplicate insert should fail after adding unique constraint"); + commit(&datastore, tx)?; + + Ok(()) + } + + #[test] + fn test_create_constraint_rollback_restores_non_unique() -> ResultTest<()> { + let datastore = get_datastore()?; + + // TX1: create table with non-unique index on col 0. + let mut tx = begin_mut_tx(&datastore); + let schema = table_with_non_unique_index(0); + let table_id = datastore.create_table_mut_tx(&mut tx, schema)?; + commit(&datastore, tx)?; + + // TX2: insert unique rows and commit. + let mut tx = begin_mut_tx(&datastore); + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?; + insert(&datastore, &mut tx, table_id, &u32_str_u32(2, "Bob", 25))?; + commit(&datastore, tx)?; + + // TX3: add unique constraint, then rollback. + let mut tx = begin_mut_tx(&datastore); + let mut constraint = ConstraintSchema::unique_for_test("Foo_id_unique", 0u16); + constraint.table_id = table_id; + datastore.create_constraint_mut_tx(&mut tx, constraint)?; + let _ = datastore.rollback_mut_tx(tx); + + // TX4: after rollback, duplicates should be allowed again. + let mut tx = begin_mut_tx(&datastore); + let result = insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Charlie", 20)); + assert!(result.is_ok(), "duplicate insert should succeed after rollback of unique constraint"); + Ok(()) + } + + #[test] + fn test_create_constraint_fails_with_duplicates() -> ResultTest<()> { + let datastore = get_datastore()?; + + // TX1: create table with non-unique index on col 0. + let mut tx = begin_mut_tx(&datastore); + let schema = table_with_non_unique_index(0); + let table_id = datastore.create_table_mut_tx(&mut tx, schema)?; + commit(&datastore, tx)?; + + // TX2: insert duplicate rows and commit. 
+ let mut tx = begin_mut_tx(&datastore); + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?; + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Bob", 25))?; // duplicate id=1 + commit(&datastore, tx)?; + + // TX3: try to add unique constraint — should fail. + let mut tx = begin_mut_tx(&datastore); + let mut constraint = ConstraintSchema::unique_for_test("Foo_id_unique", 0u16); + constraint.table_id = table_id; + let result = datastore.create_constraint_mut_tx(&mut tx, constraint); + assert!(result.is_err(), "create_constraint should fail when duplicates exist"); + + Ok(()) + } + + #[test] + fn test_create_constraint_multi_col() -> ResultTest<()> { + let datastore = get_datastore()?; + + // TX1: create table with non-unique multi-column index on (col 0, col 2). + let mut tx = begin_mut_tx(&datastore); + let schema = table_with_non_unique_multi_col_index(col_list![0, 2]); + let table_id = datastore.create_table_mut_tx(&mut tx, schema)?; + commit(&datastore, tx)?; + + // TX2: insert rows unique on (id, age) and commit. + let mut tx = begin_mut_tx(&datastore); + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?; + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Bob", 25))?; // same id, different age + commit(&datastore, tx)?; + + // TX3: add unique constraint on (col 0, col 2) — should succeed. + let mut tx = begin_mut_tx(&datastore); + let mut constraint = ConstraintSchema::unique_for_test( + "Foo_id_age_unique", + ColSet::from(col_list![0, 2]), + ); + constraint.table_id = table_id; + datastore.create_constraint_mut_tx(&mut tx, constraint)?; + commit(&datastore, tx)?; + + Ok(()) + } + + #[test] + fn test_drop_constraint_makes_index_non_unique() -> ResultTest<()> { + let datastore = get_datastore()?; + + // TX1: create table with unique constraint. 
+ let mut tx = begin_mut_tx(&datastore); + let schema = basic_table_schema_with_indices(basic_indices(), basic_constraints()); + let table_id = datastore.create_table_mut_tx(&mut tx, schema)?; + commit(&datastore, tx)?; + + // TX2: insert a row. + let mut tx = begin_mut_tx(&datastore); + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?; + commit(&datastore, tx)?; + + // TX3: drop the unique constraint on col 0. + let mut tx = begin_mut_tx(&datastore); + let constraint_id = tx + .constraint_id_from_name("Foo_id_key")? + .expect("constraint should exist"); + datastore.drop_constraint_mut_tx(&mut tx, constraint_id)?; + + // Inserting a duplicate on col 0 should now succeed. + let result = insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Bob", 25)); + assert!(result.is_ok(), "duplicate insert should succeed after dropping unique constraint"); + commit(&datastore, tx)?; + + Ok(()) + } + + #[test] + fn test_drop_constraint_rollback_keeps_unique() -> ResultTest<()> { + let datastore = get_datastore()?; + + // TX1: create table with unique constraint. + let mut tx = begin_mut_tx(&datastore); + let schema = basic_table_schema_with_indices(basic_indices(), basic_constraints()); + let table_id = datastore.create_table_mut_tx(&mut tx, schema)?; + commit(&datastore, tx)?; + + // TX2: insert a row. + let mut tx = begin_mut_tx(&datastore); + insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?; + commit(&datastore, tx)?; + + // TX3: drop constraint, then rollback. + let mut tx = begin_mut_tx(&datastore); + let constraint_id = tx + .constraint_id_from_name("Foo_id_key")? + .expect("constraint should exist"); + datastore.drop_constraint_mut_tx(&mut tx, constraint_id)?; + let _ = datastore.rollback_mut_tx(tx); + + // TX4: after rollback, constraint should be back — duplicates should fail. 
+ let mut tx = begin_mut_tx(&datastore); + let dup_result = insert(&datastore, &mut tx, table_id, &u32_str_u32(1, "Bob", 25)); + assert!(dup_result.is_err(), "duplicate insert should fail after rollback of drop constraint"); + Ok(()) + } } diff --git a/crates/datastore/src/locking_tx_datastore/mut_tx.rs b/crates/datastore/src/locking_tx_datastore/mut_tx.rs index 4c9ec984b84..d6a03973014 100644 --- a/crates/datastore/src/locking_tx_datastore/mut_tx.rs +++ b/crates/datastore/src/locking_tx_datastore/mut_tx.rs @@ -111,7 +111,7 @@ impl ViewReadSets { } /// Returns the views that perform a full scan of this table - pub fn views_for_table_scan(&self, table_id: &TableId) -> impl Iterator + use<'_> { + pub fn views_for_table_scan(&self, table_id: &TableId) -> impl Iterator { self.tables .get(table_id) .into_iter() @@ -149,7 +149,7 @@ impl ViewReadSets { &'a self, table_id: &TableId, row_ptr: RowRef<'a>, - ) -> impl Iterator + use<'a> { + ) -> impl Iterator { self.tables .get(table_id) .into_iter() @@ -301,11 +301,11 @@ impl MutTxId { upper: Bound, ) { // Check for precise index seek. - if let (Bound::Included(low_val), Bound::Included(up_val)) = (&lower, &upper) - && low_val == up_val - { - self.record_index_scan_point_inner(view, table_id, index_id, low_val.clone()); - return; + if let (Bound::Included(low_val), Bound::Included(up_val)) = (&lower, &upper) { + if low_val == up_val { + self.record_index_scan_point_inner(view, table_id, index_id, low_val.clone()); + return; + } } // Everything else is treated as a table scan. @@ -705,7 +705,7 @@ impl MutTxId { // Insert constraints into `st_constraints`. for constraint in constraints { - self.create_constraint(constraint)?; + self.create_st_constraint(constraint)?; } // Insert sequences into `st_sequences`. @@ -1804,19 +1804,12 @@ impl MutTxId { }) } - /// Create a constraint. - /// - /// Requires: - /// - `constraint.constraint_name` must not be used for any other database entity. 
- /// - `constraint.constraint_id == ConstraintId::SENTINEL` - /// - `constraint.table_id != TableId::SENTINEL` - /// - `is_unique` must be `true` if and only if a unique constraint will exist on - /// `ColSet::from(&constraint.constraint_algorithm.columns())` after this transaction is committed. + /// Insert constraint metadata into system tables only. /// - /// Ensures: - /// - The constraint metadata is inserted into the system tables (and other data structures reflecting them). - /// - The returned ID is unique and is not `constraintId::SENTINEL`. - fn create_constraint(&mut self, mut constraint: ConstraintSchema) -> Result { + /// This is used during `create_table` where the index is already created + /// with the correct uniqueness. For adding constraints to existing tables, + /// use [`create_constraint`] instead. + fn create_st_constraint(&mut self, mut constraint: ConstraintSchema) -> Result { if constraint.table_id == TableId::SENTINEL { return Err(anyhow::anyhow!("`table_id` must not be `TableId::SENTINEL` in `{constraint:#?}`").into()); } @@ -1849,15 +1842,15 @@ impl MutTxId { let ((tx_table, ..), (commit_table, ..)) = self.get_or_create_insert_table_mut(table_id)?; constraint.constraint_id = constraint_id; - // This won't clone-write when creating a table but likely to otherwise. tx_table.with_mut_schema_and_clone(commit_table, |s| s.update_constraint(constraint.clone())); - self.push_schema_change(PendingSchemaChange::ConstraintAdded(table_id, constraint_id)); + self.push_schema_change(PendingSchemaChange::ConstraintAdded(table_id, constraint_id, None, None)); log::trace!("CONSTRAINT CREATED: {constraint_id}"); Ok(constraint_id) } - pub fn drop_constraint(&mut self, constraint_id: ConstraintId) -> Result<()> { + /// Remove constraint metadata from system tables only. + fn drop_st_constraint(&mut self, constraint_id: ConstraintId) -> Result<(TableId, ConstraintSchema)> { // Delete row in `st_constraint`. 
let st_constraint_ref = self .iter_by_col_eq( @@ -1870,19 +1863,176 @@ impl MutTxId { let table_id = st_constraint_ref.read_col(StConstraintFields::TableId)?; self.delete(ST_CONSTRAINT_ID, st_constraint_ref.pointer())?; - // Remove constraint in transaction's insert table. let ((tx_table, ..), (commit_table, ..)) = self.get_or_create_insert_table_mut(table_id)?; - // This likely will do a clone-write as over time? - // The schema might have found other referents. let schema = commit_table .with_mut_schema_and_clone(tx_table, |s| s.remove_constraint(constraint_id)) .expect("there should be a schema in the committed state if we reach here"); - self.push_schema_change(PendingSchemaChange::ConstraintRemoved(table_id, schema)); - // TODO(1.0): we should also re-initialize `table` without a unique constraint. - // unless some other unique constraint on the same columns exists. - // NOTE(centril): is this already handled by dropping the corresponding index? - // Probably not in the case where an index - // with the same name goes from being unique to not unique. + + Ok((table_id, schema)) + } + + /// Create a constraint, making the corresponding index unique. + /// + /// This inserts constraint metadata AND converts the in-memory index + /// from non-unique to unique. If the existing data contains duplicate + /// values in the constrained columns, an error is returned. + pub fn create_constraint(&mut self, constraint: ConstraintSchema) -> Result { + let table_id = constraint.table_id; + let unique_cols = constraint.data.unique_columns().cloned(); + + // Step 1: Insert metadata into system tables. + let constraint_id = self.create_st_constraint(constraint)?; + + // Step 2: If this is a unique constraint, make the index unique. 
+ let (made_unique_index_id, pointer_map) = if let Some(cols) = unique_cols { + let col_list: ColList = cols.into(); + + let ((tx_table, _, tx_delete_table), (commit_table, commit_blob_store, _)) = + self.get_or_create_insert_table_mut(table_id)?; + + // Find the index matching these columns. + let index_id = commit_table.get_index_by_cols(&col_list).map(|(id, _)| id); + + if let Some(index_id) = index_id { + // Check for duplicates in the committed table's index. + let duplicates = commit_table + .indexes + .get(&index_id) + .expect("index must exist") + .iter_duplicates(); + + if !duplicates.is_empty() { + let total_groups = duplicates.len(); + let examples: String = duplicates + .iter() + .take(10) + .map(|(val, count)| format!(" - {val:?} appears {count} times")) + .collect::>() + .join("\n"); + return Err(anyhow::anyhow!( + "Cannot add unique constraint on table {table_id} column(s) {col_list:?}:\n\ + {total_groups} duplicate group(s) found.\n{examples}{}", + if total_groups > 10 { "\n ... and more" } else { "" } + ) + .into()); + } + + // Record whether this table had a unique index before. + let had_unique = commit_table.has_unique_index(); + + // Make the index unique on both tables. + commit_table + .indexes + .get_mut(&index_id) + .expect("index must exist") + .make_unique() + .expect("duplicates were already checked"); + + tx_table + .indexes + .get_mut(&index_id) + .expect("tx index must exist") + .make_unique() + .expect("tx table should have no duplicates"); + + // Check that the two unique indices can be merged. 
+ let commit_idx = &commit_table.indexes[&index_id]; + let tx_idx = &tx_table.indexes[&index_id]; + let is_deleted = |ptr: &RowPointer| tx_delete_table.contains(*ptr); + commit_idx.can_merge(tx_idx, is_deleted).map_err(|violation| { + let violation = commit_table + .get_row_ref(commit_blob_store, violation) + .expect("row came from scanning the table") + .project(&commit_idx.indexed_columns) + .expect("cols should be valid for this table"); + anyhow::anyhow!("Unique constraint violation during merge: {violation:?}") + })?; + + // Take the pointer map if this is the first unique index. + let pointer_map = if !had_unique { + commit_table.take_pointer_map() + } else { + None + }; + + // Also take tx table's pointer map if needed. + if !had_unique { + tx_table.take_pointer_map(); + } + + (Some(index_id), pointer_map) + } else { + (None, None) + } + } else { + (None, None) + }; + + // Update the pending schema change with index info. + // The last pushed change is our ConstraintAdded from create_st_constraint. + // Replace it with the enriched version. + if let Some(last) = self.tx_state.pending_schema_changes.last_mut() { + *last = PendingSchemaChange::ConstraintAdded( + table_id, + constraint_id, + made_unique_index_id, + pointer_map, + ); + } + + Ok(constraint_id) + } + + /// Drop a constraint, making the corresponding index non-unique. + pub fn drop_constraint(&mut self, constraint_id: ConstraintId) -> Result<()> { + let (table_id, schema) = self.drop_st_constraint(constraint_id)?; + + // If this was a unique constraint, make the index non-unique. 
+ let unique_cols = schema.data.unique_columns().cloned(); + let made_non_unique_index_id = if let Some(cols) = unique_cols { + let col_list: ColList = cols.into(); + + let ((tx_table, ..), (commit_table, commit_blob_store, _)) = + self.get_or_create_insert_table_mut(table_id)?; + + let index_id = commit_table.get_index_by_cols(&col_list).map(|(id, _)| id); + + if let Some(index_id) = index_id { + // Make the index non-unique on both tables. + commit_table + .indexes + .get_mut(&index_id) + .expect("index must exist") + .make_non_unique(); + + tx_table + .indexes + .get_mut(&index_id) + .expect("tx index must exist") + .make_non_unique(); + + // If no unique indices remain, rebuild the pointer map. + if !commit_table.has_unique_index() { + let pm = commit_table + .scan_rows(commit_blob_store) + .map(|row_ref| (row_ref.row_hash(), row_ref.pointer())) + .collect(); + commit_table.restore_pointer_map(pm); + } + + Some(index_id) + } else { + None + } + } else { + None + }; + + self.push_schema_change(PendingSchemaChange::ConstraintRemoved( + table_id, + schema, + made_non_unique_index_id, + )); Ok(()) } @@ -2604,9 +2754,7 @@ impl MutTxId { ) { // This is possible on restart if the database was previously running a version // before this system table was added. 
- log::error!( - "[{database_identity}]: delete_st_client_credentials: attempting to delete credentials for missing connection id ({connection_id}), error: {e}" - ); + log::error!("[{database_identity}]: delete_st_client_credentials: attempting to delete credentials for missing connection id ({connection_id}), error: {e}"); } Ok(()) } @@ -2621,7 +2769,7 @@ impl MutTxId { identity: identity.into(), connection_id: connection_id.into(), }; - match self + if let Some(ptr) = self .iter_by_col_eq( ST_CLIENT_ID, // TODO(perf, minor, centril): consider a `const_col_list([x, ..])` @@ -2632,12 +2780,9 @@ impl MutTxId { .next() .map(|row| row.pointer()) { - Some(ptr) => self.delete(ST_CLIENT_ID, ptr).map(drop)?, - _ => { - log::error!( - "[{database_identity}]: delete_st_client: attempting to delete client ({identity}, {connection_id}), but no st_client row for that client is resident" - ); - } + self.delete(ST_CLIENT_ID, ptr).map(drop)? + } else { + log::error!("[{database_identity}]: delete_st_client: attempting to delete client ({identity}, {connection_id}), but no st_client row for that client is resident"); } self.delete_st_client_credentials(database_identity, connection_id) } diff --git a/crates/datastore/src/locking_tx_datastore/tx_state.rs b/crates/datastore/src/locking_tx_datastore/tx_state.rs index 945fbdd4612..a4e45733773 100644 --- a/crates/datastore/src/locking_tx_datastore/tx_state.rs +++ b/crates/datastore/src/locking_tx_datastore/tx_state.rs @@ -124,10 +124,12 @@ pub enum PendingSchemaChange { /// Only non-representational row-type changes are allowed here, /// so existing rows in the table will be compatible with the new row type. TableAlterRowType(TableId, Vec), - /// The constraint with [`ConstraintSchema`] was added to the table with [`TableId`]. - ConstraintRemoved(TableId, ConstraintSchema), + /// The constraint with [`ConstraintSchema`] was removed from the table with [`TableId`]. + /// If an index was made non-unique, its [`IndexId`] is stored. 
+ ConstraintRemoved(TableId, ConstraintSchema, Option<IndexId>), /// The constraint with [`ConstraintId`] was added to the table with [`TableId`]. - ConstraintAdded(TableId, ConstraintId), + /// If an index was made unique, its [`IndexId`] and the taken [`PointerMap`] are stored. + ConstraintAdded(TableId, ConstraintId, Option<IndexId>, Option<PointerMap>), /// The [`Sequence`] with [`SequenceSchema`] was added to the table with [`TableId`]. SequenceRemoved(TableId, Sequence, SequenceSchema), /// The sequence with [`SequenceId`] was added to the table with [`TableId`]. @@ -147,10 +149,12 @@ impl MemoryUsage for PendingSchemaChange { Self::TableAdded(table_id) => table_id.heap_usage(), Self::TableAlterAccess(table_id, st_access) => table_id.heap_usage() + st_access.heap_usage(), Self::TableAlterRowType(table_id, column_schemas) => table_id.heap_usage() + column_schemas.heap_usage(), - Self::ConstraintRemoved(table_id, constraint_schema) => { - table_id.heap_usage() + constraint_schema.heap_usage() + Self::ConstraintRemoved(table_id, constraint_schema, index_id) => { + table_id.heap_usage() + constraint_schema.heap_usage() + index_id.heap_usage() + } + Self::ConstraintAdded(table_id, constraint_id, index_id, pointer_map) => { + table_id.heap_usage() + constraint_id.heap_usage() + index_id.heap_usage() + pointer_map.heap_usage() + } - Self::ConstraintAdded(table_id, constraint_id) => table_id.heap_usage() + constraint_id.heap_usage(), Self::SequenceRemoved(table_id, sequence, sequence_schema) => { table_id.heap_usage() + sequence.heap_usage() + sequence_schema.heap_usage() } diff --git a/crates/datastore/src/traits.rs b/crates/datastore/src/traits.rs index 9b98c11addf..c05b2d67443 100644 --- a/crates/datastore/src/traits.rs +++ b/crates/datastore/src/traits.rs @@ -15,7 +15,7 @@ use spacetimedb_primitives::*; use spacetimedb_sats::hash::Hash; use spacetimedb_sats::{AlgebraicValue, ProductType, ProductValue}; use spacetimedb_schema::reducer_name::ReducerName; -use 
spacetimedb_schema::schema::{IndexSchema, SequenceSchema, TableSchema}; +use spacetimedb_schema::schema::{ConstraintSchema, IndexSchema, SequenceSchema, TableSchema}; use spacetimedb_schema::table_name::TableName; use spacetimedb_table::static_assert_size; use spacetimedb_table::table::RowRef; @@ -638,6 +638,11 @@ pub trait MutTxDatastore: TxDatastore + MutTx { fn sequence_id_from_name_mut_tx(&self, tx: &Self::MutTx, sequence_name: &str) -> super::Result>; // Constraints + fn create_constraint_mut_tx( + &self, + tx: &mut Self::MutTx, + constraint: ConstraintSchema, + ) -> super::Result; fn drop_constraint_mut_tx(&self, tx: &mut Self::MutTx, constraint_id: ConstraintId) -> super::Result<()>; fn constraint_id_from_name(&self, tx: &Self::MutTx, constraint_name: &str) -> super::Result>; diff --git a/crates/schema/src/auto_migrate.rs b/crates/schema/src/auto_migrate.rs index ab4dfc14fb4..b5e5c80b65f 100644 --- a/crates/schema/src/auto_migrate.rs +++ b/crates/schema/src/auto_migrate.rs @@ -277,6 +277,8 @@ pub enum AutoMigrateStep<'def> { AddTable(::Key<'def>), /// Add an index. AddIndex(::Key<'def>), + /// Add a constraint to an existing table (with data validation precheck). + AddConstraint(::Key<'def>), /// Add a sequence. AddSequence(::Key<'def>), /// Add a schedule annotation to a table. @@ -400,9 +402,6 @@ pub enum AutoMigrateError { )] ChangeWithinColumnTypeRenamedField(ChangeColumnTypeParts), - #[error("Adding a unique constraint {constraint} requires a manual migration")] - AddUniqueConstraint { constraint: RawIdentifier }, - #[error("Changing a unique constraint {constraint} requires a manual migration")] ChangeUniqueConstraint { constraint: RawIdentifier }, @@ -1002,11 +1001,9 @@ fn auto_migrate_constraints(plan: &mut AutoMigratePlan, new_tables: &HashSet<&Id // it's okay to add a constraint in a new table. Ok(()) } else { - // it's not okay to add a new constraint to an existing table. 
- Err(AutoMigrateError::AddUniqueConstraint { - constraint: new.name.clone(), - } - .into()) + // existing table — duplicate detection happens inside create_constraint + plan.steps.push(AutoMigrateStep::AddConstraint(new.key())); + Ok(()) } } Diff::Remove { old } => { @@ -1505,8 +1502,6 @@ mod tests { let apples = expect_identifier("Apples"); let bananas = expect_identifier("Bananas"); - let apples_name_unique_constraint = "Apples_name_key"; - let weight = expect_identifier("weight"); let count = expect_identifier("count"); let name = expect_identifier("name"); @@ -1701,10 +1696,8 @@ mod tests { && type1.0 == prod1_ty && type2.0 == new_prod1_ty ); - expect_error_matching!( - result, - AutoMigrateError::AddUniqueConstraint { constraint } => &constraint[..] == apples_name_unique_constraint - ); + // Note: AddUniqueConstraint is no longer an error — adding unique constraints + // to existing tables is now allowed; duplicate detection happens inside create_constraint. expect_error_matching!( result, diff --git a/crates/schema/src/auto_migrate/formatter.rs b/crates/schema/src/auto_migrate/formatter.rs index a3edad35b97..ef7fa4d58a7 100644 --- a/crates/schema/src/auto_migrate/formatter.rs +++ b/crates/schema/src/auto_migrate/formatter.rs @@ -54,6 +54,10 @@ fn format_step( let index_info = extract_index_info(*index, plan.old)?; f.format_index(&index_info, Action::Removed) } + AutoMigrateStep::AddConstraint(constraint) => { + let constraint_info = extract_constraint_info(*constraint, plan.new)?; + f.format_constraint(&constraint_info, Action::Created) + } AutoMigrateStep::RemoveConstraint(constraint) => { let constraint_info = extract_constraint_info(*constraint, plan.old)?; f.format_constraint(&constraint_info, Action::Removed) diff --git a/crates/table/src/table.rs b/crates/table/src/table.rs index 7a29f32d530..dfce52f3a2b 100644 --- a/crates/table/src/table.rs +++ b/crates/table/src/table.rs @@ -1481,6 +1481,27 @@ Found violation at pointer {ptr:?} to row {:?}.", 
Some(index) } + /// Take the pointer map, if any, returning it. + /// + /// This is used when making an index unique — a unique index subsumes + /// the pointer map's role of preventing duplicate rows. + pub fn take_pointer_map(&mut self) -> Option { + self.pointer_map.take() + } + + /// Restore a previously taken pointer map. + /// + /// This is used on rollback when a unique constraint is removed + /// and no other unique indices remain. + pub fn restore_pointer_map(&mut self, pointer_map: PointerMap) { + self.pointer_map = Some(pointer_map); + } + + /// Returns whether this table has any unique index. + pub fn has_unique_index(&self) -> bool { + self.indexes.values().any(|idx| idx.is_unique()) + } + /// Returns an iterator over all the rows of `self`, yielded as [`RowRef`]s. pub fn scan_rows<'a>(&'a self, blob_store: &'a dyn BlobStore) -> TableScanIter<'a> { TableScanIter { diff --git a/crates/table/src/table_index/hash_index.rs b/crates/table/src/table_index/hash_index.rs index c1fd89cfd97..47a7e664519 100644 --- a/crates/table/src/table_index/hash_index.rs +++ b/crates/table/src/table_index/hash_index.rs @@ -120,3 +120,53 @@ impl Index for HashIndex { Ok(()) } } + +impl HashIndex { + /// Returns an iterator over keys that have more than one row pointer, + /// yielding `(&key, count)` for each duplicate key. + pub(super) fn iter_duplicates(&self) -> impl Iterator { + self.map.iter().filter_map(|(k, entry)| { + let count = entry.count(); + if count > 1 { + Some((k, count)) + } else { + None + } + }) + } + + /// Check for duplicates and, if none, convert into a `HashMap`. + /// + /// Returns `Ok(map)` if every key maps to exactly one row. + /// Returns `Err((self, ptr))` with a witness `RowPointer` of a duplicate if any key + /// maps to more than one row. The original `HashIndex` is returned intact on error. + pub(super) fn check_and_into_unique(self) -> Result, (Self, RowPointer)> { + // First pass: check for duplicates. 
+ let dup = self + .map + .values() + .find_map(|entry| { + if entry.count() > 1 { + Some(entry.iter().next().unwrap()) + } else { + None + } + }); + + if let Some(ptr) = dup { + return Err((self, ptr)); + } + + // No duplicates; conversion is infallible. + let result = self + .map + .into_iter() + .map(|(k, entry)| { + let ptr = entry.iter().next().unwrap(); + (k, ptr) + }) + .collect(); + + Ok(result) + } +} diff --git a/crates/table/src/table_index/mod.rs b/crates/table/src/table_index/mod.rs index a467f87efc8..2b48effd920 100644 --- a/crates/table/src/table_index/mod.rs +++ b/crates/table/src/table_index/mod.rs @@ -405,6 +405,58 @@ macro_rules! same_for_all_types { }; } +/// Defines `try_make_unique`, `into_non_unique`, and `iter_duplicates` +/// for all non-unique ↔ unique variant pairs on `TypedIndex`. +macro_rules! define_uniqueness_conversions { + ( + btree { + $($bt_non:ident <=> $bt_uni:ident : $bt_conv:expr),* $(,)? + } + hash { + $($h_non:ident <=> $h_uni:ident : $h_conv:expr),* $(,)? + } + ) => { + /// Consuming: try to convert a non-unique index to unique. + /// Returns the original on error. + fn try_make_unique(self) -> Result { + match self { + $(Self::$bt_non(mm) => mm.check_and_into_unique() + .map(|m| Self::$bt_uni(BtreeUniqueIndex::from_non_unique(m))) + .map_err(|(mm, p)| (Self::$bt_non(mm), p)),)* + $(Self::$h_non(hi) => hi.check_and_into_unique() + .map(|m| Self::$h_uni(UniqueHashIndex::from_non_unique(m))) + .map_err(|(hi, p)| (Self::$h_non(hi), p)),)* + other => Ok(other), + } + } + + /// Consuming: convert a unique index back to non-unique. + /// Non-unique and direct indices are returned as-is. + fn into_non_unique(self) -> Self { + match self { + $(Self::$bt_uni(um) => Self::$bt_non(um.into_non_unique()),)* + $(Self::$h_uni(uh) => Self::$h_non(uh.into_non_unique()),)* + other => other, + } + } + + /// Returns all duplicate keys (count > 1) in this non-unique index, + /// with keys converted to [`AlgebraicValue`]. 
+ /// Returns an empty vec for unique indices. + fn iter_duplicates(&self) -> Vec<(AlgebraicValue, usize)> { + match self { + $(Self::$bt_non(mm) => mm.iter_duplicates() + .map(|(k, c)| ($bt_conv(k), c)) + .collect(),)* + $(Self::$h_non(hi) => hi.iter_duplicates() + .map(|(k, c)| ($h_conv(k), c)) + .collect(),)* + _ => Vec::new(), + } + } + }; +} + impl MemoryUsage for TypedIndex { fn heap_usage(&self) -> usize { same_for_all_types!(self, this => this.heap_usage()) @@ -1190,6 +1242,81 @@ impl TypedIndex { pub fn num_key_bytes(&self) -> u64 { same_for_all_types!(self, this => this.num_key_bytes()) } + + define_uniqueness_conversions! { + btree { + BtreeBool <=> UniqueBtreeBool : |k: &bool| AlgebraicValue::Bool(*k), + BtreeU8 <=> UniqueBtreeU8 : |k: &u8| AlgebraicValue::U8(*k), + BtreeSumTag <=> UniqueBtreeSumTag : |k: &SumTag| AlgebraicValue::U8(k.0), + BtreeI8 <=> UniqueBtreeI8 : |k: &i8| AlgebraicValue::I8(*k), + BtreeU16 <=> UniqueBtreeU16 : |k: &u16| AlgebraicValue::U16(*k), + BtreeI16 <=> UniqueBtreeI16 : |k: &i16| AlgebraicValue::I16(*k), + BtreeU32 <=> UniqueBtreeU32 : |k: &u32| AlgebraicValue::U32(*k), + BtreeI32 <=> UniqueBtreeI32 : |k: &i32| AlgebraicValue::I32(*k), + BtreeU64 <=> UniqueBtreeU64 : |k: &u64| AlgebraicValue::U64(*k), + BtreeI64 <=> UniqueBtreeI64 : |k: &i64| AlgebraicValue::I64(*k), + BtreeU128 <=> UniqueBtreeU128 : |k: &Packed| AlgebraicValue::U128(*k), + BtreeI128 <=> UniqueBtreeI128 : |k: &Packed| AlgebraicValue::I128(*k), + BtreeU256 <=> UniqueBtreeU256 : |k: &u256| AlgebraicValue::U256(Box::new(*k)), + BtreeI256 <=> UniqueBtreeI256 : |k: &i256| AlgebraicValue::I256(Box::new(*k)), + BtreeF32 <=> UniqueBtreeF32 : |k: &F32| AlgebraicValue::F32(*k), + BtreeF64 <=> UniqueBtreeF64 : |k: &F64| AlgebraicValue::F64(*k), + BtreeString <=> UniqueBtreeString : |k: &Box| AlgebraicValue::String(k.clone()), + BtreeAV <=> UniqueBtreeAV : |k: &AlgebraicValue| k.clone(), + } + hash { + HashBool <=> UniqueHashBool : |k: &bool| AlgebraicValue::Bool(*k), + 
HashU8 <=> UniqueHashU8 : |k: &u8| AlgebraicValue::U8(*k), + HashSumTag <=> UniqueHashSumTag : |k: &SumTag| AlgebraicValue::U8(k.0), + HashI8 <=> UniqueHashI8 : |k: &i8| AlgebraicValue::I8(*k), + HashU16 <=> UniqueHashU16 : |k: &u16| AlgebraicValue::U16(*k), + HashI16 <=> UniqueHashI16 : |k: &i16| AlgebraicValue::I16(*k), + HashU32 <=> UniqueHashU32 : |k: &u32| AlgebraicValue::U32(*k), + HashI32 <=> UniqueHashI32 : |k: &i32| AlgebraicValue::I32(*k), + HashU64 <=> UniqueHashU64 : |k: &u64| AlgebraicValue::U64(*k), + HashI64 <=> UniqueHashI64 : |k: &i64| AlgebraicValue::I64(*k), + HashU128 <=> UniqueHashU128 : |k: &Packed| AlgebraicValue::U128(*k), + HashI128 <=> UniqueHashI128 : |k: &Packed| AlgebraicValue::I128(*k), + HashU256 <=> UniqueHashU256 : |k: &u256| AlgebraicValue::U256(Box::new(*k)), + HashI256 <=> UniqueHashI256 : |k: &i256| AlgebraicValue::I256(Box::new(*k)), + HashF32 <=> UniqueHashF32 : |k: &F32| AlgebraicValue::F32(*k), + HashF64 <=> UniqueHashF64 : |k: &F64| AlgebraicValue::F64(*k), + HashString <=> UniqueHashString : |k: &Box| AlgebraicValue::String(k.clone()), + HashAV <=> UniqueHashAV : |k: &AlgebraicValue| k.clone(), + } + } + + /// Convert this non-unique index to a unique index in place. + /// + /// Returns `Ok(())` if the index was already unique or was successfully converted. + /// Returns `Err(ptr)` where `ptr` witnesses a duplicate key, leaving `self` unchanged. + fn make_unique(&mut self) -> Result<(), RowPointer> { + if self.is_unique() { + return Ok(()); + } + + let dummy = Self::BtreeBool(<_>::default()); + let old = core::mem::replace(self, dummy); + match old.try_make_unique() { + Ok(new) => { + *self = new; + Ok(()) + } + Err((restored, ptr)) => { + *self = restored; + Err(ptr) + } + } + } + + /// Convert this unique index back to a non-unique index in place. + /// + /// No-op for already non-unique or direct indices. 
+ fn make_non_unique(&mut self) { + let dummy = Self::BtreeBool(<_>::default()); + let old = core::mem::replace(self, dummy); + *self = old.into_non_unique(); + } } /// An index on a set of [`ColId`]s of a table. @@ -1428,6 +1555,28 @@ impl TableIndex { } } + /// Convert this non-unique index to a unique index in place. + /// + /// Returns `Ok(())` if the index was already unique or was successfully converted. + /// Returns `Err(ptr)` where `ptr` witnesses a duplicate key, leaving `self` unchanged. + pub fn make_unique(&mut self) -> Result<(), RowPointer> { + self.idx.make_unique() + } + + /// Convert this unique index back to a non-unique index in place. + /// + /// No-op for already non-unique or direct indices. + pub fn make_non_unique(&mut self) { + self.idx.make_non_unique() + } + + /// Returns all duplicate keys (count > 1) in this index, + /// with keys converted to [`AlgebraicValue`]. + /// Returns an empty vec for unique indices. + pub fn iter_duplicates(&self) -> Vec<(AlgebraicValue, usize)> { + self.idx.iter_duplicates() + } + /// Deletes all entries from the index, leaving it empty. /// /// When inserting a newly-created index into the committed state, diff --git a/crates/table/src/table_index/multimap.rs b/crates/table/src/table_index/multimap.rs index b1ab1361072..4a4cc6596c2 100644 --- a/crates/table/src/table_index/multimap.rs +++ b/crates/table/src/table_index/multimap.rs @@ -134,6 +134,56 @@ impl RangedIndex for MultiMap { } } +impl MultiMap { + /// Returns an iterator over keys that have more than one row pointer, + /// yielding `(&key, count)` for each duplicate key. + pub(super) fn iter_duplicates(&self) -> impl Iterator { + self.map.iter().filter_map(|(k, entry)| { + let count = entry.count(); + if count > 1 { + Some((k, count)) + } else { + None + } + }) + } + + /// Check for duplicates and, if none, convert into a `BTreeMap`. + /// + /// Returns `Ok(map)` if every key maps to exactly one row. 
+ /// Returns `Err((self, ptr))` with a witness `RowPointer` of a duplicate if any key + /// maps to more than one row. The original `MultiMap` is returned intact on error. + pub(super) fn check_and_into_unique(self) -> Result, (Self, RowPointer)> { + // First pass: check for duplicates (borrows self.map immutably). + let dup = self + .map + .values() + .find_map(|entry| { + if entry.count() > 1 { + Some(entry.iter().next().unwrap()) + } else { + None + } + }); + + if let Some(ptr) = dup { + return Err((self, ptr)); + } + + // No duplicates; conversion is infallible. + let result = self + .map + .into_iter() + .map(|(k, entry)| { + let ptr = entry.iter().next().unwrap(); + (k, ptr) + }) + .collect(); + + Ok(result) + } +} + /// An iterator over values in a [`MultiMap`] where the keys are in a certain range. #[derive(Clone)] pub struct MultiMapRangeIter<'a, K> { diff --git a/crates/table/src/table_index/same_key_entry.rs b/crates/table/src/table_index/same_key_entry.rs index b17d7c38c1a..ecd469dff76 100644 --- a/crates/table/src/table_index/same_key_entry.rs +++ b/crates/table/src/table_index/same_key_entry.rs @@ -111,6 +111,14 @@ impl SameKeyEntry { } } + /// Returns the number of entries for this key. + pub(super) fn count(&self) -> usize { + match self { + Self::Small(list) => list.len(), + Self::Large(set) => set.len(), + } + } + /// Returns an iterator over all the entries for this key. pub(super) fn iter(&self) -> SameKeyEntryIter<'_> { match self { diff --git a/crates/table/src/table_index/unique_hash_index.rs b/crates/table/src/table_index/unique_hash_index.rs index ba572d03085..fe8d9d1c6c2 100644 --- a/crates/table/src/table_index/unique_hash_index.rs +++ b/crates/table/src/table_index/unique_hash_index.rs @@ -36,6 +36,39 @@ impl MemoryUsage for UniqueHashIndex { } } +impl UniqueHashIndex { + /// Construct a `UniqueHashIndex` from a `HashMap`. 
+ /// + /// Each entry is inserted via [`Index::insert`] so that `num_key_bytes` + /// is correctly maintained regardless of `K::MemoStorage`. + /// + /// # Panics + /// + /// Panics if the map contains duplicate keys (should never happen + /// since the caller verified uniqueness). + pub fn from_non_unique(map: HashMap) -> Self { + let mut result = Self::default(); + for (key, ptr) in map { + result + .insert(key, ptr) + .expect("duplicate key in supposedly unique hash map"); + } + result + } + + /// Convert this unique hash index back into a non-unique `HashIndex`. + /// + /// This is lossless: each key maps to exactly one `RowPointer`. + pub fn into_non_unique(self) -> super::hash_index::HashIndex { + let mut hi = super::hash_index::HashIndex::default(); + for (key, ptr) in self.map { + // HashIndex::insert always succeeds. + let _ = as Index>::insert(&mut hi, key, ptr); + } + hi + } +} + impl Index for UniqueHashIndex { type Key = K; diff --git a/crates/table/src/table_index/uniquemap.rs b/crates/table/src/table_index/uniquemap.rs index fa77885d233..710475c8e27 100644 --- a/crates/table/src/table_index/uniquemap.rs +++ b/crates/table/src/table_index/uniquemap.rs @@ -95,6 +95,40 @@ impl Index for UniqueMap { } } +impl UniqueMap { + /// Construct a `UniqueMap` from a `BTreeMap`. + /// + /// Each entry is inserted via [`Index::insert`] so that `num_key_bytes` + /// is correctly maintained regardless of `K::MemoStorage`. + /// + /// # Panics + /// + /// Panics if the map contains duplicate keys (should never happen + /// since the caller verified uniqueness). + pub fn from_non_unique(map: BTreeMap) -> Self { + let mut result = Self::default(); + for (key, ptr) in map { + result + .insert(key, ptr) + .expect("duplicate key in supposedly unique map"); + } + result + } + + /// Convert this unique map back into a non-unique `MultiMap`. 
+ /// + /// This is lossless: each key maps to exactly one `RowPointer`, + /// which becomes a single-entry `SameKeyEntry` in the `MultiMap`. + pub fn into_non_unique(self) -> super::multimap::MultiMap { + let mut mm = super::multimap::MultiMap::default(); + for (key, ptr) in self.map { + // MultiMap::insert always succeeds. + let _ = as Index>::insert(&mut mm, key, ptr); + } + mm + } +} + /// An iterator over the potential value in a [`UniqueMap`] for a given key. pub struct UniqueMapPointIter<'a> { /// The iterator seeking for matching keys in the range.