Commit

Merge branch 'main' into feat/ci-check-script
Dustin-Ray authored Oct 7, 2024
2 parents 9bbc3b7 + 2cbff77 · commit c52d556
Showing 36 changed files with 275 additions and 230 deletions.
5 changes: 4 additions & 1 deletion — Cargo.toml

@@ -76,4 +76,7 @@ semicolon_if_nothing_returned = "deny"
 unnested_or_patterns = "deny"
 unreadable_literal = "deny"
 must_use_candidate = "deny"
-range_plus_one = "deny"
+range_plus_one = "deny"
+cloned_instead_of_copied = "deny"
+from_iter_instead_of_collect = "deny"
+cast_lossless = "deny"
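
The three new lints drive the mechanical rewrites in the rest of this diff. As a quick orientation, here is a minimal, self-contained sketch (not from the repository) of the pattern each lint pushes toward:

fn main() {
    // cast_lossless: prefer lossless `From` conversions over `as` casts.
    let x: u8 = 200;
    let _widened = u32::from(x); // instead of `x as u32`

    // from_iter_instead_of_collect: prefer `.collect()` with a type
    // annotation over `Vec::from_iter(...)` / `String::from_iter(...)`.
    let squares: Vec<u64> = (1..=4).map(|n| n * n).collect();

    // cloned_instead_of_copied: for `Copy` types, `.copied()` states the
    // intent more precisely than `.cloned()`.
    let total: u64 = squares.iter().copied().sum();

    println!("{squares:?} sum to {total}");
}

None of these change behavior; they make intent explicit and keep potentially lossy `as` casts visually distinct from the conversions that cannot lose information.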
12 changes: 5 additions & 7 deletions — crates/proof-of-sql/benches/scaffold/random_util.rs

@@ -40,20 +40,18 @@ pub fn generate_random_columns<'a, S: Scalar>(
         }
         (ColumnType::Int128, Some(b)) => {
             Column::Int128(alloc.alloc_slice_fill_with(num_rows, |_| {
-                rng.gen_range((-b(num_rows) as i128)..=(b(num_rows) as i128))
+                rng.gen_range((i128::from(-b(num_rows)))..=(i128::from(b(num_rows))))
             }))
         }
         (ColumnType::VarChar, _) => {
             let strs = alloc.alloc_slice_fill_with(num_rows, |_| {
                 let len = rng
                     .gen_range(0..=bound.map(|b| b(num_rows) as usize).unwrap_or(10));
                 alloc.alloc_str(
-                    String::from_iter(
-                        rng.sample_iter(&rand::distributions::Alphanumeric)
-                            .take(len)
-                            .map(char::from),
-                    )
-                    .as_str(),
+                    &rng.sample_iter(&rand::distributions::Alphanumeric)
+                        .take(len)
+                        .map(char::from)
+                        .collect::<String>(),
                 ) as &str
             });
             Column::VarChar((
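
The VarChar arm above samples random alphanumeric strings; a standalone sketch of that idiom, assuming rand 0.8 (where `Alphanumeric` yields bytes, hence the `map(char::from)`):

use rand::{distributions::Alphanumeric, Rng};

fn main() {
    // Sample `len` random alphanumeric characters and collect them
    // directly into a String, as the updated bench code does.
    let len = 10;
    let s: String = rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(len)
        .map(char::from)
        .collect();
    println!("{s}");
}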
2 changes: 1 addition & 1 deletion — crates/proof-of-sql/src/base/commitment/mod.rs

@@ -100,7 +100,7 @@ impl Commitment for RistrettoPoint {
         offset: usize,
         _setup: &Self::PublicSetup<'_>,
     ) -> Vec<Self> {
-        let sequences = Vec::from_iter(committable_columns.iter().map(Into::into));
+        let sequences: Vec<_> = committable_columns.iter().map(Into::into).collect();
         let mut compressed_commitments = vec![Default::default(); committable_columns.len()];
         blitzar::compute::compute_curve25519_commitments(
             &mut compressed_commitments,
11 changes: 5 additions & 6 deletions — crates/proof-of-sql/src/base/commitment/query_commitments.rs

@@ -52,12 +52,11 @@ impl<C: Commitment> QueryCommitmentsExt<C> for QueryCommitments<C> {
                 table_ref,
                 TableCommitment::from_accessor_with_max_bounds(
                     table_ref,
-                    &Vec::from_iter(
-                        accessor
-                            .lookup_schema(table_ref)
-                            .iter()
-                            .filter_map(|c| columns.iter().find(|x| x.name() == c.0).copied()),
-                    ),
+                    &accessor
+                        .lookup_schema(table_ref)
+                        .iter()
+                        .filter_map(|c| columns.iter().find(|x| x.name() == c.0).copied())
+                        .collect::<Vec<_>>(),
                     accessor,
                 ),
             )
32 changes: 20 additions & 12 deletions — crates/proof-of-sql/src/base/commitment/vec_commitment_ext.rs

@@ -217,9 +217,10 @@ mod tests {
             ],
             0,
         );
-        let expected_commitments =
-            Vec::from_iter(expected_commitments.iter().map(|c| c.decompress().unwrap()));
-
+        let expected_commitments: Vec<_> = expected_commitments
+            .iter()
+            .map(|c| c.decompress().unwrap())
+            .collect();
         assert_eq!(commitments, expected_commitments);
     }

@@ -258,8 +259,10 @@
             ],
             0,
         );
-        let expected_commitments =
-            Vec::from_iter(expected_commitments.iter().map(|c| c.decompress().unwrap()));
+        let expected_commitments: Vec<_> = expected_commitments
+            .iter()
+            .map(|c| c.decompress().unwrap())
+            .collect();

         assert_eq!(commitments, expected_commitments);
     }

@@ -343,8 +346,10 @@
             ],
             0,
         );
-        let expected_commitments =
-            Vec::from_iter(expected_commitments.iter().map(|c| c.decompress().unwrap()));
+        let expected_commitments: Vec<_> = expected_commitments
+            .iter()
+            .map(|c| c.decompress().unwrap())
+            .collect();

         assert_eq!(commitments, expected_commitments);
     }

@@ -384,9 +389,10 @@
             ],
             0,
         );
-        let expected_commitments =
-            Vec::from_iter(expected_commitments.iter().map(|c| c.decompress().unwrap()));
-
+        let expected_commitments: Vec<_> = expected_commitments
+            .iter()
+            .map(|c| c.decompress().unwrap())
+            .collect();
         assert_eq!(commitments, expected_commitments);
     }

@@ -460,8 +466,10 @@
             ],
             3,
         );
-        let expected_commitments =
-            Vec::from_iter(expected_commitments.iter().map(|c| c.decompress().unwrap()));
+        let expected_commitments: Vec<_> = expected_commitments
+            .iter()
+            .map(|c| c.decompress().unwrap())
+            .collect();

        assert_eq!(commitments, expected_commitments);
     }
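
Each of these tests maps compressed commitments through `decompress()`. A minimal round-trip sketch with curve25519-dalek (crate version assumed, not pinned by this diff):

use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT;
use curve25519_dalek::ristretto::{CompressedRistretto, RistrettoPoint};

fn main() {
    let point: RistrettoPoint = RISTRETTO_BASEPOINT_POINT;
    let compressed: CompressedRistretto = point.compress();

    // `decompress` returns None for invalid encodings, hence the
    // `unwrap()` in the tests above, where inputs are known-good.
    let decompressed = compressed.decompress().unwrap();
    assert_eq!(point, decompressed);
}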
24 changes: 13 additions & 11 deletions — crates/proof-of-sql/src/base/database/column_operation.rs

@@ -45,15 +45,15 @@ pub fn try_add_subtract_column_types(
         Ok(ColumnType::Scalar)
     } else {
         let left_precision_value =
-            lhs.precision_value().expect("Numeric types have precision") as i16;
+            i16::from(lhs.precision_value().expect("Numeric types have precision"));
         let right_precision_value =
-            rhs.precision_value().expect("Numeric types have precision") as i16;
+            i16::from(rhs.precision_value().expect("Numeric types have precision"));
         let left_scale = lhs.scale().expect("Numeric types have scale");
         let right_scale = rhs.scale().expect("Numeric types have scale");
         let scale = left_scale.max(right_scale);
-        let precision_value: i16 = scale as i16
-            + (left_precision_value - left_scale as i16)
-                .max(right_precision_value - right_scale as i16)
+        let precision_value: i16 = i16::from(scale)
+            + (left_precision_value - i16::from(left_scale))
+                .max(right_precision_value - i16::from(right_scale))
             + 1_i16;
         let precision = u8::try_from(precision_value)
             .map_err(|_| ColumnOperationError::DecimalConversionError {

@@ -115,7 +115,7 @@ pub fn try_multiply_column_types(
     let scale = left_scale.checked_add(right_scale).ok_or(
         ColumnOperationError::DecimalConversionError {
             source: DecimalError::InvalidScale {
-                scale: left_scale as i16 + right_scale as i16,
+                scale: i16::from(left_scale) + i16::from(right_scale),
             },
         },
     )?;

@@ -150,10 +150,12 @@ pub fn try_divide_column_types(
         // We can unwrap here because we know that both types are integers
         return Ok(lhs.max_integer_type(&rhs).unwrap());
     }
-    let left_precision_value = lhs.precision_value().expect("Numeric types have precision") as i16;
-    let right_precision_value = rhs.precision_value().expect("Numeric types have precision") as i16;
-    let left_scale = lhs.scale().expect("Numeric types have scale") as i16;
-    let right_scale = rhs.scale().expect("Numeric types have scale") as i16;
+    let left_precision_value =
+        i16::from(lhs.precision_value().expect("Numeric types have precision"));
+    let right_precision_value =
+        i16::from(rhs.precision_value().expect("Numeric types have precision"));
+    let left_scale = i16::from(lhs.scale().expect("Numeric types have scale"));
+    let right_scale = i16::from(rhs.scale().expect("Numeric types have scale"));
     let raw_scale = (left_scale + right_precision_value + 1_i16).max(6_i16);
     let precision_value: i16 = left_precision_value - left_scale + right_scale + raw_scale;
     let scale =

@@ -899,7 +901,7 @@
         .scale()
         .expect("numeric columns have scale");
     let applied_scale = rhs_scale - lhs_scale + new_scale;
-    let applied_scale_factor = BigInt::from(10).pow(applied_scale.unsigned_abs() as u32);
+    let applied_scale_factor = BigInt::from(10).pow(u32::from(applied_scale.unsigned_abs()));
     let result: Vec<S> = lhs
         .iter()
         .zip(rhs)
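
The widened `i16` arithmetic above implements the usual decimal result-type rules while guarding against `u8` overflow before the final range check. A hedged, standalone sketch of the addition/subtraction rule — the function name and signature here are illustrative, not the crate's API:

// Result type for decimal addition/subtraction:
//   scale     = max(s1, s2)
//   precision = max(p1 - s1, p2 - s2) + scale + 1
// Doing the arithmetic in i16 avoids u8 overflow; the caller then
// decides whether an out-of-range precision is an error.
fn add_sub_precision(p1: u8, s1: i8, p2: u8, s2: i8) -> Option<(u8, i8)> {
    let scale = s1.max(s2);
    let precision = i16::from(scale)
        + (i16::from(p1) - i16::from(s1)).max(i16::from(p2) - i16::from(s2))
        + 1;
    u8::try_from(precision).ok().map(|p| (p, scale))
}

fn main() {
    // DECIMAL(10, 2) + DECIMAL(8, 4) => DECIMAL(13, 4)
    assert_eq!(add_sub_precision(10, 2, 8, 4), Some((13, 4)));
}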
9 changes: 4 additions & 5 deletions — crates/proof-of-sql/src/base/database/filter_util.rs

@@ -25,11 +25,10 @@ pub fn filter_columns<'a, S: Scalar>(
         .map(|(i, _)| i)
         .collect();
     let result_length = indexes.len();
-    let filtered_result = Vec::from_iter(
-        columns
-            .iter()
-            .map(|column| filter_column_by_index(alloc, column, &indexes)),
-    );
+    let filtered_result: Vec<_> = columns
+        .iter()
+        .map(|column| filter_column_by_index(alloc, column, &indexes))
+        .collect();
     (filtered_result, result_length)
 }
 /// This function takes an index vector and a `Column` and returns a
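
`filter_columns` first materializes the surviving row indexes, then filters every column by that one index list. A self-contained sketch of the same two-step pattern over a plain slice (hypothetical helper, not the crate's API):

// Boolean selection -> surviving row indexes -> filtered rows.
fn filter_by_selection<T: Copy>(rows: &[T], selection: &[bool]) -> Vec<T> {
    let indexes: Vec<usize> = selection
        .iter()
        .enumerate()
        .filter(|&(_, &keep)| keep)
        .map(|(i, _)| i)
        .collect();
    // Reusing `indexes` for every column (here, one column) is the point:
    // the selection is evaluated once, not per column.
    indexes.iter().map(|&i| rows[i]).collect()
}

fn main() {
    let rows = [10, 20, 30, 40];
    let selection = [true, false, true, false];
    assert_eq!(filter_by_selection(&rows, &selection), vec![10, 30]);
}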
49 changes: 28 additions & 21 deletions — crates/proof-of-sql/src/base/database/group_by_util.rs

@@ -67,13 +67,12 @@ pub fn aggregate_columns<'a, S: Scalar>(

     // `filtered_indexes` is a vector of indexes of the rows that are selected. We sort this vector
     // so that all the rows in the same group are next to each other.
-    let mut filtered_indexes = Vec::from_iter(
-        selection_column_in
-            .iter()
-            .enumerate()
-            .filter(|&(_, &b)| b)
-            .map(|(i, _)| i),
-    );
+    let mut filtered_indexes: Vec<_> = selection_column_in
+        .iter()
+        .enumerate()
+        .filter(|&(_, &b)| b)
+        .map(|(i, _)| i)
+        .collect();
     if_rayon!(
         filtered_indexes.par_sort_unstable_by(|&a, &b| compare_indexes_by_columns(
             group_by_columns_in,

@@ -96,25 +95,33 @@
             compare_indexes_by_columns(group_by_columns_in, a, b) == Ordering::Equal
         })
         .multiunzip();
-    let group_by_columns_out = Vec::from_iter(
-        group_by_columns_in
-            .iter()
-            .map(|column| filter_column_by_index(alloc, column, &group_by_result_indexes)),
-    );
+    let group_by_columns_out: Vec<_> = group_by_columns_in
+        .iter()
+        .map(|column| filter_column_by_index(alloc, column, &group_by_result_indexes))
+        .collect();

     // This calls the `sum_aggregate_column_by_index_counts` function on each column in `sum_columns`
     // and gives a vector of `S` slices
-    let sum_columns_out = Vec::from_iter(sum_columns_in.iter().map(|column| {
-        sum_aggregate_column_by_index_counts(alloc, column, &counts, &filtered_indexes)
-    }));
+    let sum_columns_out: Vec<_> = sum_columns_in
+        .iter()
+        .map(|column| {
+            sum_aggregate_column_by_index_counts(alloc, column, &counts, &filtered_indexes)
+        })
+        .collect();

-    let max_columns_out = Vec::from_iter(max_columns_in.iter().map(|column| {
-        max_aggregate_column_by_index_counts(alloc, column, &counts, &filtered_indexes)
-    }));
+    let max_columns_out: Vec<_> = max_columns_in
+        .iter()
+        .map(|column| {
+            max_aggregate_column_by_index_counts(alloc, column, &counts, &filtered_indexes)
+        })
+        .collect();

-    let min_columns_out = Vec::from_iter(min_columns_in.iter().map(|column| {
-        min_aggregate_column_by_index_counts(alloc, column, &counts, &filtered_indexes)
-    }));
+    let min_columns_out: Vec<_> = min_columns_in
+        .iter()
+        .map(|column| {
+            min_aggregate_column_by_index_counts(alloc, column, &counts, &filtered_indexes)
+        })
+        .collect();

     // Cast the counts to something compatible with BigInt.
     let count_column_out = alloc.alloc_slice_fill_iter(counts.into_iter().map(|c| c as i64));
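
The surrounding function implements a sort-based group-by: collect the selected row indexes, sort them so equal keys become adjacent, then reduce each run. A standalone sketch of that strategy over a single key column (hypothetical helper, not the crate's API):

// Sort-based group-by: sort selected indexes by key, then fold each
// run of equal keys into (key, count, sum).
fn group_by_sum(keys: &[&str], values: &[i64], selection: &[bool]) -> Vec<(String, usize, i64)> {
    let mut idx: Vec<usize> = (0..keys.len()).filter(|&i| selection[i]).collect();
    idx.sort_unstable_by(|&a, &b| keys[a].cmp(keys[b]));

    let mut out: Vec<(String, usize, i64)> = Vec::new();
    for &i in &idx {
        match out.last_mut() {
            // Same key as the previous row: extend the current group.
            Some((k, count, sum)) if k.as_str() == keys[i] => {
                *count += 1;
                *sum += values[i];
            }
            // New key: start a new group.
            _ => out.push((keys[i].to_string(), 1, values[i])),
        }
    }
    out
}

fn main() {
    let keys = ["a", "b", "a", "b"];
    let values = [1, 10, 2, 20];
    let selection = [true, true, true, false];
    assert_eq!(
        group_by_sum(&keys, &values, &selection),
        vec![("a".into(), 2, 3), ("b".into(), 1, 10)]
    );
}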
@@ -17,7 +17,7 @@ pub fn convert_scalar_to_i256<S: Scalar>(val: &S) -> i256 {
     let limbs: [u64; 4] = abs_scalar.into();

     let low = (limbs[0] as u128) | ((limbs[1] as u128) << 64);
-    let high = (limbs[2] as i128) | ((limbs[3] as i128) << 64);
+    let high = i128::from(limbs[2]) | (i128::from(limbs[3]) << 64);

     let abs_i256 = i256::from_parts(low, high);
     if is_negative {
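
For orientation, `convert_scalar_to_i256` reconstructs 128-bit halves from 64-bit limbs; a standalone sketch of the lossless version of that packing move:

fn main() {
    // Two 64-bit limbs, little-endian: value = limbs[0] + limbs[1] * 2^64.
    let limbs: [u64; 2] = [0xDEAD_BEEF, 1];

    // u64 -> u128 cannot lose information, so `From` works and the
    // cast_lossless lint is satisfied.
    let packed: u128 = u128::from(limbs[0]) | (u128::from(limbs[1]) << 64);

    assert_eq!(packed, 0xDEAD_BEEF_u128 + (1u128 << 64));
    println!("{packed:#034x}");
}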
@@ -101,7 +101,7 @@ pub fn make_random_test_accessor_data(
             ColumnType::Int128 => {
                 column_fields.push(Field::new(*col_name, DataType::Decimal128(38, 0), false));

-                let values: Vec<i128> = values.iter().map(|x| *x as i128).collect();
+                let values: Vec<i128> = values.iter().map(|x| i128::from(*x)).collect();
                 columns.push(Arc::new(
                     Decimal128Array::from(values.clone())
                         .with_precision_and_scale(38, 0)
@@ -16,7 +16,7 @@ impl TestSchemaAccessor {

 impl SchemaAccessor for TestSchemaAccessor {
     fn lookup_column(&self, table_ref: TableRef, column_id: Identifier) -> Option<ColumnType> {
-        self.schemas.get(&table_ref)?.get(&column_id).cloned()
+        self.schemas.get(&table_ref)?.get(&column_id).copied()
     }

     fn lookup_schema(&self, table_ref: TableRef) -> Vec<(Identifier, ColumnType)> {
12 changes: 9 additions & 3 deletions — crates/proof-of-sql/src/base/encode/varint_trait.rs

@@ -70,10 +70,12 @@ fn zigzag_decode(from: u64) -> i64 {
 macro_rules! impl_varint {
     ($t:ty, unsigned) => {
         impl VarInt for $t {
+            #[allow(clippy::cast_lossless)]
             fn required_space(self) -> usize {
                 (self as u64).required_space()
             }

+            #[allow(clippy::cast_lossless)]
             fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
                 let (n, s) = u64::decode_var(src)?;
                 // This check is required to ensure that we actually return `None` when `src` has a value that would overflow `Self`.

@@ -84,17 +86,20 @@
                 }
             }

+            #[allow(clippy::cast_lossless)]
             fn encode_var(self, dst: &mut [u8]) -> usize {
                 (self as u64).encode_var(dst)
             }
         }
     };
     ($t:ty, signed) => {
         impl VarInt for $t {
+            #[allow(clippy::cast_lossless)]
             fn required_space(self) -> usize {
                 (self as i64).required_space()
             }

+            #[allow(clippy::cast_lossless)]
             fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
                 let (n, s) = i64::decode_var(src)?;
                 // This check is required to ensure that we actually return `None` when `src` has a value that would overflow `Self`.

@@ -105,6 +110,7 @@
                 }
             }

+            #[allow(clippy::cast_lossless)]
             fn encode_var(self, dst: &mut [u8]) -> usize {
                 (self as i64).encode_var(dst)
             }

@@ -124,7 +130,7 @@ impl_varint!(i8, signed);

 impl VarInt for bool {
     fn required_space(self) -> usize {
-        (self as u64).required_space()
+        u64::from(self).required_space()
     }

     fn decode_var(src: &[u8]) -> Option<(Self, usize)> {

@@ -138,7 +144,7 @@ impl VarInt for bool {
     }

     fn encode_var(self, dst: &mut [u8]) -> usize {
-        (self as u64).encode_var(dst)
+        u64::from(self).encode_var(dst)
     }
 }

@@ -159,7 +165,7 @@ impl VarInt for u64 {
         let mut success = false;
         for b in src {
             let msb_dropped = b & DROP_MSB;
-            result |= (msb_dropped as u64) << shift;
+            result |= u64::from(msb_dropped) << shift;
             shift += 7;

             if shift > (9 * 7) {
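
For context: these impls funnel every integer width through the `u64`/`i64` codecs, which use LEB128-style varints (7 payload bits per byte, high bit as a continuation flag), with zigzag mapping for signed values so small negatives stay short. A standalone sketch of both pieces, independent of the crate's trait:

// Zigzag maps signed to unsigned so small magnitudes encode short:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
fn zigzag_encode(n: i64) -> u64 {
    ((n << 1) ^ (n >> 63)) as u64
}

// LEB128-style varint: 7 bits per byte, MSB set means "more bytes follow".
fn encode_varint(mut n: u64, dst: &mut Vec<u8>) {
    loop {
        let byte = (n & 0x7F) as u8;
        n >>= 7;
        if n == 0 {
            dst.push(byte);
            return;
        }
        dst.push(byte | 0x80);
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_varint(zigzag_encode(-3), &mut buf);
    // -3 zigzags to 5, which fits in a single byte.
    assert_eq!(buf, vec![0x05]);
}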