diff --git a/.cargo/config.toml b/.cargo/config.toml
index bf125a3eda47..77736304c643 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -13,4 +13,5 @@ rustflags = [
     "-Wclippy::print_stderr",
     "-Wclippy::implicit_clone",
     "-Aclippy::items_after_test_module",
+    "-Wunused_results",
 ]
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index 9bc6a5fd2e15..424bdcd7d635 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -114,7 +114,7 @@ async fn write_data(
         };
 
         let now = Instant::now();
-        db.insert(requests).await.unwrap();
+        let _ = db.insert(requests).await.unwrap();
         let elapsed = now.elapsed();
         total_rpc_elapsed_ms += elapsed.as_millis();
         progress_bar.inc(row_count as _);
@@ -377,19 +377,16 @@ fn create_table_expr() -> CreateTableExpr {
 }
 
 fn query_set() -> HashMap<String, String> {
-    let mut ret = HashMap::new();
-
-    ret.insert(
-        "count_all".to_string(),
-        format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
-    );
-
-    ret.insert(
-        "fare_amt_by_passenger".to_string(),
-        format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
-    );
-
-    ret
+    HashMap::from([
+        (
+            "count_all".to_string(),
+            format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
+        ),
+        (
+            "fare_amt_by_passenger".to_string(),
+            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
+        )
+    ])
 }
 
 async fn do_write(args: &Args, db: &Database) {
@@ -414,7 +411,8 @@ async fn do_write(args: &Args, db: &Database) {
             let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
-            write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+            let _ = write_jobs
+                .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
         }
     }
     while write_jobs.join_next().await.is_some() {
@@ -423,7 +421,8 @@ async fn do_write(args: &Args, db: &Database) {
             let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
-            write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+            let _ = write_jobs
+                .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
         }
     }
 }
diff --git a/src/catalog/src/helper.rs b/src/catalog/src/helper.rs
index dfad2b76d63e..fc7e263b033c 100644
--- a/src/catalog/src/helper.rs
+++ b/src/catalog/src/helper.rs
@@ -392,6 +392,6 @@ mod tests {
     #[test]
     fn test_table_global_value_compatibility() {
        let s = r#"{"node_id":1,"regions_id_map":{"1":[0]},"table_info":{"ident":{"table_id":1098,"version":1},"name":"container_cpu_limit","desc":"Created on insertion","catalog_name":"greptime","schema_name":"dd","meta":{"schema":{"column_schemas":[{"name":"container_id","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"container_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"docker_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"host","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_name","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"image_tag","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"interval","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"runtime","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"short_image","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"type","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"dd_value","data_type":{"Float64":{}},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}},{"name":"ts","data_type":{"Timestamp":{"Millisecond":null}},"is_nullable":false,"is_time_index":true,"default_constraint":null,"metadata":{"greptime:time_index":"true"}},{"name":"git.repository_url","data_type":{"String":null},"is_nullable":true,"is_time_index":false,"default_constraint":null,"metadata":{}}],"timestamp_index":11,"version":1},"primary_key_indices":[0,1,2,3,4,5,6,7,8,9,12],"value_indices":[10,11],"engine":"mito","next_column_id":12,"region_numbers":[],"engine_options":{},"options":{},"created_on":"1970-01-01T00:00:00Z"},"table_type":"Base"}}"#;
-        TableGlobalValue::parse(s).unwrap();
+        assert!(TableGlobalValue::parse(s).is_ok());
     }
 }
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 54f69b5d41c9..926674003f1e 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -180,7 +180,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
                     table_name,
                 ),
             })?;
-            manager
+            let _ = manager
                 .register_table(RegisterTableRequest {
                     catalog: catalog_name.clone(),
                     schema: schema_name.clone(),
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index a53c1138fb76..188ea4d0d892 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -118,9 +118,10 @@ impl LocalCatalogManager {
 
     async fn init_system_catalog(&self) -> Result<()> {
         // register SystemCatalogTable
-        self.catalogs
+        let _ = self
+            .catalogs
             .register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
-        self.catalogs.register_schema_sync(RegisterSchemaRequest {
+        let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
             catalog: SYSTEM_CATALOG_NAME.to_string(),
             schema: INFORMATION_SCHEMA_NAME.to_string(),
         })?;
@@ -131,12 +132,13 @@ impl LocalCatalogManager {
             table_id: SYSTEM_CATALOG_TABLE_ID,
             table: self.system.information_schema.system.clone(),
         };
-        self.catalogs.register_table(register_table_req).await?;
+        let _ = self.catalogs.register_table(register_table_req).await?;
 
         // register default catalog and default schema
-        self.catalogs
+        let _ = self
+            .catalogs
             .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
-        self.catalogs.register_schema_sync(RegisterSchemaRequest {
+        let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
         })?;
@@ -151,7 +153,8 @@ impl LocalCatalogManager {
             table: numbers_table,
         };
 
-        self.catalogs
+        let _ = self
+            .catalogs
             .register_table(register_number_table_req)
             .await?;
 
@@ -226,7 +229,8 @@ impl LocalCatalogManager {
         for entry in entries {
             match entry {
                 Entry::Catalog(c) => {
-                    self.catalogs
+                    let _ = self
+                        .catalogs
                         .register_catalog_if_absent(c.catalog_name.clone());
                     info!("Register catalog: {}", c.catalog_name);
                 }
@@ -235,7 +239,7 @@ impl LocalCatalogManager {
                         catalog: s.catalog_name.clone(),
                         schema: s.schema_name.clone(),
                     };
-                    self.catalogs.register_schema_sync(req)?;
+                    let _ = self.catalogs.register_schema_sync(req)?;
                     info!("Registered schema: {:?}", s);
                 }
                 Entry::Table(t) => {
@@ -297,7 +301,7 @@ impl LocalCatalogManager {
             table_id: t.table_id,
             table: table_ref,
         };
-        self.catalogs.register_table(register_request).await?;
+        let _ = self.catalogs.register_table(register_request).await?;
         Ok(())
     }
 
@@ -389,8 +393,9 @@ impl CatalogManager for LocalCatalogManager {
         let engine = request.table.table_info().meta.engine.to_string();
         let table_name = request.table_name.clone();
         let table_id = request.table_id;
-        self.catalogs.register_table(request).await?;
-        self.system
+        let _ = self.catalogs.register_table(request).await?;
+        let _ = self
+            .system
             .register_table(
                 catalog_name.clone(),
                 schema_name.clone(),
@@ -438,7 +443,8 @@ impl CatalogManager for LocalCatalogManager {
         let engine = old_table.table_info().meta.engine.to_string();
 
         // rename table in system catalog
-        self.system
+        let _ = self
+            .system
             .register_table(
                 catalog_name.clone(),
                 schema_name.clone(),
@@ -499,7 +505,8 @@ impl CatalogManager for LocalCatalogManager {
                 schema: schema_name,
             }
         );
-        self.system
+        let _ = self
+            .system
             .register_schema(request.catalog.clone(), schema_name.clone())
             .await?;
         self.catalogs.register_schema_sync(request)
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index 799d93a71f2b..a1334e06e86f 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -49,9 +49,8 @@ impl Default for MemoryCatalogManager {
             catalogs: Default::default(),
         };
 
-        let mut catalog = HashMap::with_capacity(1);
-        catalog.insert(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new());
-        manager
+        let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
+        let _ = manager
             .catalogs
             .write()
             .unwrap()
@@ -115,7 +114,7 @@ impl CatalogManager for MemoryCatalogManager {
         }
 
         let table = schema.remove(&request.table_name).unwrap();
-        schema.insert(request.new_table_name, table);
+        let _ = schema.insert(request.new_table_name, table);
         Ok(true)
     }
 
@@ -144,9 +143,11 @@ impl CatalogManager for MemoryCatalogManager {
     }
 
     async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
-        self.register_schema_sync(request)?;
-        increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
-        Ok(true)
+        let registered = self.register_schema_sync(request)?;
+        if registered {
+            increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
+        }
+        Ok(registered)
     }
 
     async fn register_system_table(&self, _request: RegisterSystemTableRequest) -> Result<()> {
@@ -234,9 +235,11 @@ impl CatalogManager for MemoryCatalogManager {
     }
 
     async fn register_catalog(&self, name: String) -> Result<bool> {
-        self.register_catalog_sync(name)?;
-        increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
-        Ok(true)
+        let registered = self.register_catalog_sync(name)?;
+        if registered {
+            increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
+        }
+        Ok(registered)
     }
 
     fn as_any(&self) -> &dyn Any {
@@ -252,7 +255,7 @@ impl MemoryCatalogManager {
         match entry {
             Entry::Occupied(_) => true,
             Entry::Vacant(v) => {
-                v.insert(HashMap::new());
+                let _ = v.insert(HashMap::new());
                 false
             }
         }
@@ -273,7 +276,7 @@ impl MemoryCatalogManager {
         if catalog.contains_key(&request.schema) {
             return Ok(false);
         }
-        catalog.insert(request.schema, HashMap::new());
+        let _ = catalog.insert(request.schema, HashMap::new());
         Ok(true)
     }
 
@@ -310,7 +313,7 @@ impl MemoryCatalogManager {
             table_id: table.table_info().ident.table_id,
             table,
         };
-        manager.register_table_sync(request).unwrap();
+        let _ = manager.register_table_sync(request).unwrap();
         manager
     }
 }
@@ -341,7 +344,7 @@ mod tests {
             table: Arc::new(NumbersTable::default()),
         };
 
-        catalog_list.register_table(register_request).await.unwrap();
+        assert!(catalog_list.register_table(register_request).await.is_ok());
         let table = catalog_list
             .table(
                 DEFAULT_CATALOG_NAME,
@@ -390,7 +393,7 @@ mod tests {
             new_table_name: new_table_name.to_string(),
             table_id,
         };
-        catalog.rename_table(rename_request).await.unwrap();
+        assert!(catalog.rename_table(rename_request).await.is_ok());
 
         // test old table name not exist
         assert!(!catalog
@@ -492,7 +495,7 @@ mod tests {
             table_id: 2333,
             table: Arc::new(NumbersTable::default()),
         };
-        catalog.register_table(register_table_req).await.unwrap();
+        assert!(catalog.register_table(register_table_req).await.is_ok());
         assert!(catalog
             .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
             .await
diff --git a/src/catalog/src/remote/client.rs b/src/catalog/src/remote/client.rs
index 6df2fcd2616d..66e470932333 100644
--- a/src/catalog/src/remote/client.rs
+++ b/src/catalog/src/remote/client.rs
@@ -240,7 +240,7 @@ impl KvBackend for MetaKvBackend {
 
     async fn move_value(&self, from_key: &[u8], to_key: &[u8]) -> Result<()> {
         let req = MoveValueRequest::new(from_key, to_key);
-        self.client.move_value(req).await.context(MetaSrvSnafu)?;
+        let _ = self.client.move_value(req).await.context(MetaSrvSnafu)?;
         Ok(())
     }
 
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index e6cc824f0811..127be693e8cf 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -112,7 +112,7 @@ impl RemoteCatalogManager {
             joins.push(self.initiate_schemas(node_id, backend, engine_manager, catalog_name));
         }
 
-        futures::future::try_join_all(joins).await?;
+        let _ = futures::future::try_join_all(joins).await?;
 
         Ok(())
     }
@@ -623,13 +623,14 @@ impl CatalogManager for RemoteCatalogManager {
         self.check_catalog_schema_exist(&catalog_name, &schema_name)
             .await?;
 
-        self.register_table(
-            catalog_name.clone(),
-            schema_name.clone(),
-            request.table_name,
-            request.table.clone(),
-        )
-        .await?;
+        let _ = self
+            .register_table(
+                catalog_name.clone(),
+                schema_name.clone(),
+                request.table_name,
+                request.table.clone(),
+            )
+            .await?;
 
         let table_info = request.table.table_info();
         let table_ident = TableIdent {
@@ -680,7 +681,8 @@ impl CatalogManager for RemoteCatalogManager {
             table_id: table_info.ident.table_id,
             engine: table_info.meta.engine.clone(),
         };
-        self.region_alive_keepers
+        let _ = self
+            .region_alive_keepers
             .deregister_table(&table_ident)
             .await;
     }
@@ -846,7 +848,7 @@ impl CatalogManager for RemoteCatalogManager {
 
                 let catalog_key = String::from_utf8_lossy(&catalog.0);
                 if let Ok(key) = CatalogKey::parse(&catalog_key) {
-                    catalogs.insert(key.catalog_name);
+                    let _ = catalogs.insert(key.catalog_name);
                 }
             }
         }
@@ -865,7 +867,7 @@ impl CatalogManager for RemoteCatalogManager {
 
                 let schema_key = String::from_utf8_lossy(&schema.0);
                 if let Ok(key) = SchemaKey::parse(&schema_key) {
-                    schemas.insert(key.schema_name);
+                    let _ = schemas.insert(key.schema_name);
                 }
             }
         }
@@ -886,7 +888,7 @@ impl CatalogManager for RemoteCatalogManager {
 
                 let table_key = String::from_utf8_lossy(&table.0);
                 if let Ok(key) = TableRegionalKey::parse(&table_key) {
-                    tables.insert(key.table_name);
+                    let _ = tables.insert(key.table_name);
                 }
             }
         }
diff --git a/src/catalog/src/remote/mock.rs b/src/catalog/src/remote/mock.rs
index c23e1fa757b6..248ee4a430d6 100644
--- a/src/catalog/src/remote/mock.rs
+++ b/src/catalog/src/remote/mock.rs
@@ -45,7 +45,6 @@ pub struct MockKvBackend {
 
 impl Default for MockKvBackend {
     fn default() -> Self {
-        let mut map = BTreeMap::default();
         let catalog_value = CatalogValue {}.as_bytes().unwrap();
         let schema_value = SchemaValue {}.as_bytes().unwrap();
 
@@ -60,11 +59,11 @@ impl Default for MockKvBackend {
         }
         .to_string();
 
-        // create default catalog and schema
-        map.insert(default_catalog_key.into(), catalog_value);
-        map.insert(default_schema_key.into(), schema_value);
-
-        let map = RwLock::new(map);
+        let map = RwLock::new(BTreeMap::from([
+            // create default catalog and schema
+            (default_catalog_key.into(), catalog_value),
+            (default_schema_key.into(), schema_value),
+        ]));
         Self { map }
     }
 }
@@ -109,7 +108,7 @@ impl KvBackend for MockKvBackend {
 
     async fn set(&self, key: &[u8], val: &[u8]) -> Result<(), Error> {
         let mut map = self.map.write().await;
-        map.insert(key.to_vec(), val.to_vec());
+        let _ = map.insert(key.to_vec(), val.to_vec());
 
         Ok(())
     }
@@ -124,7 +123,7 @@ impl KvBackend for MockKvBackend {
         match existing {
             Entry::Vacant(e) => {
                 if expect.is_empty() {
-                    e.insert(val.to_vec());
+                    let _ = e.insert(val.to_vec());
                     Ok(Ok(()))
                 } else {
                     Ok(Err(None))
@@ -132,7 +131,7 @@ impl KvBackend for MockKvBackend {
             }
             Entry::Occupied(mut existing) => {
                 if existing.get() == expect {
-                    existing.insert(val.to_vec());
+                    let _ = existing.insert(val.to_vec());
                     Ok(Ok(()))
                 } else {
                     Ok(Err(Some(existing.get().clone())))
@@ -201,7 +200,7 @@ impl TableEngine for MockTableEngine {
         )) as Arc<_>;
 
         let mut tables = self.tables.write().unwrap();
-        tables.insert(table_id, table.clone() as TableRef);
+        let _ = tables.insert(table_id, table.clone() as TableRef);
         Ok(table)
     }
 
diff --git a/src/catalog/src/remote/region_alive_keeper.rs b/src/catalog/src/remote/region_alive_keeper.rs
index 130b7536fd6f..be372732fb34 100644
--- a/src/catalog/src/remote/region_alive_keeper.rs
+++ b/src/catalog/src/remote/region_alive_keeper.rs
@@ -92,7 +92,7 @@ impl RegionAliveKeepers {
         }
 
         let mut keepers = self.keepers.lock().await;
-        keepers.insert(table_ident.clone(), keeper.clone());
+        let _ = keepers.insert(table_ident.clone(), keeper.clone());
 
         if self.started.load(Ordering::Relaxed) {
             keeper.start().await;
@@ -237,7 +237,7 @@ impl RegionAliveKeeper {
         let countdown_task_handles = Arc::downgrade(&self.countdown_task_handles);
         let on_task_finished = async move {
             if let Some(x) = countdown_task_handles.upgrade() {
-                x.lock().await.remove(&region);
+                let _ = x.lock().await.remove(&region);
             } // Else the countdown task handles map could be dropped because the keeper is dropped.
         };
         let handle = Arc::new(CountdownTaskHandle::new(
@@ -248,7 +248,7 @@ impl RegionAliveKeeper {
         ));
 
         let mut handles = self.countdown_task_handles.lock().await;
-        handles.insert(region, handle.clone());
+        let _ = handles.insert(region, handle.clone());
 
         if self.started.load(Ordering::Relaxed) {
             handle.start(self.heartbeat_interval_millis).await;
@@ -772,7 +772,7 @@ mod test {
         };
 
         let table_engine = Arc::new(MockTableEngine::default());
-        table_engine.create_table(ctx, request).await.unwrap();
+        assert!(table_engine.create_table(ctx, request).await.is_ok());
 
         let table_ident = TableIdent {
             catalog: catalog.to_string(),
@@ -788,7 +788,7 @@ mod test {
             region: 1,
             rx,
         };
-        common_runtime::spawn_bg(async move {
+        let _handle = common_runtime::spawn_bg(async move {
             task.run().await;
         });
 
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 854602b54fff..3eaa2baddbd4 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -228,21 +228,21 @@ pub(crate) fn build_table_deletion_request(
 }
 
 fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
-    let mut m = HashMap::with_capacity(3);
-    m.insert(
-        "entry_type".to_string(),
-        Arc::new(UInt8Vector::from_slice([entry_type as u8])) as _,
-    );
-    m.insert(
-        "key".to_string(),
-        Arc::new(BinaryVector::from_slice(&[key])) as _,
-    );
-    // Timestamp in key part is intentionally left to 0
-    m.insert(
-        "timestamp".to_string(),
-        Arc::new(TimestampMillisecondVector::from_slice([0])) as _,
-    );
-    m
+    HashMap::from([
+        (
+            "entry_type".to_string(),
+            Arc::new(UInt8Vector::from_slice([entry_type as u8])) as VectorRef,
+        ),
+        (
+            "key".to_string(),
+            Arc::new(BinaryVector::from_slice(&[key])) as VectorRef,
+        ),
+        (
+            "timestamp".to_string(),
+            // Timestamp in key part is intentionally left to 0
+            Arc::new(TimestampMillisecondVector::from_slice([0])) as VectorRef,
+        ),
+    ])
 }
 
 pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
@@ -262,18 +262,18 @@ pub fn build_insert_request(entry_type: EntryType, key: &[u8], value: &[u8]) ->
     let mut columns_values = HashMap::with_capacity(6);
     columns_values.extend(primary_key_columns.into_iter());
 
-    columns_values.insert(
+    let _ = columns_values.insert(
         "value".to_string(),
         Arc::new(BinaryVector::from_slice(&[value])) as _,
     );
 
     let now = util::current_time_millis();
-    columns_values.insert(
+    let _ = columns_values.insert(
         "gmt_created".to_string(),
         Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
     );
-    columns_values.insert(
+    let _ = columns_values.insert(
         "gmt_modified".to_string(),
         Arc::new(TimestampMillisecondVector::from_slice([now])) as _,
     );
 
@@ -482,14 +482,13 @@ mod tests {
     }
 
     #[test]
-    #[should_panic]
     pub fn test_decode_mismatch() {
-        decode_system_catalog(
+        assert!(decode_system_catalog(
             Some(EntryType::Table as u8),
             Some("some_catalog.some_schema.42".as_bytes()),
             None,
         )
-        .unwrap();
+        .is_err());
     }
 
     #[test]
@@ -504,7 +503,7 @@ mod tests {
         let dir = create_temp_dir("system-table-test");
         let store_dir = dir.path().to_string_lossy();
         let mut builder = object_store::services::Fs::default();
-        builder.root(&store_dir);
+        let _ = builder.root(&store_dir);
         let object_store = ObjectStore::new(builder).unwrap().finish();
         let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
         let table_engine = Arc::new(MitoEngine::new(
diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs
index d5b3423c8126..fc6882015ce5 100644
--- a/src/catalog/src/table_source.rs
+++ b/src/catalog/src/table_source.rs
@@ -111,7 +111,7 @@ impl DfTableSourceProvider {
         let provider = DfTableProviderAdapter::new(table);
         let source = provider_as_source(Arc::new(provider));
 
-        self.resolved_tables.insert(resolved_name, source.clone());
+        let _ = self.resolved_tables.insert(resolved_name, source.clone());
         Ok(source)
     }
 }
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 983b18ebd7fd..cafecf81c352 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -82,7 +82,7 @@ mod tests {
         let mut res = HashSet::new();
         while let Some(r) = iter.next().await {
             let kv = r.unwrap();
-            res.insert(String::from_utf8_lossy(&kv.0).to_string());
+            let _ = res.insert(String::from_utf8_lossy(&kv.0).to_string());
         }
         assert_eq!(
             vec!["__c-greptime".to_string()],
@@ -305,11 +305,11 @@ mod tests {
         let schema_name = "nonexistent_schema".to_string();
 
         // register catalog to catalog manager
-        components
+        assert!(components
             .catalog_manager
             .register_catalog(catalog_name.clone())
             .await
-            .unwrap();
+            .is_ok());
         assert_eq!(
             HashSet::<String>::from_iter(
                 vec![DEFAULT_CATALOG_NAME.to_string(), catalog_name.clone()].into_iter()
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index 30862a1f53ac..f5a686cc02b2 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -165,7 +165,7 @@ impl Client {
     pub async fn health_check(&self) -> Result<()> {
         let (_, channel) = self.find_channel()?;
         let mut client = HealthCheckClient::new(channel);
-        client.health_check(HealthCheckRequest {}).await?;
+        let _ = client.health_check(HealthCheckRequest {}).await?;
         Ok(())
     }
 }
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 524c406abbf7..b3688fb9d1c5 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -173,7 +173,7 @@ impl Database {
         let mut client = self.client.make_database_client()?.inner;
         let (sender, receiver) = mpsc::channel::<GreptimeRequest>(65536);
         let receiver = ReceiverStream::new(receiver);
-        client.handle_requests(receiver).await?;
+        let _ = client.handle_requests(receiver).await?;
         Ok(sender)
     }
 
diff --git a/src/client/src/load_balance.rs b/src/client/src/load_balance.rs
index d2837883715a..3543db5a1950 100644
--- a/src/client/src/load_balance.rs
+++ b/src/client/src/load_balance.rs
@@ -60,7 +60,7 @@ mod tests {
         let random = Random;
         for _ in 0..100 {
             let peer = random.get_peer(&peers).unwrap();
-            all.contains(peer);
+            assert!(all.contains(peer));
         }
     }
 }
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 56cea0dd64e5..e5c4cb5911ea 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -108,7 +108,7 @@ impl Repl {
             Ok(ref line) => {
                 let request = line.trim();
 
-                self.rl.add_history_entry(request.to_string());
+                let _ = self.rl.add_history_entry(request.to_string());
 
                 request.try_into()
             }
@@ -137,7 +137,7 @@ impl Repl {
                 }
             }
             ReplCommand::Sql { sql } => {
-                self.execute_sql(sql).await;
+                let _ = self.execute_sql(sql).await;
             }
             ReplCommand::Exit => {
                 return Ok(());
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index b3518d9627c8..bd0442357808 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -326,12 +326,12 @@ mod tests {
             .is_err());
 
         // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
-        (StartCommand {
+        assert!((StartCommand {
             node_id: Some(42),
             ..Default::default()
         })
         .load_options(TopLevelOptions::default())
-        .unwrap();
+        .is_ok());
     }
 
     #[test]
diff --git a/src/cmd/tests/cli.rs b/src/cmd/tests/cli.rs
index 07ad1123cbc0..0176846f02cd 100644
--- a/src/cmd/tests/cli.rs
+++ b/src/cmd/tests/cli.rs
@@ -27,10 +27,10 @@ mod tests {
 
     impl Repl {
         fn send_line(&mut self, line: &str) {
-            self.repl.send_line(line).unwrap();
+            assert!(self.repl.send_line(line).is_ok());
 
             // read a line to consume the prompt
-            self.read_line();
+            let _ = self.read_line();
         }
 
         fn read_line(&mut self) -> String {
@@ -76,7 +76,7 @@ mod tests {
         std::thread::sleep(Duration::from_secs(3));
 
         let mut repl_cmd = Command::new("./greptime");
-        repl_cmd.current_dir(bin_path).args([
+        let _ = repl_cmd.current_dir(bin_path).args([
             "--log-level=off",
             "cli",
             "attach",
@@ -105,7 +105,7 @@ mod tests {
         test_select(repl);
 
         datanode.kill().unwrap();
-        datanode.wait().unwrap();
+        assert!(datanode.wait().is_ok());
     }
 
     fn test_create_database(repl: &mut Repl) {
diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs
index c76e6b881bb8..5552a2ebf9e6 100644
--- a/src/common/base/src/lib.rs
+++ b/src/common/base/src/lib.rs
@@ -41,7 +41,7 @@ impl Plugins {
     }
 
     pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
-        self.lock().insert(value);
+        let _ = self.lock().insert(value);
     }
 
     pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs
index 6cd0c7861516..b74e2b155836 100644
--- a/src/common/datasource/src/file_format.rs
+++ b/src/common/datasource/src/file_format.rs
@@ -213,7 +213,7 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
     }
 
     // Flushes all pending writes
-    writer.try_flush(true).await?;
+    let _ = writer.try_flush(true).await?;
     writer.close_inner_writer().await?;
 
     Ok(rows)
diff --git a/src/common/datasource/src/file_format/csv.rs b/src/common/datasource/src/file_format/csv.rs
index b723ce9ddc1b..dfd2f3199af8 100644
--- a/src/common/datasource/src/file_format/csv.rs
+++ b/src/common/datasource/src/file_format/csv.rs
@@ -291,20 +291,20 @@ mod tests {
 
     #[test]
     fn test_try_from() {
-        let mut map = HashMap::new();
+        let map = HashMap::new();
         let format: CsvFormat = CsvFormat::try_from(&map).unwrap();
 
         assert_eq!(format, CsvFormat::default());
 
-        map.insert(
-            FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
-            "2000".to_string(),
-        );
-
-        map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
-        map.insert(FORMAT_DELIMITER.to_string(), b'\t'.to_string());
-        map.insert(FORMAT_HAS_HEADER.to_string(), "false".to_string());
-
+        let map = HashMap::from([
+            (
+                FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
+                "2000".to_string(),
+            ),
+            (FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
+            (FORMAT_DELIMITER.to_string(), b'\t'.to_string()),
+            (FORMAT_HAS_HEADER.to_string(), "false".to_string()),
+        ]);
         let format = CsvFormat::try_from(&map).unwrap();
 
         assert_eq!(
diff --git a/src/common/datasource/src/file_format/json.rs b/src/common/datasource/src/file_format/json.rs
index b9cf6e31a93a..9a13cc1cf1bb 100644
--- a/src/common/datasource/src/file_format/json.rs
+++ b/src/common/datasource/src/file_format/json.rs
@@ -214,18 +214,18 @@ mod tests {
 
     #[test]
     fn test_try_from() {
-        let mut map = HashMap::new();
+        let map = HashMap::new();
         let format = JsonFormat::try_from(&map).unwrap();
 
         assert_eq!(format, JsonFormat::default());
 
-        map.insert(
-            FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
-            "2000".to_string(),
-        );
-
-        map.insert(FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string());
-
+        let map = HashMap::from([
+            (
+                FORMAT_SCHEMA_INFER_MAX_RECORD.to_string(),
+                "2000".to_string(),
+            ),
+            (FORMAT_COMPRESSION_TYPE.to_string(), "zstd".to_string()),
+        ]);
         let format = JsonFormat::try_from(&map).unwrap();
 
         assert_eq!(
diff --git a/src/common/datasource/src/object_store/fs.rs b/src/common/datasource/src/object_store/fs.rs
index 78a481b2948b..7f43c50591dc 100644
--- a/src/common/datasource/src/object_store/fs.rs
+++ b/src/common/datasource/src/object_store/fs.rs
@@ -20,7 +20,7 @@ use crate::error::{BuildBackendSnafu, Result};
 
 pub fn build_fs_backend(root: &str) -> Result<ObjectStore> {
     let mut builder = Fs::default();
-    builder.root(root);
+    let _ = builder.root(root);
     let object_store = ObjectStore::new(builder)
         .context(BuildBackendSnafu)?
         .finish();
diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs
index 0ebd80411b21..f1c39dbe05a7 100644
--- a/src/common/datasource/src/object_store/s3.rs
+++ b/src/common/datasource/src/object_store/s3.rs
@@ -34,28 +34,26 @@ pub fn build_s3_backend(
 ) -> Result<ObjectStore> {
     let mut builder = S3::default();
 
-    builder.root(path);
-
-    builder.bucket(host);
+    let _ = builder.root(path).bucket(host);
 
     if let Some(endpoint) = connection.get(ENDPOINT_URL) {
-        builder.endpoint(endpoint);
+        let _ = builder.endpoint(endpoint);
     }
 
     if let Some(region) = connection.get(REGION) {
-        builder.region(region);
+        let _ = builder.region(region);
     }
 
     if let Some(key_id) = connection.get(ACCESS_KEY_ID) {
-        builder.access_key_id(key_id);
+        let _ = builder.access_key_id(key_id);
     }
 
     if let Some(key) = connection.get(SECRET_ACCESS_KEY) {
-        builder.secret_access_key(key);
+        let _ = builder.secret_access_key(key);
     }
 
     if let Some(session_token) = connection.get(SESSION_TOKEN) {
-        builder.security_token(session_token);
+        let _ = builder.security_token(session_token);
     }
 
     if let Some(enable_str) = connection.get(ENABLE_VIRTUAL_HOST_STYLE) {
@@ -69,7 +67,7 @@ pub fn build_s3_backend(
                 .build()
         })?;
         if enable {
-            builder.enable_virtual_host_style();
+            let _ = builder.enable_virtual_host_style();
         }
     }
 
diff --git a/src/common/datasource/src/test_util.rs b/src/common/datasource/src/test_util.rs
index ab04017f1644..0117f54087a5 100644
--- a/src/common/datasource/src/test_util.rs
+++ b/src/common/datasource/src/test_util.rs
@@ -55,7 +55,7 @@ pub fn format_schema(schema: Schema) -> Vec<String> {
 
 pub fn test_store(root: &str) -> ObjectStore {
     let mut builder = Fs::default();
-    builder.root(root);
+    let _ = builder.root(root);
 
     ObjectStore::new(builder).unwrap().finish()
 }
@@ -64,7 +64,7 @@ pub fn test_tmp_store(root: &str) -> (ObjectStore, TempDir) {
     let dir = create_temp_dir(root);
 
     let mut builder = Fs::default();
-    builder.root("/");
+    let _ = builder.root("/");
 
     (ObjectStore::new(builder).unwrap().finish(), dir)
 }
@@ -113,14 +113,14 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
 
     let output_path = format!("{}/{}", dir.path().display(), "output");
 
-    stream_to_json(
+    assert!(stream_to_json(
         Box::pin(stream),
         tmp_store.clone(),
         &output_path,
         threshold(size),
     )
     .await
-    .unwrap();
+    .is_ok());
 
     let written = tmp_store.read(&output_path).await.unwrap();
     let origin = store.read(origin_path).await.unwrap();
@@ -155,14 +155,14 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
 
     let output_path = format!("{}/{}", dir.path().display(), "output");
 
-    stream_to_csv(
+    assert!(stream_to_csv(
         Box::pin(stream),
         tmp_store.clone(),
         &output_path,
         threshold(size),
     )
     .await
-    .unwrap();
+    .is_ok());
 
     let written = tmp_store.read(&output_path).await.unwrap();
     let origin = store.read(origin_path).await.unwrap();
diff --git a/src/common/function-macro/tests/test_derive.rs b/src/common/function-macro/tests/test_derive.rs
index 253a3ae3e41d..db2b469e9b36 100644
--- a/src/common/function-macro/tests/test_derive.rs
+++ b/src/common/function-macro/tests/test_derive.rs
@@ -22,7 +22,7 @@ struct Foo {}
 #[test]
 #[allow(clippy::extra_unused_type_parameters)]
 fn test_derive() {
-    Foo::default();
+    let _ = Foo::default();
     assert_fields!(Foo: input_types);
     assert_impl_all!(Foo: std::fmt::Debug, Default, AggrFuncTypeStore);
 }
diff --git a/src/common/function/src/scalars/function_registry.rs b/src/common/function/src/scalars/function_registry.rs
index d25341c7ab23..0e8c1c4b1943 100644
--- a/src/common/function/src/scalars/function_registry.rs
+++ b/src/common/function/src/scalars/function_registry.rs
@@ -32,14 +32,16 @@ pub struct FunctionRegistry {
 
 impl FunctionRegistry {
     pub fn register(&self, func: FunctionRef) {
-        self.functions
+        let _ = self
+            .functions
             .write()
             .unwrap()
             .insert(func.name().to_string(), func);
     }
 
     pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
-        self.aggregate_functions
+        let _ = self
+            .aggregate_functions
             .write()
             .unwrap()
             .insert(func.name(), func);
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index c98d3a2182e9..d96492185674 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -77,7 +77,7 @@ pub fn find_new_columns(schema: &SchemaRef, columns: &[Column]) -> Result