Clippy fixes for 1.81.0. #2461

Merged: 2 commits, Sep 5, 2024
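Every -/+ pair below makes the same mechanical change: dropping the & in front of a format!(...) that is passed to VarBuilder::pp. The PR does not name the lint, but the pattern matches clippy's needless_borrows_for_generic_args, which fires when a borrowed temporary is handed to a generic parameter that the owned value already satisfies. A minimal sketch of the warning and the fix, using a hypothetical stand-in for candle's VarBuilder rather than the real type:

    struct VarBuilder;

    impl VarBuilder {
        // Stand-in for a builder method that is generic over its path
        // segment, in the spirit of fn pp<S: ToString>(&self, s: S).
        fn pp<S: ToString>(&self, prefix: S) -> String {
            prefix.to_string()
        }
    }

    fn main() {
        let (vb, i) = (VarBuilder, 3);
        // Before: &format! borrows a temporary String; clippy reports the
        // borrow as needless because the owned String satisfies S directly.
        let old = vb.pp(&format!("layers.{i}"));
        // After: pass the String by value, which is what this PR does.
        let new = vb.pp(format!("layers.{i}"));
        assert_eq!(old, new);
    }

Fixes of this shape can usually be applied mechanically with cargo clippy --fix.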
6 changes: 3 additions & 3 deletions candle-examples/examples/musicgen/musicgen_model.rs
@@ -284,11 +284,11 @@ impl MusicgenDecoder {
         };
         let embed_dim = cfg.vocab_size + 1;
         let embed_tokens = (0..cfg.num_codebooks)
-            .map(|i| embedding(embed_dim, h, vb.pp(&format!("embed_tokens.{i}"))))
+            .map(|i| embedding(embed_dim, h, vb.pp(format!("embed_tokens.{i}"))))
             .collect::<Result<Vec<_>>>()?;
         let embed_positions = MusicgenSinusoidalPositionalEmbedding::load(vb.clone(), cfg)?;
         let layers = (0..cfg.num_hidden_layers)
-            .map(|i| MusicgenDecoderLayer::load(vb.pp(&format!("layers.{i}")), cfg))
+            .map(|i| MusicgenDecoderLayer::load(vb.pp(format!("layers.{i}")), cfg))
             .collect::<Result<Vec<_>>>()?;
         let layer_norm = layer_norm(h, 1e-5, vb.pp("layer_norm"))?;
         Ok(Self {
@@ -341,7 +341,7 @@ impl MusicgenForCausalLM {
         let h = cfg.hidden_size;
         let decoder = MusicgenDecoder::load(vb.pp("model.decoder"), cfg)?;
         let lm_heads = (0..cfg.num_codebooks)
-            .map(|i| linear_no_bias(h, cfg.vocab_size, vb.pp(&format!("lm_heads.{i}"))))
+            .map(|i| linear_no_bias(h, cfg.vocab_size, vb.pp(format!("lm_heads.{i}"))))
             .collect::<Result<Vec<_>>>()?;
         Ok(Self {
             decoder,
1 change: 0 additions & 1 deletion candle-examples/examples/silero-vad/main.rs
@@ -8,7 +8,6 @@ use anyhow::Result;
 use clap::Parser;

 use candle::{DType, Tensor};
-use candle_onnx;

 #[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
 enum Which {
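The silero-vad change is the one fix in this PR that is not about borrows: it deletes a redundant crate import. Since the 2018 edition, a crate listed in Cargo.toml is reachable through its path without any use declaration, so a bare "use candle_onnx;" imports nothing and newer toolchains report it under the unused_imports lint. A minimal sketch of the same situation, using std as a stand-in for the external crate:

    fn main() {
        // No "use std;" is needed (or useful): the crate name is already in
        // scope, exactly as candle_onnx remains after this deletion.
        let greeting = std::string::String::from("hello");
        println!("{greeting}");
    }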
6 changes: 3 additions & 3 deletions candle-examples/examples/yolo-v3/darknet.rs
@@ -123,7 +123,7 @@ fn conv(vb: VarBuilder, index: usize, p: usize, b: &Block) -> Result<(usize, Bl
     let padding = if pad != 0 { (size - 1) / 2 } else { 0 };
     let (bn, bias) = match b.parameters.get("batch_normalize") {
         Some(p) if p.parse::<usize>()? != 0 => {
-            let bn = batch_norm(filters, 1e-5, vb.pp(&format!("batch_norm_{index}")))?;
+            let bn = batch_norm(filters, 1e-5, vb.pp(format!("batch_norm_{index}")))?;
             (Some(bn), false)
         }
         Some(_) | None => (None, true),
@@ -135,9 +135,9 @@ fn conv(vb: VarBuilder, index: usize, p: usize, b: &Block) -> Result<(usize, Bl
         dilation: 1,
     };
     let conv = if bias {
-        conv2d(p, filters, size, conv_cfg, vb.pp(&format!("conv_{index}")))?
+        conv2d(p, filters, size, conv_cfg, vb.pp(format!("conv_{index}")))?
     } else {
-        conv2d_no_bias(p, filters, size, conv_cfg, vb.pp(&format!("conv_{index}")))?
+        conv2d_no_bias(p, filters, size, conv_cfg, vb.pp(format!("conv_{index}")))?
     };
     let leaky = match activation {
         "leaky" => true,
2 changes: 1 addition & 1 deletion candle-examples/examples/yolo-v8/model.rs
@@ -161,7 +161,7 @@ impl C2f {
         let cv2 = ConvBlock::load(vb.pp("cv2"), (2 + n) * c, c2, 1, 1, None)?;
         let mut bottleneck = Vec::with_capacity(n);
         for idx in 0..n {
-            let b = Bottleneck::load(vb.pp(&format!("bottleneck.{idx}")), c, c, shortcut)?;
+            let b = Bottleneck::load(vb.pp(format!("bottleneck.{idx}")), c, c, shortcut)?;
             bottleneck.push(b)
         }
         Ok(Self {
6 changes: 3 additions & 3 deletions candle-transformers/src/models/bert.rs
@@ -419,7 +419,7 @@ struct BertEncoder {
 impl BertEncoder {
     fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
         let layers = (0..config.num_hidden_layers)
-            .map(|index| BertLayer::load(vb.pp(&format!("layer.{index}")), config))
+            .map(|index| BertLayer::load(vb.pp(format!("layer.{index}")), config))
             .collect::<Result<Vec<_>>>()?;
         let span = tracing::span!(tracing::Level::TRACE, "encoder");
         Ok(BertEncoder { layers, span })
@@ -454,8 +454,8 @@ impl BertModel {
             (Err(err), _) | (_, Err(err)) => {
                 if let Some(model_type) = &config.model_type {
                     if let (Ok(embeddings), Ok(encoder)) = (
-                        BertEmbeddings::load(vb.pp(&format!("{model_type}.embeddings")), config),
-                        BertEncoder::load(vb.pp(&format!("{model_type}.encoder")), config),
+                        BertEmbeddings::load(vb.pp(format!("{model_type}.embeddings")), config),
+                        BertEncoder::load(vb.pp(format!("{model_type}.encoder")), config),
                     ) {
                         (embeddings, encoder)
                     } else {
2 changes: 1 addition & 1 deletion candle-transformers/src/models/bigcode.rs
@@ -298,7 +298,7 @@ impl GPTBigCode {
         let wte = embedding(cfg.vocab_size, hidden_size, vb_t.pp("wte"))?;
         let wpe = embedding(cfg.max_position_embeddings, hidden_size, vb_t.pp("wpe"))?;
         let blocks = (0..cfg.num_hidden_layers)
-            .map(|i| Block::load(vb_t.pp(&format!("h.{i}")), &cfg))
+            .map(|i| Block::load(vb_t.pp(format!("h.{i}")), &cfg))
             .collect::<Result<Vec<_>>>()?;
         let ln_f = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb_t.pp("ln_f"))?;
         let lm_head = linear(hidden_size, cfg.vocab_size, false, vb_t.pp("wte"))?;
6 changes: 3 additions & 3 deletions candle-transformers/src/models/distilbert.rs
@@ -275,7 +275,7 @@ struct Transformer {
 impl Transformer {
     fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
         let layers = (0..config.n_layers)
-            .map(|index| TransformerBlock::load(vb.pp(&format!("layer.{index}")), config))
+            .map(|index| TransformerBlock::load(vb.pp(format!("layer.{index}")), config))
             .collect::<Result<Vec<_>>>()?;
         let span = tracing::span!(tracing::Level::TRACE, "encoder");
         Ok(Transformer { layers, span })
@@ -311,8 +311,8 @@ impl DistilBertModel {
             (Err(err), _) | (_, Err(err)) => {
                 if let Some(model_type) = &config.model_type {
                     if let (Ok(embeddings), Ok(encoder)) = (
-                        Embeddings::load(vb.pp(&format!("{model_type}.embeddings")), config),
-                        Transformer::load(vb.pp(&format!("{model_type}.transformer")), config),
+                        Embeddings::load(vb.pp(format!("{model_type}.embeddings")), config),
+                        Transformer::load(vb.pp(format!("{model_type}.transformer")), config),
                     ) {
                         (embeddings, encoder)
                     } else {
2 changes: 1 addition & 1 deletion candle-transformers/src/models/falcon.rs
@@ -448,7 +448,7 @@ impl Falcon {
             vb.pp("transformer.word_embeddings"),
         )?;
         let blocks = (0..cfg.num_hidden_layers)
-            .map(|i| FalconDecoderLayer::load(vb.pp(&format!("transformer.h.{i}")), &cfg))
+            .map(|i| FalconDecoderLayer::load(vb.pp(format!("transformer.h.{i}")), &cfg))
             .collect::<Result<Vec<_>>>()?;
         let ln_f = layer_norm(
             cfg.hidden_size,
2 changes: 1 addition & 1 deletion candle-transformers/src/models/jina_bert.rs
@@ -344,7 +344,7 @@ impl BertEncoder {
             candle::bail!("only alibi is supported as a position-embedding-type")
         }
         let layers = (0..cfg.num_hidden_layers)
-            .map(|index| BertLayer::new(vb.pp(&format!("layer.{index}")), cfg))
+            .map(|index| BertLayer::new(vb.pp(format!("layer.{index}")), cfg))
             .collect::<Result<Vec<_>>>()?;
         let span = tracing::span!(tracing::Level::TRACE, "encoder");
         let alibi = build_alibi_bias(cfg)?.to_device(vb.device())?;
2 changes: 1 addition & 1 deletion candle-transformers/src/models/llama.rs
@@ -507,7 +507,7 @@ impl Llama {
         let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
         let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?;
         let blocks: Vec<_> = (0..cfg.num_hidden_layers)
-            .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), cfg).unwrap())
+            .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg).unwrap())
             .collect();

         Ok(Self {
2 changes: 1 addition & 1 deletion candle-transformers/src/models/llama2_c.rs
@@ -354,7 +354,7 @@ impl Llama {
         let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?;
         let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?;
         let blocks: Vec<_> = (0..cfg.n_layers)
-            .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), &cfg).unwrap())
+            .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap())
             .collect();
         Ok(Self {
             wte,
2 changes: 1 addition & 1 deletion candle-transformers/src/models/moondream.rs
@@ -167,7 +167,7 @@ impl VisionTransformer {
         let blocks = (0..cfg.num_blocks)
             .map(|i| {
                 VitBlock::new(
-                    vb.pp(&format!("blocks.{}", i)),
+                    vb.pp(format!("blocks.{}", i)),
                     cfg.embed_dim,
                     cfg.num_heads,
                     cfg,
8 changes: 4 additions & 4 deletions candle-transformers/src/models/segformer.rs
@@ -404,7 +404,7 @@ impl SegformerEncoder {
                 stride,
                 num_channels,
                 hidden_size,
-                vb.pp(&format!("patch_embeddings.{}", i)),
+                vb.pp(format!("patch_embeddings.{}", i)),
             )?);
             let mut layers = Vec::with_capacity(config.depths[i]);
             for j in 0..config.depths[i] {
@@ -417,14 +417,14 @@ impl SegformerEncoder {
                     num_attention_heads,
                     sequence_reduction_ratio,
                     mlp_ratio,
-                    vb.pp(&format!("block.{}.{}", i, j)),
+                    vb.pp(format!("block.{}.{}", i, j)),
                 )?);
             }
             blocks.push(layers);
             layer_norms.push(layer_norm(
                 hidden_size,
                 config.layer_norm_eps,
-                vb.pp(&format!("layer_norm.{}", i)),
+                vb.pp(format!("layer_norm.{}", i)),
             )?);
         }
         Ok(Self {
@@ -507,7 +507,7 @@ impl SegformerDecodeHead {
             linear_c.push(SegformerMLP::new(
                 config,
                 hidden_size,
-                vb.pp(&format!("linear_c.{}", i)),
+                vb.pp(format!("linear_c.{}", i)),
             )?);
         }
         let linear_fuse = conv2d_no_bias(
2 changes: 1 addition & 1 deletion candle-transformers/src/models/t5.rs
@@ -659,7 +659,7 @@ struct T5Stack {
 impl T5Stack {
     fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> {
         let block = (0..cfg.num_layers)
-            .map(|i| T5Block::load(i == 0, decoder, vb.pp(&format!("block.{i}")), cfg))
+            .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg))
             .collect::<Result<Vec<_>>>()?;
         let final_layer_norm = T5LayerNorm::load(
             cfg.d_model,
4 changes: 2 additions & 2 deletions candle-transformers/src/models/whisper/model.rs
@@ -260,7 +260,7 @@ impl AudioEncoder {
         let positional_embedding = sinusoids(n_ctx, n_state, vb.device())?;
         let blocks = (0..cfg.encoder_layers)
             .map(|i| {
-                ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(&format!("layers.{i}")))
+                ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(format!("layers.{i}")))
             })
             .collect::<Result<Vec<_>>>()?;
         let ln_post = layer_norm(n_state, vb.pp("layer_norm"))?;
@@ -321,7 +321,7 @@ impl TextDecoder {
         let positional_embedding = vb.get((n_ctx, n_state), "embed_positions.weight")?;
         let blocks = (0..cfg.decoder_layers)
             .map(|i| {
-                ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(&format!("layers.{i}")))
+                ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(format!("layers.{i}")))
             })
             .collect::<Result<Vec<_>>>()?;
         let ln = layer_norm(n_state, vb.pp("layer_norm"))?;
2 changes: 1 addition & 1 deletion candle-wasm-examples/llama2-c/src/model.rs
@@ -275,7 +275,7 @@ impl Llama {
         let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?;
         let norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?;
         let blocks: Vec<_> = (0..cfg.n_layers)
-            .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), cache, cfg).unwrap())
+            .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cache, cfg).unwrap())
             .collect();
         Ok(Self::new(wte, blocks, norm, lm_head))
     }
2 changes: 1 addition & 1 deletion candle-wasm-examples/yolo/src/model.rs
@@ -155,7 +155,7 @@ impl C2f {
         let cv2 = ConvBlock::load(vb.pp("cv2"), (2 + n) * c, c2, 1, 1, None)?;
         let mut bottleneck = Vec::with_capacity(n);
         for idx in 0..n {
-            let b = Bottleneck::load(vb.pp(&format!("bottleneck.{idx}")), c, c, shortcut)?;
+            let b = Bottleneck::load(vb.pp(format!("bottleneck.{idx}")), c, c, shortcut)?;
             bottleneck.push(b)
         }
         Ok(Self {