From 6454b8da5441dfd60225a1930a50f8ae8ca556c8 Mon Sep 17 00:00:00 2001 From: HeyyyyyyG <49757268+HeyyyyyyG@users.noreply.github.com> Date: Thu, 22 Feb 2024 20:17:34 -0800 Subject: [PATCH 01/12] Jiaqiz/option to disable adapters & merge all lora layers (#8029) * Added LoRA support for the Dense layer of Attention * Added LoRA MLP support to MCore and NeMo models. * Change LoRA config default to QKV. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed bug with ddp training. * use adapter only when it is enabled Signed-off-by: jiaqi zeng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix lora merge script (#8113) Signed-off-by: Chen Cui Co-authored-by: Adi Renduchintala * add peft ckpt to nemo Signed-off-by: Jiaqi Zeng * merge lora weights for all layers, mcore only Signed-off-by: Jiaqi Zeng * support/fix cpu initialization Signed-off-by: Chen Cui * add example usage Signed-off-by: Chen Cui * fix TP due to distributed checkpoint Signed-off-by: Chen Cui * updating the logic of merging lora weights for all layers, mcore only Signed-off-by: Jiaqi Zeng * MCoreMixin chages. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merge in fp32 then cast back Signed-off-by: Jiaqi Zeng * remove ckpt to nemo Signed-off-by: Jiaqi Zeng * fix import Signed-off-by: Jiaqi Zeng --------- Signed-off-by: jiaqi zeng Signed-off-by: Chen Cui Signed-off-by: Jiaqi Zeng Co-authored-by: Tugrul Konuk Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Adi Renduchintala Co-authored-by: Chen Cui --- .../common/megatron/adapters/mcore_mixins.py | 20 +++--- .../nlp/modules/common/megatron/attention.py | 12 ++-- .../modules/common/megatron/language_model.py | 4 +- .../nlp/modules/common/megatron/mlp.py | 4 +- .../megatron/token_level_encoder_decoder.py | 2 +- .../modules/common/megatron/transformer.py | 4 +- .../merge_lora_weights/merge.py | 71 +++++++++++++++---- 7 files changed, 82 insertions(+), 35 deletions(-) diff --git a/nemo/collections/nlp/modules/common/megatron/adapters/mcore_mixins.py b/nemo/collections/nlp/modules/common/megatron/adapters/mcore_mixins.py index 3d355255850ac..096f1de37cfe5 100644 --- a/nemo/collections/nlp/modules/common/megatron/adapters/mcore_mixins.py +++ b/nemo/collections/nlp/modules/common/megatron/adapters/mcore_mixins.py @@ -93,7 +93,7 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): # LoRA logic if self.is_adapter_available(): lora_kqv_adapter = self.get_adapter_module(AdapterName.LORA_KQV_ADAPTER) - if lora_kqv_adapter: + if lora_kqv_adapter and self.adapter_cfg[AdapterName.LORA_KQV_ADAPTER]['enabled']: if isinstance(self.linear_qkv, TELayerNormColumnParallelLinear): lora_mixed_qkv = lora_kqv_adapter(layernorm_output) elif isinstance(self.linear_qkv, TEColumnParallelLinear): @@ -138,11 +138,11 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): if self.is_adapter_available(): key_infused_adapter = self.get_adapter_module(AdapterName.KEY_INFUSED) value_infused_adapter = self.get_adapter_module(AdapterName.VALUE_INFUSED) - if key_infused_adapter: + if key_infused_adapter and self.adapter_cfg[AdapterName.KEY_INFUSED]['enabled']: assert value_infused_adapter is not None, "Expected value_infused_adapter not found!" 
kls = key.shape key = key_infused_adapter(key.reshape(kls[0], kls[1], -1)).reshape(kls).to(query.dtype) - if value_infused_adapter: + if value_infused_adapter and self.adapter_cfg[AdapterName.VALUE_INFUSED]['enabled']: assert key_infused_adapter is not None, "Expected key_infused_adapter not found!" vls = value.shape value = value_infused_adapter(value.reshape(vls[0], vls[1], -1)).reshape(vls).to(query.dtype) @@ -229,7 +229,7 @@ def forward( # LoRA logic if self.is_adapter_available(): lora_linear_proj_adapter = self.get_adapter_module(AdapterName.LORA_DENSE_ATTENTION_ADAPTER) - if lora_linear_proj_adapter: + if lora_linear_proj_adapter and self.adapter_cfg[AdapterName.LORA_DENSE_ATTENTION_ADAPTER]['enabled']: lora_output = lora_linear_proj_adapter(core_attn_out) output = output + lora_output @@ -252,7 +252,7 @@ def forward(self, hidden_states): # LoRA logic if self.is_adapter_available(): lora_linear_fc1_adapter = self.get_adapter_module(AdapterName.LORA_Hto4H_ADAPTER) - if lora_linear_fc1_adapter: + if lora_linear_fc1_adapter and self.adapter_cfg[AdapterName.LORA_Hto4H_ADAPTER]['enabled']: lora_output = lora_linear_fc1_adapter(hidden_states) intermediate_parallel = intermediate_parallel + lora_output @@ -283,7 +283,7 @@ def glu(x): # LoRA logic if self.is_adapter_available(): lora_linear_fc2_adapter = self.get_adapter_module(AdapterName.LORA_4HtoH_ADAPTER) - if lora_linear_fc2_adapter: + if lora_linear_fc2_adapter and self.adapter_cfg[AdapterName.LORA_4HtoH_ADAPTER]['enabled']: lora_output = lora_linear_fc2_adapter(intermediate_parallel) output = output + lora_output return output, output_bias @@ -303,7 +303,9 @@ def forward(self, input_ids, position_ids): _sq, _bs, _hs = encoder_input.size() ptuning_adapter = self.get_adapter_module(AdapterName.PTUNING_ADAPTER) v = ptuning_adapter.virtual_tokens - if ptuning_adapter and _sq >= v: # The sequence should be longer the v to insert virtual embeddings. + if ( + ptuning_adapter and self.adapter_cfg[AdapterName.PTUNING_ADAPTER]['enabled'] and _sq >= v + ): # The sequence should be longer the v to insert virtual embeddings. 
virtual_embeddings = ptuning_adapter(_bs) encoder_input = encoder_input[ v:, :, : @@ -349,7 +351,7 @@ def forward( # adapter logic if self.is_adapter_available(): adapter_1 = self.get_adapter_module(AdapterName.PRE_ATTN_ADAPTER) - if adapter_1: + if adapter_1 and self.adapter_cfg[AdapterName.PRE_ATTN_ADAPTER]['enabled']: attention_output, bias = attention_output_with_bias attention_output = ( adapter_1(attention_output) + attention_output @@ -399,7 +401,7 @@ def forward( # adapter logic if self.is_adapter_available(): adapter_2 = self.get_adapter_module(AdapterName.POST_ATTN_ADAPTER) - if adapter_2: + if adapter_2 and self.adapter_cfg[AdapterName.POST_ATTN_ADAPTER]['enabled']: mlp_output, bias = mlp_output_with_bias mlp_output = adapter_2(mlp_output) + mlp_output # simple adapter call with residual connection mlp_output_with_bias = (mlp_output, bias) diff --git a/nemo/collections/nlp/modules/common/megatron/attention.py b/nemo/collections/nlp/modules/common/megatron/attention.py index 64e62fb819378..213a818b520c0 100644 --- a/nemo/collections/nlp/modules/common/megatron/attention.py +++ b/nemo/collections/nlp/modules/common/megatron/attention.py @@ -415,7 +415,7 @@ def forward( mixed_x_layer, _ = self.query_key_value(hidden_states) if self.is_adapter_available(): lora_kqv_adapter = self.get_adapter_module(AdapterName.LORA_KQV_ADAPTER) - if lora_kqv_adapter: + if lora_kqv_adapter and self.adapter_cfg[AdapterName.LORA_KQV_ADAPTER]['enabled']: lora_mixed_x_layer = lora_kqv_adapter(hidden_states) mixed_x_layer = mixed_x_layer + lora_mixed_x_layer @@ -437,7 +437,7 @@ def forward( mixed_kv_layer, _ = self.key_value(encoder_output) if self.is_adapter_available(): lora_kv_adapter = self.get_adapter_module(AdapterName.LORA_KV_ADAPTER) - if lora_kv_adapter: + if lora_kv_adapter and self.adapter_cfg[AdapterName.LORA_KV_ADAPTER]['enabled']: lora_mixed_kv_layer = lora_kv_adapter(encoder_output) mixed_kv_layer = mixed_kv_layer + lora_mixed_kv_layer @@ -459,7 +459,7 @@ def forward( query_layer, _ = self.query(hidden_states) if self.is_adapter_available(): lora_q_adapter = self.get_adapter_module(AdapterName.LORA_Q_ADAPTER) - if lora_q_adapter: + if lora_q_adapter and self.adapter_cfg[AdapterName.LORA_Q_ADAPTER]['enabled']: lora_q_layer = lora_q_adapter(hidden_states) query_layer = query_layer + lora_q_layer # [sq, b, hp] --> [sq, b, np, hn] @@ -472,11 +472,11 @@ def forward( if self.is_adapter_available(): key_infused_adapter = self.get_adapter_module(AdapterName.KEY_INFUSED) value_infused_adapter = self.get_adapter_module(AdapterName.VALUE_INFUSED) - if key_infused_adapter: + if key_infused_adapter and self.adapter_cfg[AdapterName.KEY_INFUSED]['enabled']: assert value_infused_adapter is not None, "Expected value_infused_adapter not found!" kls = key_layer.shape key_layer = key_infused_adapter(key_layer.reshape(kls[0], kls[1], -1)).reshape(kls) - if value_infused_adapter: + if value_infused_adapter and self.adapter_cfg[AdapterName.VALUE_INFUSED]['enabled']: assert key_infused_adapter is not None, "Expected key_infused_adapter not found!" 
vls = value_layer.shape value_layer = value_infused_adapter(value_layer.reshape(vls[0], vls[1], -1)).reshape(vls) @@ -574,7 +574,7 @@ def forward( output, bias = self.dense(context_layer) if self.is_adapter_available(): lora_dense_adapter = self.get_adapter_module(AdapterName.LORA_DENSE_ATTENTION_ADAPTER) - if lora_dense_adapter: + if lora_dense_adapter and self.adapter_cfg[AdapterName.LORA_DENSE_ATTENTION_ADAPTER]['enabled']: lora_dense_output = lora_dense_adapter(context_layer) output = output + lora_dense_output diff --git a/nemo/collections/nlp/modules/common/megatron/language_model.py b/nemo/collections/nlp/modules/common/megatron/language_model.py index bbeeade2d8c5e..99528ffd164f8 100755 --- a/nemo/collections/nlp/modules/common/megatron/language_model.py +++ b/nemo/collections/nlp/modules/common/megatron/language_model.py @@ -764,7 +764,9 @@ def forward( _sq, _bs, _hs = encoder_input.size() ptuning_adapter = self.get_adapter_module(AdapterName.PTUNING_ADAPTER) v = ptuning_adapter.virtual_tokens - if ptuning_adapter and _sq >= v: # The sequence should be longer the v to insert virtual embeddings. + if ( + ptuning_adapter and self.adapter_cfg[AdapterName.PTUNING_ADAPTER]['enabled'] and _sq >= v + ): # The sequence should be longer the v to insert virtual embeddings. virtual_embeddings = ptuning_adapter(_bs) encoder_input = encoder_input[ v:, :, : diff --git a/nemo/collections/nlp/modules/common/megatron/mlp.py b/nemo/collections/nlp/modules/common/megatron/mlp.py index aae86c54c1c44..6c20cb6669759 100644 --- a/nemo/collections/nlp/modules/common/megatron/mlp.py +++ b/nemo/collections/nlp/modules/common/megatron/mlp.py @@ -222,7 +222,7 @@ def forward(self, hidden_states): intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states) if self.is_adapter_available(): lora_dense_h_to_4h_adapter = self.get_adapter_module(AdapterName.LORA_Hto4H_ADAPTER) - if lora_dense_h_to_4h_adapter: + if lora_dense_h_to_4h_adapter and self.adapter_cfg[AdapterName.LORA_Hto4H_ADAPTER]['enabled']: lora_intermediate_parallel = lora_dense_h_to_4h_adapter(hidden_states) intermediate_parallel = intermediate_parallel + lora_intermediate_parallel @@ -270,7 +270,7 @@ def forward(self, hidden_states): output, output_bias = self.dense_4h_to_h(intermediate_parallel) if self.is_adapter_available(): lora_dense_4h_to_h_adapter = self.get_adapter_module(AdapterName.LORA_4HtoH_ADAPTER) - if lora_dense_4h_to_h_adapter: + if lora_dense_4h_to_h_adapter and self.adapter_cfg[AdapterName.LORA_4HtoH_ADAPTER]['enabled']: lora_output = lora_dense_4h_to_h_adapter(intermediate_parallel) output = output + lora_output return output, output_bias diff --git a/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py b/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py index b5d1289df6c59..b7b377940eb42 100644 --- a/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py +++ b/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py @@ -551,7 +551,7 @@ def forward( ptuning_adapter = self.get_adapter_module(AdapterName.PTUNING_ADAPTER) v = ptuning_adapter.virtual_tokens if ( - ptuning_adapter and _sq >= v + ptuning_adapter and self.adapter_cfg[AdapterName.PTUNING_ADAPTER]['enabled'] and _sq >= v ): # The sequence should be longer the v to insert virtual embeddings. 
virtual_embeddings = ptuning_adapter(_bs) enc_input = enc_input[ diff --git a/nemo/collections/nlp/modules/common/megatron/transformer.py b/nemo/collections/nlp/modules/common/megatron/transformer.py index 9bbe863d34ff1..f115b645666bf 100644 --- a/nemo/collections/nlp/modules/common/megatron/transformer.py +++ b/nemo/collections/nlp/modules/common/megatron/transformer.py @@ -538,7 +538,7 @@ def forward( if self.is_adapter_available(): adapter_1 = self.get_adapter_module(AdapterName.PRE_ATTN_ADAPTER) - if adapter_1: + if adapter_1 and self.adapter_cfg[AdapterName.PRE_ATTN_ADAPTER]['enabled']: attention_output = ( adapter_1(attention_output) + attention_output ) # simple adapter call with residual connection @@ -615,7 +615,7 @@ def forward( if self.is_adapter_available(): # TODO: (@adithyre) was able to move adapter_2 back to the end of the transformer after ptl 1.7 update. adapter_2 = self.get_adapter_module(AdapterName.POST_ATTN_ADAPTER) - if adapter_2: + if adapter_2 and self.adapter_cfg[AdapterName.POST_ATTN_ADAPTER]['enabled']: mlp_output = adapter_2(mlp_output) + mlp_output # simple adapter call with residual connection residual = layernorm_input diff --git a/scripts/nlp_language_modeling/merge_lora_weights/merge.py b/scripts/nlp_language_modeling/merge_lora_weights/merge.py index ce24186c0b213..a9317bd9d8e3f 100644 --- a/scripts/nlp_language_modeling/merge_lora_weights/merge.py +++ b/scripts/nlp_language_modeling/merge_lora_weights/merge.py @@ -91,7 +91,8 @@ def load_lora(lora_nemo, tp): def fix_for_O2(state_dict): new_state_dict = {} for k, v in state_dict.items(): - new_state_dict[k.replace('model.language_model', 'model.module.language_model')] = v + if "model.module." not in k: + new_state_dict[k.replace('model.', 'model.module.')] = v return new_state_dict @@ -110,22 +111,61 @@ def merge( curr_rank: current tp rank of the base model which is being merged with Lora. mcore: whether the model uses megatron core. 
""" - - for nl in range(num_layers): - if mcore: - key_self_attn_kqv = f'model.decoder.layers.{nl}.self_attention.linear_qkv.weight' - key_lora_in = f'model.decoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_in.weight' - key_lora_out = f'model.decoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_out.weight' - else: + mcore_layer_to_lora = {} + mcore_layer_to_lora["attention_qkv"] = { + "base_model_layer": "self_attention.linear_qkv.weight", + "lora_in": "self_attention.adapter_layer.lora_kqv_adapter.linear_in.weight", + "lora_out": "self_attention.adapter_layer.lora_kqv_adapter.linear_out.weight", + } + mcore_layer_to_lora["attention_dense"] = { + "base_model_layer": "self_attention.linear_proj.weight", + "lora_in": "self_attention.adapter_layer.lora_dense_attention_adapter.linear_in.weight", + "lora_out": "self_attention.adapter_layer.lora_dense_attention_adapter.linear_out.weight", + } + mcore_layer_to_lora["mlp_fc1"] = { + "base_model_layer": "mlp.linear_fc1.weight", + "lora_in": "mlp.adapter_layer.lora_hto4h_adapter.linear_in.weight", + "lora_out": "mlp.adapter_layer.lora_hto4h_adapter.linear_out.weight", + } + mcore_layer_to_lora["mlp_fc2"] = { + "base_model_layer": "mlp.linear_fc2.weight", + "lora_in": "mlp.adapter_layer.lora_4htoh_adapter.linear_in.weight", + "lora_out": "mlp.adapter_layer.lora_4htoh_adapter.linear_out.weight", + } + + if mcore: + for nl in range(num_layers): + for key in mcore_layer_to_lora.keys(): + key_base = f'model.decoder.layers.{nl}.{mcore_layer_to_lora[key]["base_model_layer"]}' + key_lora_in = f'model.decoder.layers.{nl}.{mcore_layer_to_lora[key]["lora_in"]}' + key_lora_out = f'model.decoder.layers.{nl}.{mcore_layer_to_lora[key]["lora_out"]}' + if key_lora_in in lora_state_dict[0] and key_lora_out in lora_state_dict[0]: + if key in ["attention_qkv", 'mlp_fc1']: + wt_lora_in = torch.cat([lora_state_dict[_tp][key_lora_in] for _tp in range(tp)], dim=0).float() + else: + wt_lora_in = torch.cat([lora_state_dict[_tp][key_lora_in] for _tp in range(tp)], dim=1).float() + + wt_lora_out = torch.cat([lora_state_dict[_tp][key_lora_out] for _tp in range(tp)], dim=0).float() + wt_base = base_model_state_dict[key_base] + wt_lora = wt_lora_out @ wt_lora_in + base_model_state_dict[key_base] = (wt_base.float() + wt_lora.to(wt_base.device)).type_as(wt_base) + print(f'merging for weight {key_base}') + else: + logging.warning("Non-mcore model only supports merging lora weights for attention_qkv layers") + for nl in range(num_layers): key_self_attn_kqv = f'model.language_model.encoder.layers.{nl}.self_attention.query_key_value.weight' key_lora_in = f'model.language_model.encoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_in.weight' key_lora_out = f'model.language_model.encoder.layers.{nl}.self_attention.adapter_layer.lora_kqv_adapter.linear_out.weight' - wt_lora_in = torch.cat([lora_state_dict[_tp][key_lora_in] for _tp in range(tp)], dim=0) - wt_lora_out = torch.cat([lora_state_dict[_tp][key_lora_out] for _tp in range(tp)], dim=0) - wt_self_attn = base_model_state_dict[key_self_attn_kqv] - wt_lora = wt_lora_out @ wt_lora_in - base_model_state_dict[key_self_attn_kqv] = wt_self_attn + wt_lora.type_as(wt_self_attn) - print("merging for weight", key_self_attn_kqv) + + wt_lora_in = torch.cat([lora_state_dict[_tp][key_lora_in] for _tp in range(tp)], dim=0).float() + wt_lora_out = torch.cat([lora_state_dict[_tp][key_lora_out] for _tp in range(tp)], dim=0).float() + wt_self_attn = base_model_state_dict[key_self_attn_kqv] + 
wt_lora = wt_lora_out @ wt_lora_in + base_model_state_dict[key_self_attn_kqv] = ( + wt_self_attn.float() + wt_lora.to(wt_self_attn.device) + ).type_as(wt_self_attn) + print("merging for weight", key_self_attn_kqv) + return base_model_state_dict @@ -214,6 +254,9 @@ def main(cfg) -> None: # load the merged_weights back into the base model, for this current rank. if model.cfg.megatron_amp_O2: merged_weights = fix_for_O2(merged_weights) + model.cfg.use_cpu_initialization = ( + False # set it back to False otherwise the merged model won't be loaded properly for futher tuning + ) model.load_state_dict(merged_weights) if cfg.trainer.accelerator != 'cpu' and model.global_rank == 0: From 4f3e39fd231c4c889e3712c1faa0df018691f5f6 Mon Sep 17 00:00:00 2001 From: Vladimir Bataev Date: Fri, 23 Feb 2024 12:06:59 +0400 Subject: [PATCH 02/12] Update k2 version (#8478) Signed-off-by: Vladimir Bataev --- scripts/installers/install_k2.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installers/install_k2.sh b/scripts/installers/install_k2.sh index 36497b35a401b..18d948209ab85 100755 --- a/scripts/installers/install_k2.sh +++ b/scripts/installers/install_k2.sh @@ -15,7 +15,7 @@ # limitations under the License. K2_REPO=https://github.com/k2-fsa/k2 -LATEST_RELEASE=d12eec7 # Temporary fix for PyTorch 2.1.0 +LATEST_RELEASE=525cfa5 # fix for PyTorch 2.2.0 # uncomment the following line after the next k2 version is released (>1.24.4) #LATEST_RELEASE=$(git -c 'versionsort.suffix=-' \ # ls-remote --exit-code --refs --sort='version:refname' --tags ${K2_REPO} '*.*' \ From bb5188cfffc10b7233416997b232ffb461a1f46f Mon Sep 17 00:00:00 2001 From: jbaczek <45043825+jbaczek@users.noreply.github.com> Date: Fri, 23 Feb 2024 11:01:57 +0100 Subject: [PATCH 03/12] Add mcore full TE transformer layer spec (#8328) * Add spec and implement autocast layer Signed-off-by: Jan Baczek * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Jan Baczek * remove try-catchs, these dependecies are mandatory for this file Signed-off-by: Jan Baczek * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Jan Baczek * Check out this cool try/except clause Signed-off-by: Jan Baczek * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove unused import Signed-off-by: Jan Baczek * Add import tests to Jenkinsfile Signed-off-by: Jan Baczek * Move import tests to Jenkins and remove code that is developed only for passing tests Signed-off-by: Jan Baczek * Make test robust to faulty base configs Signed-off-by: Jan Baczek * Use proper GPT implementation in the test Signed-off-by: Jan Baczek * Update nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py Co-authored-by: Sudhakar Singh Signed-off-by: jbaczek <45043825+jbaczek@users.noreply.github.com> * Update nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py Co-authored-by: Sudhakar Singh Signed-off-by: jbaczek <45043825+jbaczek@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py Co-authored-by: Jaemin Choi Signed-off-by: jbaczek <45043825+jbaczek@users.noreply.github.com> * Update 
nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py Co-authored-by: Jaemin Choi Signed-off-by: jbaczek <45043825+jbaczek@users.noreply.github.com> * Add TE knobs to the copy of AutocastTransformerLayer Signed-off-by: Jan Baczek * Add TE knobs to the copy of AutocastTransformerLayer Signed-off-by: Jan Baczek * Add dummy parameter to accomodated for the changes in mcore Signed-off-by: Jan Baczek * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mcore to 0.5.0 in Jenkins pipeline Signed-off-by: Jan Baczek * Bump mcore commit. This is commit from tot, not any release. Signed-off-by: Jan Baczek * Remove from the test config option that is incompatible with bias_activation_fusion Signed-off-by: Jan Baczek * Bump TE version in CI to 1.4 Signed-off-by: Jan Baczek * Update test Signed-off-by: Jan Baczek * Change precision for the test - current runnens don't support bf16 Signed-off-by: Jan Baczek --------- Signed-off-by: Jan Baczek Signed-off-by: jbaczek <45043825+jbaczek@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Sudhakar Singh Co-authored-by: Jaemin Choi --- .github/workflows/import-test.yml | 107 ------- Jenkinsfile | 95 ++++++- .../gpt_full_te_layer_autocast_spec.py | 263 ++++++++++++++++++ .../language_modeling/megatron_gpt_model.py | 22 +- 4 files changed, 369 insertions(+), 118 deletions(-) delete mode 100644 .github/workflows/import-test.yml create mode 100644 nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py diff --git a/.github/workflows/import-test.yml b/.github/workflows/import-test.yml deleted file mode 100644 index e9b10e1e34afc..0000000000000 --- a/.github/workflows/import-test.yml +++ /dev/null @@ -1,107 +0,0 @@ -name: CI-Import-Check - -on: - push: - pull_request: - paths: - - "**" - -# Check https://hub.docker.com/r/pytorch/pytorch/tags for latest tags -jobs: - - test-asr-imports: - runs-on: ubuntu-latest - container: - image: pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime - steps: - - name: Checkout repo - uses: actions/checkout@v2 - - name: Update base dependencies - run: | - apt-get update && apt-get install -y build-essential - apt-get install -y libsndfile1 make - - name: Install nemo dependencies - id: nemo-wheel - run: | - pip install Cython - # install test requirements - pip install -r requirements/requirements_test.txt - # Build nemo as a wheel - pip install build - python -m build --no-isolation --wheel - # Preserve wheel location - DIST_FILE=$(find ./dist -name "*.whl" | head -n 1) - echo "::set-output name=DIST_FILE::${DIST_FILE}" - - name: Test ASR Domain Imports - run: | - # Install NeMo Domain - pip install "${{ steps.nemo-wheel.outputs.DIST_FILE }}[asr]" - # Run import checks - python tests/core_ptl/check_imports.py --domain "asr" - # Uninstall NeMo - pip uninstall -y nemo_toolkit - - test-tts-imports: - runs-on: ubuntu-latest - container: - image: pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime - steps: - - name: Checkout repo - uses: actions/checkout@v2 - - name: Update base dependencies - run: | - apt-get update && apt-get install -y build-essential - apt-get install -y libsndfile1 make - - name: Install nemo dependencies - id: nemo-wheel - run: | - pip install Cython - # install test requirements - pip install -r requirements/requirements_test.txt - # Build nemo as a wheel - pip install build - python -m build --no-isolation --wheel - # 
Preserve wheel location - DIST_FILE=$(find ./dist -name "*.whl" | head -n 1) - echo "::set-output name=DIST_FILE::${DIST_FILE}" - - name: Test TTS Domain Imports - run: | - # Install NeMo Domain - pip install "${{ steps.nemo-wheel.outputs.DIST_FILE }}[tts]" - # Run import checks - python tests/core_ptl/check_imports.py --domain "tts" - # Uninstall NeMo - pip uninstall -y nemo_toolkit - - test-nlp-imports: - runs-on: ubuntu-latest - container: - image: pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime - steps: - - name: Checkout repo - uses: actions/checkout@v2 - - name: Update base dependencies - run: | - apt-get update && apt-get install -y build-essential - apt-get install -y libsndfile1 make - - name: Install nemo dependencies - id: nemo-wheel - run: | - pip install Cython - # install test requirements - pip install -r requirements/requirements_test.txt - # Build nemo as a wheel - pip install build - python -m build --no-isolation --wheel - # Preserve wheel location - DIST_FILE=$(find ./dist -name "*.whl" | head -n 1) - echo "::set-output name=DIST_FILE::${DIST_FILE}" - - name: Test NLP Domain Imports - run: | - # Install NeMo Domain - pip install "${{ steps.nemo-wheel.outputs.DIST_FILE }}[nlp]" - # Run import checks - python tests/core_ptl/check_imports.py --domain "nlp" - # Uninstall NeMo - pip uninstall -y nemo_toolkit - diff --git a/Jenkinsfile b/Jenkinsfile index 636d1b519f25f..b626f37fb804f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -68,7 +68,7 @@ pipeline { steps { sh 'git clone https://github.com/NVIDIA/TransformerEngine.git && \ cd TransformerEngine && \ - git fetch origin da30634a6c9ccdbb6c587b6c93b1860e4b038204 && \ + git fetch origin 8c9abbb80dba196f086b8b602a7cf1bce0040a6a && \ git checkout FETCH_HEAD && \ git submodule init && git submodule update && \ NVTE_FRAMEWORK=pytorch NVTE_WITH_USERBUFFERS=1 MPI_HOME=/usr/local/mpi pip install .' @@ -91,7 +91,7 @@ pipeline { steps { sh 'git clone https://github.com/NVIDIA/Megatron-LM.git && \ cd Megatron-LM && \ - git checkout 240a8ef7a21df201e47b5b2ae33cc5f4c5486849 && \ + git checkout 5f9c870f9f24b482509699d206a9dbb00958f6fc && \ pip install .' 
} } @@ -115,6 +115,13 @@ pipeline { sh 'python -c "import nemo.collections.tts as nemo_tts"' } } + stage('Import Checks'){ + steps { + sh 'python tests/core_ptl/check_imports.py --domain "asr"' + sh 'python tests/core_ptl/check_imports.py --domain "nlp"' + sh 'python tests/core_ptl/check_imports.py --domain "tts"' + } + } stage('L0: Unit Tests GPU') { steps { sh 'NEMO_NUMBA_MINVER=0.53 pytest -m "not pleasefixme" --with_downloads' @@ -3478,6 +3485,90 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' sh "rm -rf examples/nlp/language_modeling/token_classification_results" } } + stage('L2: Megatron GPT Pretraining and Resume Training TETransformerLayerTP=2') { + when { + anyOf { + branch 'main' + changeRequest target: 'main' + } + } + failFast true + steps { + sh "python examples/nlp/language_modeling/megatron_gpt_pretraining.py \ + trainer.devices=2 \ + trainer.accelerator=gpu \ + trainer.log_every_n_steps=1 \ + trainer.val_check_interval=2 \ + trainer.limit_val_batches=2 \ + trainer.accumulate_grad_batches=1 \ + trainer.max_steps=3 \ + trainer.precision=16 \ + trainer.gradient_clip_val=1.0 \ + exp_manager.exp_dir=examples/nlp/language_modeling/gpt_pretrain_results \ + ++model.name=megatron_gpt_full_te_layer_autocast \ + model.mcore_gpt=True \ + model.tensor_model_parallel_size=2 \ + model.optim.name=fused_adam \ + model.optim.lr=2e-4 \ + model.optim.sched.warmup_steps=1 \ + model.optim.sched.constant_steps=1 \ + model.optim.sched.min_lr=8e-5 \ + model.max_position_embeddings=128 \ + model.encoder_seq_length=128 \ + model.data.seq_length=128 \ + model.normalization=layernorm1p \ + model.bias_activation_fusion=True \ + model.bias_dropout_add_fusion=True \ + model.tokenizer.vocab_file=/home/TestData/nlp/megatron_gpt/data/gpt/vocab.json \ + model.tokenizer.merge_file=/home/TestData/nlp/megatron_gpt/data/gpt/merges.txt \ + model.num_layers=8 \ + model.hidden_size=256 \ + model.num_attention_heads=8 \ + model.activations_checkpoint_method=null \ + model.activations_checkpoint_granularity=null \ + model.activations_checkpoint_num_layers=null \ + model.data.data_prefix=[.5,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document,.5,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document] \ + model.data.index_mapping_dir=examples/nlp/language_modeling/gpt_index_mappings" + sh "python examples/nlp/language_modeling/megatron_gpt_pretraining.py \ + trainer.devices=2 \ + trainer.accelerator=gpu \ + trainer.log_every_n_steps=1 \ + trainer.val_check_interval=2 \ + trainer.limit_val_batches=2 \ + trainer.accumulate_grad_batches=1 \ + trainer.max_steps=6 \ + trainer.precision=16 \ + trainer.gradient_clip_val=1.0 \ + exp_manager.exp_dir=examples/nlp/language_modeling/gpt_pretrain_results \ + exp_manager.resume_if_exists=True \ + ++model.name=megatron_gpt_full_te_layer_autocast \ + model.mcore_gpt=True \ + model.tensor_model_parallel_size=2 \ + model.optim.name=fused_adam \ + model.optim.lr=2e-4 \ + model.optim.sched.warmup_steps=2 \ + model.optim.sched.constant_steps=2 \ + model.optim.sched.min_lr=8e-5 \ + model.max_position_embeddings=128 \ + model.encoder_seq_length=128 \ + model.data.seq_length=128 \ + model.normalization=layernorm1p \ + model.bias_activation_fusion=True \ + model.bias_dropout_add_fusion=True \ + model.tokenizer.vocab_file=/home/TestData/nlp/megatron_gpt/data/gpt/vocab.json \ + model.tokenizer.merge_file=/home/TestData/nlp/megatron_gpt/data/gpt/merges.txt \ + model.num_layers=8 \ + model.hidden_size=256 \ + 
model.num_attention_heads=8 \ + model.activations_checkpoint_method=null \ + model.activations_checkpoint_granularity=null \ + model.activations_checkpoint_num_layers=null \ + model.data.data_prefix=[.5,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document,.5,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document] \ + model.data.index_mapping_dir=examples/nlp/language_modeling/gpt_index_mappings" + sh "rm -rf examples/nlp/language_modeling/gpt_pretrain_results" + sh "rm -rf examples/nlp/language_modeling/gpt_index_mappings" + } + } // @chcui: model.cpu_offloading_num_layers=7 # temp workaround before m-lm !1124 is merged stage('L2: Megatron GPT Pretraining and Resume Training TP=2') { when { diff --git a/nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py b/nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py new file mode 100644 index 0000000000000..8e9be1120e944 --- /dev/null +++ b/nemo/collections/nlp/models/language_modeling/megatron/gpt_full_te_layer_autocast_spec.py @@ -0,0 +1,263 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Optional + +import torch +from megatron.core import parallel_state, tensor_parallel +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint + +from transformer_engine.pytorch import TransformerLayer + +from nemo.collections.nlp.parts import utils_funcs + + +# Copied from nemo/collections/nlp/modules/common/megatron/transformer.py +# as the source file is slated to be removed +class AutocastTransformerLayer(TransformerLayer): + def __init__( + self, + hidden_size: int, + ffn_hidden_size: int, + layernorm_epsilon: float, + num_attention_heads: int, + init_method: Callable, + output_layer_init_method: Callable, + hidden_dropout: float, + attention_dropout: float, + layer_number: Optional[int] = None, + kv_channels: Optional[int] = None, + self_attn_mask_type: str = "causal", + tp_group: Optional[Any] = None, + tp_size: int = 1, + params_dtype: torch.dtype = torch.float32, + get_rng_state_tracker: Optional[Callable] = None, + fuse_wgrad_accumulation: bool = False, + seq_length: Optional[int] = None, + micro_batch_size: Optional[int] = None, + sequence_parallel: bool = False, + apply_residual_connection_post_layernorm: bool = False, + output_layernorm: bool = False, + layer_type: str = "encoder", + drop_path_rate: float = 0, + use_emha: bool = False, + ub_tp_comm_overlap: bool = False, + ub_bulk_wgrad: bool = True, + ub_bulk_dgrad: bool = True, + ub_split_ag: bool = True, + ub_split_rs: bool = True, + ub_atomic_gemm_ag: bool = False, + ub_atomic_gemm_rs: bool = False, + autocast_dtype: Any = 16, + zero_centered_gamma: bool = False, + device: str = 'cuda', + ) -> None: + super().__init__( + hidden_size=hidden_size, + ffn_hidden_size=ffn_hidden_size, + 
layernorm_epsilon=layernorm_epsilon, + num_attention_heads=num_attention_heads, + init_method=init_method, + output_layer_init_method=output_layer_init_method, + hidden_dropout=hidden_dropout, + attention_dropout=attention_dropout, + layer_number=layer_number, + kv_channels=kv_channels, + self_attn_mask_type=self_attn_mask_type, + tp_group=tp_group, + tp_size=tp_size, + params_dtype=params_dtype, + get_rng_state_tracker=get_rng_state_tracker, + fuse_wgrad_accumulation=fuse_wgrad_accumulation, + seq_length=seq_length, + micro_batch_size=micro_batch_size, + sequence_parallel=sequence_parallel, + apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm, + output_layernorm=output_layernorm, + layer_type=layer_type, + drop_path_rate=drop_path_rate, + set_parallel_mode=tp_size > 1, + fuse_qkv_params=True, + zero_centered_gamma=zero_centered_gamma, + ub_tp_comm_overlap=ub_tp_comm_overlap, + ub_bulk_wgrad=ub_bulk_wgrad, + ub_bulk_dgrad=ub_bulk_dgrad, + ub_split_ag=ub_split_ag, + ub_split_rs=ub_split_rs, + ub_atomic_gemm_ag=ub_atomic_gemm_ag, + ub_atomic_gemm_rs=ub_atomic_gemm_rs, + device=device, + ) + # use_emha=use_emha, + + # Dtype for forward pass - ignore amp O2 + self.dtype = utils_funcs.torch_dtype_from_precision(autocast_dtype, megatron_amp_O2=None) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + encoder_output: Optional[torch.Tensor] = None, + enc_dec_attn_mask: Optional[torch.Tensor] = None, + inference_params: Optional[Any] = None, + is_first_microbatch: Optional[bool] = None, + checkpoint_core_attention: Optional[bool] = False, + ) -> torch.Tensor: + if self.dtype == torch.float32: + return super().forward( + hidden_states, + attention_mask, + encoder_output=encoder_output, + enc_dec_attn_mask=enc_dec_attn_mask, + inference_params=inference_params, + is_first_microbatch=is_first_microbatch, + checkpoint_core_attention=checkpoint_core_attention, + ) + with torch.autocast(device_type="cuda", dtype=self.dtype): + return super().forward( + hidden_states, + attention_mask, + encoder_output=encoder_output, + enc_dec_attn_mask=enc_dec_attn_mask, + inference_params=inference_params, + is_first_microbatch=is_first_microbatch, + checkpoint_core_attention=checkpoint_core_attention, + ) + + +from megatron.core.transformer.transformer_layer import BaseTransformerLayer + + +class TETransformerLayerAutocast(AutocastTransformerLayer, BaseTransformerLayer): + def __init__(self, config, layer_number=1, hidden_dropout=None): + self.config = config + self.is_first_microbatch = True + precision = 'bf16' if config.bf16 else 16 + + super().__init__( + hidden_size=config.hidden_size, + ffn_hidden_size=config.ffn_hidden_size, + layernorm_epsilon=config.layernorm_epsilon, + num_attention_heads=config.num_attention_heads, + init_method=config.init_method, + output_layer_init_method=config.output_layer_init_method, + hidden_dropout=config.hidden_dropout, + attention_dropout=config.attention_dropout, + layer_number=layer_number + self._get_layer_offset(), + kv_channels=config.kv_channels, + # self_attn_mask_type='causal', # Use default 'causal' + tp_size=parallel_state.get_tensor_model_parallel_world_size(), + params_dtype=config.params_dtype, + get_rng_state_tracker=tensor_parallel.random.get_cuda_rng_tracker, + fuse_wgrad_accumulation=config.gradient_accumulation_fusion, + seq_length=None, # used for jit warmup + micro_batch_size=None, # used for jit warmup + sequence_parallel=config.sequence_parallel, + 
apply_residual_connection_post_layernorm=config.apply_residual_connection_post_layernorm, + autocast_dtype=precision, + # use_emha=False, # Use default 'False' + ub_tp_comm_overlap=config.tp_comm_overlap, + ub_bulk_wgrad=config.tp_comm_bulk_wgrad, + ub_bulk_dgrad=config.tp_comm_bulk_dgrad, + ub_split_ag=config.tp_comm_split_ag, + ub_split_rs=config.tp_comm_split_rs, + ub_atomic_gemm_ag=config.tp_comm_atomic_ag, + ub_atomic_gemm_rs=config.tp_comm_atomic_rs, + zero_centered_gamma=config.layernorm_zero_centered_gamma, + device='cpu' if config.use_cpu_initialization else 'cuda', + ) + + # Called by MCore's TransformerBlock.forward + # megatron/core/transformer/transformer_block.py + def forward( + self, + hidden_states, + attention_mask, + context=None, + context_mask=None, + rotary_pos_emb=None, + inference_params=None, + packed_seq_params=None, # TODO: handle this + ): + hidden_states = super().forward( + hidden_states, + attention_mask=attention_mask, + encoder_output=context, + enc_dec_attn_mask=context_mask, + inference_params=inference_params, + is_first_microbatch=self.is_first_microbatch, + # checkpoint_core_attention, + ) + self.is_first_microbatch = False + context = None + + return hidden_states, context + + def _get_layer_offset(self): + + pipeline_rank = parallel_state.get_pipeline_model_parallel_rank() + + num_layers_per_pipeline_rank = ( + self.config.num_layers // parallel_state.get_pipeline_model_parallel_world_size() + ) + + if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None: + vp_rank = parallel_state.get_virtual_pipeline_model_parallel_rank() + vp_size = parallel_state.get_virtual_pipeline_model_parallel_world_size() + + total_num_layers = self.config.num_layers + num_layers_per_virtual_rank = num_layers_per_pipeline_rank // vp_size + total_virtual_chunks = total_num_layers // vp_size + offset = vp_rank * total_virtual_chunks + (pipeline_rank * num_layers_per_virtual_rank) + + else: + # Each stage gets a contiguous set of layers. + if parallel_state.get_pipeline_model_parallel_world_size() > 1: + offset = pipeline_rank * num_layers_per_pipeline_rank + else: + offset = 0 + + return offset + + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()): + TENSOR_PARALLEL_LAYERS_AXIS_MAP = { + 'self_attention.layernorm_qkv.weight': 0, + 'self_attention.layernorm_qkv.bias': 0, + "self_attention.proj.weight": 1, + "layernorm_mlp.fc1_weight": 0, + "layernorm_mlp.fc1_bias": 0, + "layernorm_mlp.fc2_weight": 1, + } + + state_dict = self.state_dict(prefix='', keep_vars=True) + sharded_state_dict = make_sharded_tensors_for_checkpoint( + state_dict, prefix, TENSOR_PARALLEL_LAYERS_AXIS_MAP, sharded_offsets + ) + + # TODO: we need to add sharded_state_dict_keys_map to the config. 
Like in TransformerLayer submodules config + # prefixed_map = { + # f'{prefix}{k}': f'{prefix}{v}' + # for k, v in self.config.sharded_state_dict_keys_map.items() + # } + + # if prefixed_map: + # apply_prefix_mapping(sharded_state_dict, prefixed_map) + + return sharded_state_dict + + +# Use this spec to use the full Transformer layer from Transformer Engine +def get_gpt_full_te_layer_autocast_spec() -> ModuleSpec: + return ModuleSpec(module=TETransformerLayerAutocast) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index 752696ac8faa6..f0c14e920ce2b 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -39,6 +39,9 @@ is_dataset_built_on_rank, ) from nemo.collections.nlp.models.language_modeling.megatron.falcon.falcon_spec import get_falcon_layer_spec +from nemo.collections.nlp.models.language_modeling.megatron.gpt_full_te_layer_autocast_spec import ( + get_gpt_full_te_layer_autocast_spec, +) from nemo.collections.nlp.models.language_modeling.megatron.gpt_model import GPTModel from nemo.collections.nlp.models.language_modeling.megatron_base_model import MegatronBaseModel from nemo.collections.nlp.modules.common.megatron.build_model import build_model @@ -128,16 +131,17 @@ def mcore_supports_moe() -> bool: def get_specs(spec_name, num_experts=None): - if spec_name == '': - if num_experts is not None: - assert mcore_supports_moe(), "Megatron-core >= v0.5.0 is required for MoE" - return get_gpt_layer_with_transformer_engine_spec(num_experts) - else: - return get_gpt_layer_with_transformer_engine_spec() - elif spec_name == 'megatron_falcon_gpt': - return get_falcon_layer_spec() - else: + if num_experts is not None: + assert mcore_supports_moe(), "Megatron-core >= v0.5.0 is required for MoE" + + name_spec_dict = { + "": get_gpt_layer_with_transformer_engine_spec(num_experts), + "megatron_falcon_gpt": get_falcon_layer_spec(), + "megatron_gpt_full_te_layer_autocast": get_gpt_full_te_layer_autocast_spec(), + } + if spec_name not in name_spec_dict: raise ValueError(f"Spec name '{spec_name}' is not recognized.") + return name_spec_dict[spec_name] class MegatronGPTExportableModel(torch.nn.Module, Exportable): From 564b0e14785a22edb013b6b6e7886de0ae5d4ee5 Mon Sep 17 00:00:00 2001 From: Abhishree Thittenamane <47577437+athitten@users.noreply.github.com> Date: Fri, 23 Feb 2024 08:37:29 -0800 Subject: [PATCH 04/12] Handle float limit_val_batches (#8426) * Handle float limit_val_batches Signed-off-by: Abhishree * Rectify reconfiguration of float limit_val_batches Signed-off-by: Abhishree * Remove unused imports Signed-off-by: Abhishree * Scale len(val_dataloader) with float limit_val_batches Signed-off-by: Abhishree * Return len(dataloader) in microbatches Signed-off-by: Abhishree * Add back resetting of num val samples Signed-off-by: Abhishree * Fix to ensure float limit_val_batches is multiple of num_micro_batches Signed-off-by: Abhishree * Remove forcing eval samples to 1 for float limit_val_batches Signed-off-by: Abhishree * Fix bug wrt 0 limiot_val_batches Signed-off-by: Abhishree * Add missing mock_dataset line Signed-off-by: Abhishree * Avoid ensuring limit_val_batches is a mutliple of microbatches for 1.0 Signed-off-by: Abhishree * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Restore the hack forcing number of validation and test epochs 
to 1 Signed-off-by: Jan Baczek * Change limit_val_batches to 1.0 for GPT pretraining test. The integer value is covered in other tests Signed-off-by: Jan Baczek --------- Signed-off-by: Abhishree Signed-off-by: Jan Baczek Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jan Baczek --- Jenkinsfile | 4 +-- .../megatron/data_samplers.py | 14 ++++++--- .../language_modeling/megatron_base_model.py | 31 ++++++++++++++++++- .../language_modeling/megatron_gpt_model.py | 24 ++++++-------- 4 files changed, 51 insertions(+), 22 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index b626f37fb804f..663e837370269 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -3584,7 +3584,7 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' trainer.accelerator=gpu \ trainer.log_every_n_steps=1 \ trainer.val_check_interval=2 \ - trainer.limit_val_batches=2 \ + trainer.limit_val_batches=1.0 \ trainer.accumulate_grad_batches=1 \ trainer.max_steps=3 \ trainer.precision=16 \ @@ -3619,7 +3619,7 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' trainer.accelerator=gpu \ trainer.log_every_n_steps=1 \ trainer.val_check_interval=2 \ - trainer.limit_val_batches=2 \ + trainer.limit_val_batches=1.0 \ trainer.accumulate_grad_batches=1 \ trainer.max_steps=6 \ trainer.precision=16 \ diff --git a/nemo/collections/nlp/data/language_modeling/megatron/data_samplers.py b/nemo/collections/nlp/data/language_modeling/megatron/data_samplers.py index f977846477b0a..6818f99d0e4f4 100644 --- a/nemo/collections/nlp/data/language_modeling/megatron/data_samplers.py +++ b/nemo/collections/nlp/data/language_modeling/megatron/data_samplers.py @@ -81,9 +81,12 @@ def __len__(self): num_available_samples: int = self.total_samples - self.consumed_samples if self.global_batch_size is not None: if self.drop_last: - return num_available_samples // self.global_batch_size + num_global_batches = num_available_samples // self.global_batch_size else: - return (num_available_samples + self.global_batch_size - 1) // self.global_batch_size + num_global_batches = (num_available_samples + self.global_batch_size - 1) // self.global_batch_size + # return len of dataloader in terms of micro batches to avoid discrepancy between len of dataloader and + # num of batches fetched (as training step fetches in terms of micro batches) + return num_global_batches * (self.global_batch_size // self.micro_batch_times_data_parallel_size) else: return (num_available_samples - 1) // self.micro_batch_times_data_parallel_size + 1 @@ -162,9 +165,12 @@ def __len__(self): num_available_samples = active_total_samples - self.consumed_samples % active_total_samples if self.global_batch_size is not None: if self.drop_last: - return num_available_samples // self.global_batch_size + num_global_batches = num_available_samples // self.global_batch_size else: - return (num_available_samples + self.global_batch_size - 1) // self.global_batch_size + num_global_batches = (num_available_samples + self.global_batch_size - 1) // self.global_batch_size + # return len of dataloader in terms of micro batches to avoid discrepancy between len of dataloader and + # num of batches fetched (as training step fetches in terms of micro batches) + return num_global_batches * (self.global_batch_size // self.micro_batch_times_data_parallel_size) else: if self.drop_last: return num_available_samples // self.micro_batch_times_data_parallel_size diff --git 
a/nemo/collections/nlp/models/language_modeling/megatron_base_model.py b/nemo/collections/nlp/models/language_modeling/megatron_base_model.py index 5321a307b2c47..6a2ea80ec764d 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_base_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_base_model.py @@ -27,6 +27,7 @@ from pytorch_lightning.plugins.precision import MixedPrecisionPlugin from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator from pytorch_lightning.trainer.trainer import Trainer +from pytorch_lightning.utilities.exceptions import MisconfigurationException from nemo.collections.nlp.models.nlp_model import NLPModel from nemo.collections.nlp.modules.common.megatron.attention import HAVE_FLASH_ATTENTION @@ -322,9 +323,37 @@ def _reconfigure_val_batches(self): """ Reconfigure trainer.limit_val_batches for pretraining """ + # Override limit_val_batches to be a multiple of num microbatches and so there are limit_val_batches//num_micro_batches num of global batches if isinstance(self.trainer.limit_val_batches, int): - # Override limit_val_batches to be a multiple of num microbatches and so there are limit_val_batches//num_micro_batches num of global batches self.trainer.limit_val_batches *= get_num_microbatches() + else: + assert isinstance(self.trainer.limit_val_batches, float) + # Don't reconfigure if limit_val_batches is 0.0 + if self.trainer.limit_val_batches == 0.0: + return + # len(self._validation_dl) returns len as num of microbatches + val_len_in_micro_batches = len(self._validation_dl) + if self._validation_ds is not None and len(self._validation_dl) != float("inf"): + if self.trainer.limit_val_batches == 1.0: + self.trainer.limit_val_batches = val_len_in_micro_batches + else: + limit_val_micro_batches = int(val_len_in_micro_batches * self.trainer.limit_val_batches) + if limit_val_micro_batches == 0 and self.trainer.limit_val_batches > 0.0: + min_percentage = 1.0 / len(self._validation_dl) + raise MisconfigurationException( + f"You requested to check {self.trainer.limit_val_batches} of the val_dataloader but" + f" {self.trainer.limit_val_batches} * {len(self._validation_dl)} < 1. Please increase the" + f" `limit_val_batches` argument. 
Try at least" + f" `limit_val_batches={min_percentage}`" + ) + # Make sure trainer.limit_val_batches is a multiple of num of microbatches + if limit_val_micro_batches < get_num_microbatches(): + self.trainer.limit_val_batches = get_num_microbatches() + else: + self.trainer.limit_val_batches = ( + limit_val_micro_batches - limit_val_micro_batches % get_num_microbatches() + ) + # Override num sanity steps to be a multiple of num of microbatches self.trainer.num_sanity_val_steps *= get_num_microbatches() diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index f0c14e920ce2b..87dfc37b8b864 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -1176,15 +1176,11 @@ def loss_func(self, loss_mask, num_valid_tokens_in_ub, output_tensor): return loss def build_train_valid_test_datasets(self): - # Override limit_val_batches to be a multiple of num microbatches to prevent val_step from exiting in between a step - self._reconfigure_val_batches() - logging.info('Building GPT datasets.') if self.trainer.limit_val_batches > 1.0 and isinstance(self.trainer.limit_val_batches, float): raise ValueError("limit_val_batches must be an integer or float less than or equal to 1.0.") + logging.info('Building GPT datasets.') global_batch_size = self.cfg.global_batch_size max_train_steps = self.trainer.max_steps - eval_iters = (max_train_steps // self.trainer.val_check_interval + 1) * self.trainer.limit_val_batches - test_iters = self.trainer.limit_test_batches # Add extra FIM tokens to tokenizer if self.cfg.data.get('add_fim', False) and self.cfg.tokenizer.library == 'megatron': @@ -1192,16 +1188,12 @@ def build_train_valid_test_datasets(self): fim_tokens = [fim_tokens.prefix, fim_tokens.middle, fim_tokens.suffix, fim_tokens.pad, fim_tokens.eod] self.tokenizer.add_special_tokens({'additional_special_tokens': fim_tokens}) - train_valid_test_num_samples = [ - max_train_steps * global_batch_size, - eval_iters * global_batch_size, - test_iters * global_batch_size, - ] - - if self.trainer.limit_val_batches <= 1.0 and isinstance(self.trainer.limit_val_batches, float): - train_valid_test_num_samples[ - 1 - ] = 1 # This is to make sure we only have one epoch on every validation iteration + # The line below exploits a quirk in mcore dataset construction, to make number of epochs for validation and test equal to 1 + # The mcore dataset implementation uses the number N we provide via train_valid_test_num_samples to derive parameter E such that + # E = argmin_e e * N_d >= N, or equivalently E = ceildiv(N, N_d) + # Where N_d is the total number of samples in a dataset (files), and N is the requested number of samples (provided for every split in the list below). 
+ # Setting N = 1 we force E to be 1 as well + train_valid_test_num_samples = [max_train_steps * global_batch_size, 1, 1] mock_dataset = self.cfg.data.get("mock_dataset", False) kwargs = { @@ -1329,6 +1321,8 @@ def setup(self, stage=None): self.setup_training_data(self.cfg.data) self.setup_validation_data(self.cfg.data) self.setup_test_data(self.cfg.data) + # Override limit_val_batches to be a multiple of num microbatches to prevent val_step from exiting in between a step + self._reconfigure_val_batches() if stage == 'fit': self.initialize_last_rank_embeddings() From f19b9a5a114035c3f634031efe23a7251281b903 Mon Sep 17 00:00:00 2001 From: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Date: Fri, 23 Feb 2024 11:28:32 -0800 Subject: [PATCH 05/12] Fix tutorial links in user guide (#8497) Signed-off-by: yaoyu-33 --- docs/source/starthere/tutorials.rst | 12 ++++++++++++ tutorials/multimodal/README.md | 1 + 2 files changed, 13 insertions(+) diff --git a/docs/source/starthere/tutorials.rst b/docs/source/starthere/tutorials.rst index f48662de937f8..a61c078175f5d 100644 --- a/docs/source/starthere/tutorials.rst +++ b/docs/source/starthere/tutorials.rst @@ -160,6 +160,18 @@ To run a tutorial: * - NLP - Synthetic Tabular Data Generation - `Synthetic Tabular Data Generation `_ + * - Multimodal + - Multimodal Data Preparation + - `Multimodal Data Preparation `_ + * - Multimodal + - NeVA (LLaVA) Tutorial + - `NeVA (LLaVA) Tutorial `_ + * - Multimodal + - Stable Diffusion Tutorial + - `Stable Diffusion Tutorial `_ + * - Multimodal + - DreamBooth Tutorial + - `DreamBooth Tutorial `_ * - TTS - NeMo TTS Primer - `NeMo TTS Primer `_ diff --git a/tutorials/multimodal/README.md b/tutorials/multimodal/README.md index 3da7c87d7c084..57002536940d5 100644 --- a/tutorials/multimodal/README.md +++ b/tutorials/multimodal/README.md @@ -13,3 +13,4 @@ This repository contains the following resources: * [Data Preparation](./Multimodal%20Data%20Preparation.ipynb) * [Train And Infer Stable Diffusion Model](./Stable%20Diffusion%20Tutorial.ipynb) * [Train DreanBooth Model](./DreamBooth%20Tutorial.ipynb) +* [Train Neva Model](./NeVA%20Tutorial.ipynb) From 5b38a7e6b9b2b75abb704a79b79b20a1e4f9ff35 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Fri, 23 Feb 2024 15:19:29 -0500 Subject: [PATCH 06/12] Sequence Parallel for LoRA (#8369) * support lora + sequence parallel Signed-off-by: Chen Cui * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add more comments Signed-off-by: Chen Cui * add lora SP CI test Signed-off-by: Chen Cui * support lora for all linear modules as in #7988 Signed-off-by: Chen Cui * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Chen Cui Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Jenkinsfile | 36 +++++++++++++++++++ .../language_modeling/megatron_gpt_model.py | 6 ++-- .../megatron/adapters/parallel_adapters.py | 25 ++++++++++++- nemo/collections/nlp/parts/peft_config.py | 8 +++++ 4 files changed, 72 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 663e837370269..0fc492961c61b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4291,6 +4291,42 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' sh "rm -rf /home/TestData/nlp/lora_tuning_tp2" } } + stage('L2: Megatron GPT PEFT Lora TP=2 SP') { + when { + anyOf { + branch 'main' + changeRequest target: 'main' + } + } + failFast true + 
steps { + sh "rm -rf /home/TestData/nlp/lora_tuning_tp2_sp" + sh "python examples/nlp/language_modeling/tuning/megatron_gpt_finetuning.py \ + trainer.devices=2 \ + trainer.log_every_n_steps=1 \ + trainer.max_epochs=9999 \ + trainer.max_steps=3 \ + trainer.val_check_interval=3 \ + ++trainer.limit_val_batches=2 \ + trainer.precision=16 \ + exp_manager.exp_dir=/home/TestData/nlp/lora_tuning_tp2 \ + model.pipeline_model_parallel_size=1 \ + model.tensor_model_parallel_size=2 \ + model.sequence_parallel=true \ + model.restore_from_path=/home/TestData/nlp/megatron_gpt/TP2/megatron_gpt_tp2.nemo \ + model.peft.peft_scheme='lora' \ + model.answer_only_loss=True \ + model.micro_batch_size=1 \ + model.global_batch_size=1 \ + model.data.train_ds.file_names=[/home/TestData/nlp/megatron_sft/quarel.jsonl] \ + model.data.train_ds.concat_sampling_probabilities=[1.0] \ + model.data.train_ds.num_workers=0 \ + model.data.validation_ds.num_workers=0 \ + model.data.validation_ds.file_names=[/home/TestData/nlp/megatron_sft/quarel.jsonl] \ + model.data.validation_ds.names=[quarel]" + sh "rm -rf /home/TestData/nlp/lora_tuning_tp2_sp" + } + } stage('L2: Megatron GPT Eval') { when { anyOf { diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index 87dfc37b8b864..1da9f5df1e9ab 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -755,7 +755,7 @@ def _append_sequence_parallel_module_grads(self, module, grads): # (@adithyare) adapter training now extends MegatronGPTModel # so we have to add this check here to ensure we do not # perform all_reduce when grad is None. - # grad can be None when performing PeFT training. + # grad can be None when performing PEFT training. if sequence_parallel_param and param.requires_grad: if self.megatron_amp_O2: grad = param.main_grad @@ -775,7 +775,9 @@ def allreduce_sequence_parallel_gradients(self): self._append_sequence_parallel_module_grads(module, grads) else: self._append_sequence_parallel_module_grads(self.model, grads) - + if not grads: + # may be empty for PEFT training + return coalesced = torch._utils._flatten_dense_tensors(grads) torch.distributed.all_reduce(coalesced, group=parallel_state.get_tensor_model_parallel_group()) for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)): diff --git a/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py b/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py index d57d40b5c5813..ac85ea7a1d2ed 100644 --- a/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py +++ b/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py @@ -43,6 +43,10 @@ try: from megatron.core import ModelParallelConfig from megatron.core.tensor_parallel import ColumnParallelLinear, RowParallelLinear + from megatron.core.tensor_parallel.mappings import ( + gather_from_sequence_parallel_region, + scatter_to_sequence_parallel_region, + ) HAVE_MEGATRON_CORE = True @@ -147,11 +151,13 @@ def __init__( self.activation = activation_registry[activation]() self.norm_position = norm_position self.dim = dim - + self.input_is_parallel = input_is_parallel # megatron_gpt_peft_models will provide this arg, but deprecated ones do not. # in case this arg is not provided, use the dummy default config. 
if model_parallel_config is None: model_parallel_config = ModelParallelConfig() + self._sequence_parallel = model_parallel_config.sequence_parallel + model_parallel_config.sequence_parallel = False # SP is irrelevant for the lora linear layer if input_is_parallel: self.linear_in = RowParallelLinear( @@ -219,6 +225,9 @@ def __init__( # Setup adapter strategy self.setup_adapter_strategy(adapter_mixin_strategies.ReturnResultAdapterStrategy()) + # revert config change in case it is read elsewhere + model_parallel_config.sequence_parallel = self._sequence_parallel + def _get_init_fn(self, init_method: str): if init_method == 'xavier': init_fn = init.xavier_normal_ @@ -240,10 +249,24 @@ def forward(self, x): if self.norm_position == 'pre': x = self.layer_norm(x) + if self._sequence_parallel and not self.input_is_parallel: + # for attention_qkv and linear_fc1 + # layernorm before lora is impacted by sequence parallel, + # hence seq dim need to be gathered right before lora linear layers + # this function also handles the backward pass correctly + x = gather_from_sequence_parallel_region(x) x, _ = self.linear_in(x) # (@adithyare) ColumnLinear returns output and bias, we are ignoring the bias term. x = self.activation(x) x, _ = self.linear_out(x) + + if self._sequence_parallel and self.input_is_parallel: + # for attention_dense and linear_fc2 + # layernorm after lora is impacted by sequence parallel, + # hence seq dim need to be scattered right after lora linear layers + # this function also handles the backward pass correctly + x = scatter_to_sequence_parallel_region(x) + if self.norm_position == 'post': x = self.layer_norm(x) diff --git a/nemo/collections/nlp/parts/peft_config.py b/nemo/collections/nlp/parts/peft_config.py index 1d365723ebda5..815ad4d9e952a 100644 --- a/nemo/collections/nlp/parts/peft_config.py +++ b/nemo/collections/nlp/parts/peft_config.py @@ -16,6 +16,8 @@ from omegaconf import DictConfig +from nemo.utils import logging + try: from nemo.collections.nlp.modules.common.megatron.adapters.mcore_mixins import ( MCoreGPTEmbeddingMixin, @@ -148,6 +150,12 @@ def __init__(self, cfg): ) name_key_to_cfg[AdapterName.LORA_4HtoH_ADAPTER] = adapter_cfg name_key_to_mcore_mixins[AdapterName.LORA_4HtoH_ADAPTER] = [("mlp", MCoreMLPMixin)] + else: + logging.error( + f"Unrecognized target_module string: {module}.\n" + f"The possible options are: {list(PEFT_MODULE_MAP.values())}" + ) + exit(1) self.name_key_to_mcore_mixins = name_key_to_mcore_mixins super().__init__(lora_cfg, name_key_to_cfg) From aecb8efddbf834688f4d00a8e03a1b4e7bae525c Mon Sep 17 00:00:00 2001 From: Naga Venkatesh Gavini Date: Fri, 23 Feb 2024 13:23:01 -0800 Subject: [PATCH 07/12] Call proper method to replace (#8498) Signed-off-by: Naga Venkatesh Gavini --- tools/nemo_forced_aligner/utils/data_prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/nemo_forced_aligner/utils/data_prep.py b/tools/nemo_forced_aligner/utils/data_prep.py index 48bbb25fd9c6e..c5ee74a13b444 100644 --- a/tools/nemo_forced_aligner/utils/data_prep.py +++ b/tools/nemo_forced_aligner/utils/data_prep.py @@ -152,7 +152,7 @@ def restore_token_case(word, word_tokens): word = word.replace("▁▁", "▁") while "__" in word: - word = word.repalce("__", "_") + word = word.replace("__", "_") word_tokens_cased = [] word_char_pointer = 0 From 53a7b72eb10b1691dfc39698d51feca76b48e2af Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Fri, 23 Feb 2024 14:39:34 -0800 Subject: [PATCH 08/12] Added memory logger (#8395) * Added memory logger 
Signed-off-by: Selvaraj Anandaraj * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Selvaraj Anandaraj Co-authored-by: Selvaraj Anandaraj Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Harper --- .../nlp/models/language_modeling/megatron_gpt_model.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index 1da9f5df1e9ab..6a4ab113877d9 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -331,6 +331,7 @@ def __init__(self, cfg: DictConfig, trainer: Trainer): self.get_attention_mask_from_fusion = self.cfg.get('get_attention_mask_from_fusion', True) self.initialize_ub = self.cfg.get('ub_tp_comm_overlap', False) self.log_train_loss = bool(int(os.getenv("NEMO_LOG_TRAIN_LOSS", 1))) + self.log_memory_usage = bool(int(os.getenv("NEMO_LOG_MEMORY_USAGE", 0))) self.loss_broadcast_src_rank = None self.inference_params = None @@ -690,6 +691,12 @@ def training_step(self, dataloader_iter, batch_idx): self.allreduce_first_last_embeddings() self.megatron_timer_stop('allreduce_first_last_embeddings') + if self.log_memory_usage: + mem_reserved = torch.cuda.max_memory_reserved() + self.log( + 'peak_memory_usage', mem_reserved, prog_bar=True, rank_zero_only=True, batch_size=1, + ) + ## logging if self.log_train_loss: # When using pipeline parallelism, loss is calculated only in the last pipeline stage and From 0075d264337760de5c7752c2cf49826a4c2be148 Mon Sep 17 00:00:00 2001 From: tbartley94 <90423858+tbartley94@users.noreply.github.com> Date: Sat, 24 Feb 2024 09:04:07 -0800 Subject: [PATCH 09/12] Canary refactor for Riva (#8363) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * initial commit of bleu score tracking Signed-off-by: Travis Bartley * initial commit, refactoring aed models for riva Signed-off-by: Travis Bartley * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updating Canary to support torch metrics Signed-off-by: Travis Bartley * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * style fixes Signed-off-by: Travis Bartley * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * missed an empty batch conditional Signed-off-by: Travis Bartley * Fixing dataloader issues Signed-off-by: Travis Bartley * Finishing merge conflict with transcribe update Signed-off-by: Travis Bartley * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * style fix Signed-off-by: Travis Bartley * copyright header fix Signed-off-by: Travis Bartley * yet another merge conflict Signed-off-by: Travis Bartley * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * making paired data management safer Signed-off-by: Travis Bartley * sentencepiece needs bigger tokenizer... Signed-off-by: Travis Bartley * sentencepiece tokenizer vocab needs to be +2 from vocab for canary Signed-off-by: Travis Bartley * Update canary tokenizer to be more generic, updated metrics to manage special tokens removal themselves. 
Signed-off-by: Travis Bartley * merge conflit Signed-off-by: Travis Bartley * Simplified tokenizer and corrected bug in dataloader Signed-off-by: Travis Bartley * Cleaning up docstrings and fixing inference bug. Signed-off-by: Travis Bartley * adding example scripts Signed-off-by: Travis Bartley * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleaning up useless imports Signed-off-by: Travis Bartley * adding unit tests Signed-off-by: Travis Bartley * fixing unit tests Signed-off-by: Travis Bartley * cfg name change Signed-off-by: Travis Bartley * adding custom check to pass pytests Signed-off-by: Travis Bartley * removing print script Signed-off-by: Travis Bartley * catching bugs regarding tokens. Signed-off-by: Travis Bartley * added docstrings and made examples scripts more generic Signed-off-by: Travis Bartley * docstring deleted by accident Signed-off-by: Travis Bartley * plurals in namespace Signed-off-by: Travis Bartley * changing example script Signed-off-by: Travis Bartley --------- Signed-off-by: Travis Bartley Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Piotr Żelasko --- .../speech_multitask/fast-conformer_aed.yaml | 10 +- .../speech_multitask/speech_to_text_aed.py | 14 +- .../asr/data/audio_to_text_lhotse_prompted.py | 50 ++-- nemo/collections/asr/metrics/__init__.py | 3 + nemo/collections/asr/metrics/bleu.py | 212 +++++++++++++++++ nemo/collections/asr/metrics/wer.py | 26 ++- .../asr/models/aed_multitask_models.py | 220 +++++++----------- nemo/collections/asr/models/asr_model.py | 62 ++++- .../common/data/lhotse/dataloader.py | 24 +- .../common/tokenizers/canary_tokenizer.py | 89 +++---- .../asr/test_asr_multitask_model_bpe.py | 20 ++ .../collections/asr/test_custom_tokenizer.py | 18 +- 12 files changed, 499 insertions(+), 249 deletions(-) create mode 100644 nemo/collections/asr/metrics/bleu.py diff --git a/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml b/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml index f6adc68e8ab46..77260e515a904 100644 --- a/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml +++ b/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml @@ -13,6 +13,14 @@ name: "FastConformer-Transformer-MultiTask" # may help (or even be required to) stabilize the training. init_from_nemo_model: null +# If using example training script, below will be used to instantiate spl_tokens tokenizer. +# Similar can be done by calling CanaryTokenizer.build_special_tokenizer(tokens, output_dir). +# If a tokenizer exists in dir, will skip building and use already built tokenizer. +spl_tokens: + model_dir: ??? + tokens: ["translate", "transcribe", "en", "es", "de", "fr"] + force_rebuild: False # Set to True to build new tokenizer each time. + model: sample_rate: 16000 label_smoothing: 0.0 @@ -75,7 +83,7 @@ model: type: agg # Can be either bpe (SentencePiece tokenizer) or wpe (WordPiece tokenizer) or `agg` for aggregate tokenizers langs: spl_tokens: # special tokens model - dir: ??? + dir: null # Passed in training script type: bpe en: # English tokenizer (example, replace with whichever language you would like or add tokenizers to add tokenizer for additional languages) dir: ??? 
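For reference, the spl_tokens block above maps onto a single call in user code. The snippet below is a minimal sketch, assuming a NeMo install that includes this patch; the token list mirrors spl_tokens.tokens from the config above, and the output directory is an arbitrary placeholder rather than anything mandated by the config.

    # Build the special-token tokenizer by hand, mirroring what the example training script
    # does from the spl_tokens config. The output path below is a placeholder.
    from nemo.collections.common.tokenizers.canary_tokenizer import CanaryTokenizer

    spl_tokens = ["translate", "transcribe", "en", "es", "de", "fr"]
    spl_tokenizer = CanaryTokenizer.build_special_tokenizer(
        spl_tokens, model_dir="/tmp/canary_spl_tokens", force_rebuild=False
    )
    # The returned SentencePieceTokenizer holds the default Canary tokens plus
    # <|translate|>, <|transcribe|>, <|en|>, <|es|>, <|de|>, <|fr|>.
    print(spl_tokenizer.vocab)

In the example training script this call is driven by the spl_tokens config block, and the resulting directory is then written back into cfg.model.tokenizer.langs.spl_tokens.dir before the model is instantiated.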
diff --git a/examples/asr/speech_multitask/speech_to_text_aed.py b/examples/asr/speech_multitask/speech_to_text_aed.py index 813ce03e8f38e..b0e5333249f4e 100644 --- a/examples/asr/speech_multitask/speech_to_text_aed.py +++ b/examples/asr/speech_multitask/speech_to_text_aed.py @@ -50,13 +50,12 @@ """ - import pytorch_lightning as pl from omegaconf import OmegaConf from nemo.collections.asr.models import EncDecMultiTaskModel from nemo.core.config import hydra_runner -from nemo.utils import logging +from nemo.utils import logging, model_utils from nemo.utils.exp_manager import exp_manager @@ -66,6 +65,17 @@ def main(cfg): trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) + + # Check for spl tokens to create spl_tokenizer. + if cfg.get("spl_tokens"): + logging.info("Detected spl_tokens config. Building tokenizer.") + spl_cfg = cfg["spl_tokens"] + spl_tokenizer_cls = model_utils.import_class_by_path(cfg.model.tokenizer.custom_tokenizer["_target_"]) + spl_tokenizer_cls.build_special_tokenizer( + spl_cfg["tokens"], spl_cfg["model_dir"], force_rebuild=spl_cfg["force_rebuild"] + ) + cfg.model.tokenizer.langs.spl_tokens.dir = spl_cfg["model_dir"] + aed_model = EncDecMultiTaskModel(cfg=cfg.model, trainer=trainer) # Initialize the weights of the model from another model, if provided via config diff --git a/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py b/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py index 834711d937bd5..c409ec2d8d1c9 100644 --- a/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py +++ b/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Sequence - import torch.utils.data from lhotse import CutSet from lhotse.cut import MixedCut, MonoCut @@ -56,12 +55,21 @@ def __init__( def __getitem__(self, cuts: CutSet) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: audio, audio_lens, cuts = self.load_audio(cuts) - tokens = self.prompt_format_fn(cuts, self.tokenizer, self.inference) + tokens, prompt_tokens = self.prompt_format_fn(cuts, self.tokenizer, inference=self.inference) + tokens = [torch.as_tensor(t) for t in tokens] token_lens = torch.tensor([t.size(0) for t in tokens], dtype=torch.long) tokens = collate_vectors(tokens, padding_value=self.padding_value) - return audio, audio_lens, tokens, token_lens + if self.inference: + prompt_tokens = [torch.as_tensor(t) for t in prompt_tokens] + prompt_token_lens = torch.tensor([t.size(0) for t in prompt_tokens], dtype=torch.long) + prompt_tokens = collate_vectors(prompt_tokens, padding_value=self.padding_value) + else: + prompt_tokens = None + prompt_token_lens = None + + return audio, audio_lens, tokens, token_lens, prompt_tokens, prompt_token_lens # Mapping from a string name to a known prompt formatter function. @@ -106,7 +114,7 @@ def canary(cuts: CutSet, tokenizer: TokenizerWrapper, inference: bool = False) - * <|nopnc|> * <|pnc|> * <|endoftext|> - * <|LANG|> - for each supported language, where LANG is a 2-char language code. + * <|LANG|> - for each supported language. * <|nospeech|> The prompt format syntax is as follows: @@ -124,7 +132,7 @@ def canary(cuts: CutSet, tokenizer: TokenizerWrapper, inference: bool = False) - ), "To use 'canary' prompt format, you must use the CanaryTokenizer." 
tokenizer = tokenizer._tokenizer - canary_tokens = [] + tokens, prompts = [], [] for cut in cuts: if isinstance(cut, MixedCut): cut = cut._first_non_padding_cut @@ -139,21 +147,17 @@ def canary(cuts: CutSet, tokenizer: TokenizerWrapper, inference: bool = False) - ) # Actual tokenization. If a cut has multiple supervisions, we'll stitch their tokenized texts together. - if not inference: - texts = [sup.text for sup in cut.supervisions] - langs = [sup.language for sup in cut.supervisions] - else: - texts, langs = None, None + texts = [sup.text for sup in cut.supervisions] + langs = [sup.language for sup in cut.supervisions] taskname = cut.custom['taskname'] pnc = cut.custom['pnc'] source_lang = cut.custom['source_lang'] target_lang = cut.custom['target_lang'] - prompted_tokens = canary_prompt(tokenizer, texts, langs, source_lang, target_lang, taskname, pnc) - - canary_tokens.append(prompted_tokens) - - return canary_tokens + tokens.append(canary_prompt(tokenizer, texts, langs, source_lang, target_lang, taskname, pnc)) + if inference: + prompts.append(canary_prompt(tokenizer, None, None, source_lang, target_lang, taskname, pnc)) + return tokens, prompts def canary_prompt( @@ -194,28 +198,28 @@ def canary_prompt( ) # src_lang_id/no_speech - src_lang_id = tokenizer.to_language_id(source_language) + src_lang_id = tokenizer.spl_token_to_id(source_language) prompted_tokens.append(src_lang_id) # task task = taskname - if task == 'asr': - prompted_tokens.append(tokenizer.transcribe_id) - elif task == 's2t_translation' or task == 'ast': - prompted_tokens.append(tokenizer.translate_id) + if task == 'asr' or task == "transcribe": + prompted_tokens.append(tokenizer.spl_token_to_id("transcribe")) + elif task == 's2t_translation' or task == 'ast' or task == "translate": + prompted_tokens.append(tokenizer.spl_token_to_id("translate")) else: raise ValueError(f"Unknown task: {task}") # tgt_lang_id - tgt_lang_id = tokenizer.to_language_id(target_language) + tgt_lang_id = tokenizer.spl_token_to_id(target_language) prompted_tokens.append(tgt_lang_id) # PnC pnc = f"{pnc}".lower().strip() # to account for bool or str if pnc in {'yes', 'true'}: - prompted_tokens.append(tokenizer.pnc_id) + prompted_tokens.append(tokenizer.spl_token_to_id("pnc")) elif pnc in {'no', 'false'}: - prompted_tokens.append(tokenizer.nopnc_id) + prompted_tokens.append(tokenizer.spl_token_to_id("nopnc")) else: raise ValueError(f"Unknown value for key 'pnc': {pnc}") diff --git a/nemo/collections/asr/metrics/__init__.py b/nemo/collections/asr/metrics/__init__.py index 9e32500719552..843d58ccf38f2 100644 --- a/nemo/collections/asr/metrics/__init__.py +++ b/nemo/collections/asr/metrics/__init__.py @@ -11,3 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from nemo.collections.asr.metrics.bleu import BLEU +from nemo.collections.asr.metrics.wer import WER diff --git a/nemo/collections/asr/metrics/bleu.py b/nemo/collections/asr/metrics/bleu.py new file mode 100644 index 0000000000000..011e3efe0c6a5 --- /dev/null +++ b/nemo/collections/asr/metrics/bleu.py @@ -0,0 +1,212 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Literal, Optional, Sequence, Union
+
+import torch
+from torchmetrics.functional.text.bleu import _bleu_score_compute
+from torchmetrics.text import SacreBLEUScore
+
+from nemo.collections.asr.parts.submodules.ctc_decoding import AbstractCTCDecoding
+from nemo.collections.asr.parts.submodules.multitask_decoding import AbstractMultiTaskDecoding
+from nemo.collections.asr.parts.submodules.rnnt_decoding import AbstractRNNTDecoding
+from nemo.utils import logging
+
+__all__ = ['BLEU']
+
+
+def move_dimension_to_the_front(tensor, dim_index):
+    all_dims = list(range(tensor.ndim))
+    return tensor.permute(*([dim_index] + all_dims[:dim_index] + all_dims[dim_index + 1 :]))
+
+
+# TODO: Add documentation
+class BLEU(SacreBLEUScore):
+    """
+    This metric computes numerator, denominator, hypotheses lengths, and target lengths for the Bilingual Evaluation Understudy (BLEU)
+    score between prediction and reference texts. When doing distributed training/evaluation the result of
+    ``res=BLEU(predictions, predictions_lengths, targets, target_lengths)``
+    calls will be all-reduced between all workers using SUM operations.
+
+    If used with a PytorchLightning LightningModule, include bleu_num, bleu_denom, bleu_pred_len, and bleu_target_len values inside
+    validation_step results. Then aggregate (sum) them at the end of the validation epoch to correctly compute the validation BLEU.
+
+    Example:
+        def validation_step(self, batch, batch_idx):
+            ...
+            bleu_values = self.bleu(predictions, predictions_len, transcript, transcript_len)
+            self.val_outputs = {'val_loss': loss_value, **bleu_values}
+            return self.val_outputs
+
+        def on_validation_epoch_end(self):
+            ...
+            bleu_pred_len = torch.stack([x["val_bleu_pred_len"] for x in self.val_outputs]).sum()
+            bleu_target_len = torch.stack([x["val_bleu_target_len"] for x in self.val_outputs]).sum()
+            bleu_num = torch.stack([x["val_bleu_num"] for x in self.val_outputs]).sum(dim=0)
+            bleu_denom = torch.stack([x["val_bleu_denom"] for x in self.val_outputs]).sum(dim=0)
+
+            val_bleu = {"val_bleu": self.bleu._compute_bleu(bleu_pred_len, bleu_target_len, bleu_num, bleu_denom)}
+            tensorboard_logs.update(val_bleu)
+
+            self.val_outputs.clear()  # free memory
+            return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
+
+    Args:
+        decoding: An instance of CTCDecoding, RNNTDecoding, or MultiTaskDecoding.
+        tokenize: Desired tokenizer for BLEU evaluation. (Depending on language, this will drastically affect BLEU score.)
+        n_gram: Maximum number of n_grams to compute BLEU values over. Max: 4.
+        lowercase: Whether to lowercase all inputs.
+        weights: List of float values to weight each n_gram score.
+        smooth: Whether to apply smoothing to the BLEU computation.
+        log_prediction: Whether to log a single decoded sample per call.
+        batch_dim_index: Index corresponding to batch dimension. (For RNNT.)
+        dist_sync_on_step: Whether to perform reduction on forward pass of metric.
+
+    Returns:
+        res: a dictionary of BLEU values and component metrics (the BLEU score, prediction and target lengths,
+            and per-n-gram numerators and denominators) as ``torch.Tensor`` objects; see ``compute``.
+ """ + + full_state_update: bool = True + + def __init__( + self, + decoding: Union[AbstractCTCDecoding, AbstractRNNTDecoding, AbstractMultiTaskDecoding], + tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a", + n_gram: int = 4, + lowercase: bool = False, + weights: Optional[Sequence[float]] = None, + smooth: bool = False, + log_prediction=True, + batch_dim_index=0, + dist_sync_on_step=False, + ): + super().__init__( + tokenize=tokenize, + n_gram=n_gram, + lowercase=lowercase, + weights=weights, + smooth=smooth, + dist_sync_on_step=dist_sync_on_step, + ) + self.has_spl_tokens = False + self.decoding = decoding + self.decode = None + if isinstance(self.decoding, AbstractRNNTDecoding): + self.decode = lambda predictions, predictions_lengths, predictions_mask, input_ids, targets: self.decoding.rnnt_decoder_predictions_tensor( + encoder_output=predictions, encoded_lengths=predictions_lengths + ) + elif isinstance(self.decoding, AbstractCTCDecoding): + self.decode = lambda predictions, predictions_lengths, predictions_mask, input_ids, targets: self.decoding.ctc_decoder_predictions_tensor( + decoder_outputs=predictions, + decoder_lengths=predictions_lengths, + fold_consecutive=self.fold_consecutive, + ) + elif isinstance(self.decoding, AbstractMultiTaskDecoding): + self.has_spl_tokens = True + self.decode = lambda predictions, prediction_lengths, predictions_mask, input_ids, targets: self.decoding.decode_predictions_tensor( + encoder_hidden_states=predictions, + encoder_input_mask=predictions_mask, + decoder_input_ids=input_ids, + return_hypotheses=False, + ) + else: + raise TypeError(f"WER metric does not support decoding of type {type(self.decoding)}") + + self.tokenize = tokenize + self.log_prediction = log_prediction + self.batch_dim_index = batch_dim_index + + def update( + self, + predictions: torch.Tensor, + predictions_lengths: torch.Tensor, + targets: torch.Tensor, + targets_lengths: torch.Tensor, + predictions_mask: Optional[torch.Tensor] = None, + input_ids: Optional[torch.Tensor] = None, + ): + """ + Updates metric state. + Args: + predictions: an integer torch.Tensor of shape ``[Batch, Time, {Vocabulary}]`` (if ``batch_dim_index == 0``) or + ``[Time, Batch]`` (if ``batch_dim_index == 1``) + predictions_lengths: an integer torch.Tensor of shape ``[Batch]`` + targets: an integer torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or + ``[Time, Batch]`` (if ``batch_dim_index == 1``) + target_lengths: an integer torch.Tensor of shape ``[Batch]`` + predictions_mask: a bool torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or + ``[Time, Batch]`` (if ``batch_dim_index == 1``). Required for MultiTaskDecoding. + input_ids: an int torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or + ``[Time, Batch]`` (if ``batch_dim_index == 1``). Required for MultiTaskDecoding. 
+ """ + references = [] + with torch.no_grad(): + tgt_lenths_cpu_tensor = targets_lengths.long().cpu() + targets_cpu_tensor = targets.long().cpu() + # check batch_dim_index is first dim + if self.batch_dim_index != 0: + targets_cpu_tensor = move_dimension_to_the_front(targets_cpu_tensor, self.batch_dim_index) + # iterate over batch + for ind in range(targets_cpu_tensor.shape[0]): + tgt_len = tgt_lenths_cpu_tensor[ind].item() + target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist() + reference = self.decoding.decode_tokens_to_str(target) + references.append(reference) + hypotheses, _ = self.decode(predictions, predictions_lengths, predictions_mask, input_ids, targets) + + if self.has_spl_tokens: + hypotheses = [self.decoding.strip_special_tokens(hyp) for hyp in hypotheses] + references = [self.decoding.strip_special_tokens(ref) for ref in references] + + if self.log_prediction: + logging.info(f"\n") + logging.info(f"reference:{references[0]}") + logging.info(f"predicted:{hypotheses[0]}") + + super().update(hypotheses, [references]) # Note: [references] since BLEU allows multiple references. + + def compute(self, return_all_metrics=True, prefix="", suffix=""): + """ + Returns BLEU values and component metrics. + + Args: + return_all_metrics: bool flag. On True, BLEU and composite metrics returned. If False, returns + only BLEU. Default: True. + prefix: str to prepend to metric value keys. + suffix: str to append to metric value keys. + + Returns: + Dict: key-value pairs of BLEU metrics and values. Keys are prepended and appended with prefix + and suffix flags, respectively. + """ + bleu = super().compute() + if return_all_metrics: + return { + f"{prefix}bleu{suffix}": bleu, + f"{prefix}bleu_pred_len{suffix}": self.preds_len.detach().float(), + f"{prefix}bleu_target_len{suffix}": self.target_len.detach().float(), + f"{prefix}bleu_num{suffix}": self.numerator.detach().float(), + f"{prefix}bleu_denom{suffix}": self.denominator.detach().float(), + } + return { + f"{prefix}bleu{suffix}": bleu, + } + + # Adding wrapper to avoid imports and extra variables over the namespace + def _compute_bleu( + self, predictions_lengths, targets_lengths, numerator, denominator, + ): + return _bleu_score_compute( + predictions_lengths, targets_lengths, numerator, denominator, self.n_gram, self.weights, self.smooth + ) diff --git a/nemo/collections/asr/metrics/wer.py b/nemo/collections/asr/metrics/wer.py index 28a5a5d73d197..1cb4cf06eaca8 100644 --- a/nemo/collections/asr/metrics/wer.py +++ b/nemo/collections/asr/metrics/wer.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Tuple, Union +from typing import List, Optional, Tuple, Union import editdistance import jiwer @@ -20,6 +20,7 @@ from torchmetrics import Metric from nemo.collections.asr.parts.submodules.ctc_decoding import AbstractCTCDecoding +from nemo.collections.asr.parts.submodules.multitask_decoding import AbstractMultiTaskDecoding from nemo.collections.asr.parts.submodules.rnnt_decoding import AbstractRNNTDecoding from nemo.utils import logging @@ -247,7 +248,7 @@ def on_validation_epoch_end(self): def __init__( self, - decoding: Union[AbstractCTCDecoding, AbstractRNNTDecoding], + decoding: Union[AbstractCTCDecoding, AbstractRNNTDecoding, AbstractMultiTaskDecoding], use_cer=False, log_prediction=True, fold_consecutive=True, @@ -262,17 +263,26 @@ def __init__( self.fold_consecutive = fold_consecutive self.batch_dim_index = batch_dim_index + self.has_spl_tokens = False self.decode = None if isinstance(self.decoding, AbstractRNNTDecoding): - self.decode = lambda predictions, predictions_lengths: self.decoding.rnnt_decoder_predictions_tensor( + self.decode = lambda predictions, predictions_lengths, predictions_mask, input_ids, targets: self.decoding.rnnt_decoder_predictions_tensor( encoder_output=predictions, encoded_lengths=predictions_lengths ) elif isinstance(self.decoding, AbstractCTCDecoding): - self.decode = lambda predictions, predictions_lengths: self.decoding.ctc_decoder_predictions_tensor( + self.decode = lambda predictions, predictions_lengths, predictions_mask, input_ids, targets: self.decoding.ctc_decoder_predictions_tensor( decoder_outputs=predictions, decoder_lengths=predictions_lengths, fold_consecutive=self.fold_consecutive, ) + elif isinstance(self.decoding, AbstractMultiTaskDecoding): + self.has_spl_tokens = True + self.decode = lambda predictions, prediction_lengths, predictions_mask, input_ids, targets: self.decoding.decode_predictions_tensor( + encoder_hidden_states=predictions, + encoder_input_mask=predictions_mask, + decoder_input_ids=input_ids, + return_hypotheses=False, + ) else: raise TypeError(f"WER metric does not support decoding of type {type(self.decoding)}") @@ -285,6 +295,8 @@ def update( predictions_lengths: torch.Tensor, targets: torch.Tensor, targets_lengths: torch.Tensor, + predictions_mask: Optional[torch.Tensor] = None, + input_ids: Optional[torch.Tensor] = None, ): """ Updates metric state. @@ -312,7 +324,11 @@ def update( target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist() reference = self.decoding.decode_tokens_to_str(target) references.append(reference) - hypotheses, _ = self.decode(predictions, predictions_lengths) + hypotheses, _ = self.decode(predictions, predictions_lengths, predictions_mask, input_ids, targets) + + if self.has_spl_tokens: + hypotheses = [self.decoding.strip_special_tokens(hyp) for hyp in hypotheses] + references = [self.decoding.strip_special_tokens(ref) for ref in references] if self.log_prediction: logging.info(f"\n") diff --git a/nemo/collections/asr/models/aed_multitask_models.py b/nemo/collections/asr/models/aed_multitask_models.py index d0cd40339b42c..73c0b818bc751 100644 --- a/nemo/collections/asr/models/aed_multitask_models.py +++ b/nemo/collections/asr/models/aed_multitask_models.py @@ -12,27 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy -import itertools import os -import tempfile -from dataclasses import dataclass, field +from dataclasses import dataclass from math import ceil from typing import Any, Dict, List, Optional, Union -import editdistance import numpy as np import torch -import torch.distributed as dist -from omegaconf import DictConfig, ListConfig, OmegaConf, open_dict +from omegaconf import DictConfig, OmegaConf, open_dict from pytorch_lightning import Trainer -from torchmetrics.text import SacreBLEUScore -from tqdm.auto import tqdm from nemo.collections.asr.data.audio_to_text_lhotse_prompted import ( PromptedAudioToTextLhotseDataset, get_prompt_format_fn, ) +from nemo.collections.asr.metrics import BLEU, WER from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel from nemo.collections.asr.parts.mixins import ASRBPEMixin, ASRTranscriptionMixin from nemo.collections.asr.parts.mixins.transcription import ( @@ -72,6 +66,23 @@ def lens_to_mask(lens, max_length): return mask +def _config_check(cfg): + if 'tokenizer' not in cfg: + raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !") + # Assert config has "prompt_format" + if "prompt_format" not in cfg: + raise ValueError("`cfg` must have `prompt_format` config to create a multi task model !") + # Assert config has `model_defaults` + if 'model_defaults' not in cfg: + raise ValueError("`cfg` must have `model_defaults` config to create a model !") + if "asr_enc_hidden" not in cfg.model_defaults: + raise ValueError("`cfg.model_defaults` must have `asr_enc_hidden` key !") + if "lm_enc_hidden" not in cfg.model_defaults: + raise ValueError("`cfg.model_defaults` must have `lm_enc_hidden` key !") + if "lm_dec_hidden" not in cfg.model_defaults: + raise ValueError("`cfg.model_defaults` must have `lm_dec_hidden` key !") + + @dataclass class MultiTaskTranscriptionInternalConfig(InternalTranscribeConfig): """ @@ -104,40 +115,19 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): # Convert to Hydra 1.0 compatible DictConfig cfg = model_utils.convert_model_config_to_dict_config(cfg) cfg = model_utils.maybe_update_config_version(cfg) + _config_check(cfg) - if 'tokenizer' not in cfg: - raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !") - - # Setup the tokenizer - self._setup_tokenizer(cfg.tokenizer) - - # Assert config has "prompt_format" - if "prompt_format" not in cfg: - raise ValueError("`cfg` must have `prompt_format` config to create a multi task model !") self.prompt_format = cfg.prompt_format - - if "sample_rate" not in cfg: - raise ValueError("`cfg` must have `sample_rate` config to create a multi task model !") self.sample_rate = cfg.sample_rate + self._setup_tokenizer(cfg.tokenizer) super().__init__(cfg=cfg, trainer=trainer) # Setup audio preprocessor self.preprocessor = EncDecMultiTaskModel.from_config_dict(self.cfg.preprocessor) - # Setup audio encoder self.encoder = EncDecMultiTaskModel.from_config_dict(self.cfg.encoder) - # Assert config has `model_defaults` - if 'model_defaults' not in self.cfg: - raise ValueError("`cfg` must have `model_defaults` config to create a model !") - if "asr_enc_hidden" not in self.cfg.model_defaults: - raise ValueError("`cfg.model_defaults` must have `asr_enc_hidden` key !") - if "lm_enc_hidden" not in self.cfg.model_defaults: - raise ValueError("`cfg.model_defaults` must have `lm_enc_hidden` key !") - if "lm_dec_hidden" not in self.cfg.model_defaults: - raise ValueError("`cfg.model_defaults` must have `lm_dec_hidden` key !") - # Add 
projection layer if encoder and decoder differ in hidden size asr_enc_hidden_size = self.cfg.model_defaults.asr_enc_hidden decoder_hidden_size = self.cfg.model_defaults.lm_dec_hidden @@ -215,6 +205,12 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): self.val_loss = GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True) + # TODO: PytorchMetrics lets you join two metrics together to save compute. But need to make wer and bleu have same outputs first + self.wer = WER(self.decoding, log_prediction=self.cfg.get("log_prediction")) + self.bleu = BLEU( + self.decoding, tokenize=self.cfg.get('bleu_tokenizer', "13a"), log_prediction=False + ) # Wer is handling logging + def change_decoding_strategy(self, decoding_cfg: DictConfig): """ Changes decoding strategy used during Multi Task decoding process. @@ -527,6 +523,8 @@ def input_types(self) -> Optional[Dict[str, NeuralType]]: "processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True), "transcript": NeuralType(('B', 'T'), LabelsType(), optional=True), "transcript_length": NeuralType(tuple('B'), LengthsType(), optional=True), + "prompt": NeuralType(('B', 'T'), LabelsType(), optional=True), + "prompt_length": NeuralType(tuple('B'), LengthsType(), optional=True), "sample_id": NeuralType(tuple('B'), LengthsType(), optional=True), } @@ -603,24 +601,14 @@ def forward( return transf_log_probs, encoded_len, enc_states, enc_mask - def compute_loss( - self, batch: tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor] | None - ) -> torch.Tensor: - """ - Run forward pass through the model and compute the loss. - - Args: - batch: a tuple of 4 tensors (signal, signal_len, tokens, tokens_len) as returned - by :class:`~nemo.collections.asr.data.audio_to_text_lhotse_prompted.PromptedAudioToTextLhotseDataset`. - When batch is ``None``, we'll return a zero tensor. - Returns: - The computed loss value as a single-element tensor. - """ + # PTL-specific methods + def training_step(self, batch, batch_nb): if batch is None: return torch.tensor([0.0]) - signal, signal_len, transcript, transcript_len = batch + # During training prompt and prompt_len are null, ignore. + signal, signal_len, transcript, transcript_len, prompt, prompt_len = batch input_ids, labels = transcript[:, :-1], transcript[:, 1:] transf_log_probs, encoded_len, enc_states, enc_mask = self.forward( @@ -630,14 +618,7 @@ def compute_loss( transcript_length=transcript_len, ) - transf_loss = self.loss(log_probs=transf_log_probs, labels=labels) - - return transf_loss - - # PTL-specific methods - def training_step(self, batch, batch_nb): - - audio_loss = self.compute_loss(batch) + audio_loss = self.loss(log_probs=transf_log_probs, labels=labels) tensorboard_logs = { 'train_loss': audio_loss, @@ -646,8 +627,9 @@ def training_step(self, batch, batch_nb): return {'loss': audio_loss, 'log': tensorboard_logs} - def validation_step(self, batch, batch_idx, dataloader_idx=0, eval_mode="val"): - signal, signal_len, transcript, transcript_len = batch + def validation_pass(self, batch, batch_idx, dataloader_idx=0, eval_mode="val"): + # During inference, dataloader passes pure prompt without transcript text. 
+ signal, signal_len, transcript, transcript_len, prompt, prompt_len = batch input_ids, labels = transcript[:, :-1], transcript[:, 1:] transf_log_probs, encoded_len, enc_states, enc_mask = self.forward( @@ -657,95 +639,53 @@ def validation_step(self, batch, batch_idx, dataloader_idx=0, eval_mode="val"): transcript_length=transcript_len, ) - beam_hypotheses = self.decoding.decode_predictions_tensor( - encoder_hidden_states=enc_states, - encoder_input_mask=enc_mask, - decoder_input_ids=input_ids, - return_hypotheses=False, - )[0] - transf_loss = self.loss(log_probs=transf_log_probs, labels=labels) - - ground_truths = [self.tokenizer.ids_to_text(sent) for sent in transcript.detach().cpu().tolist()] - translations = [hyp for hyp in beam_hypotheses] - self.val_loss(loss=transf_loss, num_measurements=transf_log_probs.shape[0] * transf_log_probs.shape[1]) - output_dict = { f'{eval_mode}_loss': transf_loss, - 'translations': [self.decoding.strip_special_tokens(t) for t in translations], - 'ground_truths': [self.decoding.strip_special_tokens(g) for g in ground_truths], } - if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1: - self.validation_step_outputs[dataloader_idx].append(output_dict) - else: - self.validation_step_outputs.append(output_dict) + self.wer.update( + predictions=enc_states, + predictions_lengths=encoded_len, + targets=transcript, + targets_lengths=transcript_len, + predictions_mask=enc_mask, + input_ids=prompt, + ) + wer, wer_num, wer_denom = self.wer.compute() + output_dict.update({"val_wer": wer, "val_wer_num": wer_num, "val_wer_denom": wer_denom}) + self.wer.reset() + + self.bleu.update( + predictions=enc_states, + predictions_lengths=encoded_len, + targets=transcript, + targets_lengths=transcript_len, + predictions_mask=enc_mask, + input_ids=prompt, + ) + bleu_metrics = self.bleu.compute(prefix=f"{eval_mode}_") + output_dict.update(bleu_metrics) + self.bleu.reset() return output_dict - def test_step(self, batch, batch_idx, dataloader_idx=0): - return self.validation_step(batch, batch_idx, dataloader_idx, eval_mode="test") - - def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0, eval_mode: str = "val"): - """ - Called at the end of validation to aggregate outputs. - :param outputs: list of individual outputs of each validation step. 
- """ - if not outputs: - return - - if isinstance(outputs[0], dict): - outputs = [outputs] - - for output in outputs: - eval_loss = getattr(self, 'val_loss').compute() - translations = list(itertools.chain(*[x['translations'] for x in output])) - ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output])) - - # Gather translations and ground truths from all workers - tr_and_gt = [None for _ in range(self.world_size)] - # we also need to drop pairs where ground truth is an empty string - if self.world_size > 1: - dist.all_gather_object( - tr_and_gt, [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != ''] - ) - else: - tr_and_gt[0] = [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != ''] - - if self.global_rank == 0: - _translations = [] - _ground_truths = [] - for rank in range(0, self.world_size): - _translations += [t for (t, g) in tr_and_gt[rank]] - _ground_truths += [g for (t, g) in tr_and_gt[rank]] - - sacre_bleu = SacreBLEUScore()(_translations, [[x] for x in _ground_truths]).item() - sb_score = sacre_bleu * self.world_size - - wer_scores, wer_words = 0, 0 - for h, r in zip(_translations, _ground_truths): - wer_words += len(r.split()) - wer_scores += editdistance.eval(h.split(), r.split()) - wer_score = 1.0 * wer_scores * self.world_size / wer_words - - else: - sb_score = 0.0 - wer_score = 0.0 - - # logging here only. - dataloader_prefix = self.get_validation_dataloader_prefix(dataloader_idx) - self.log(f"{dataloader_prefix}{eval_mode}_loss", eval_loss, sync_dist=True) - self.log(f"{dataloader_prefix}{eval_mode}_sacreBLEU", sb_score, sync_dist=True) - self.log(f"{dataloader_prefix}{eval_mode}_WER", wer_score, sync_dist=True) - - # in multi-validation case, anything after first one will become NaN - # as we are resetting the metric here. 
- # TODO: fix this, (not sure which hook will be ideal for this) - self.val_loss.reset() + def validation_step(self, batch, batch_idx, dataloader_idx=0): + metrics = self.validation_pass(batch, batch_idx, dataloader_idx, eval_mode="val") + if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1: + self.validation_step_outputs[dataloader_idx].append(metrics) + else: + self.validation_step_outputs.append(metrics) + return metrics - def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0): - return self.multi_validation_epoch_end(outputs, dataloader_idx, eval_mode="test") + def test_step(self, batch, batch_idx, dataloader_idx=0): + metrics = self.validation_pass(batch, batch_idx, dataloader_idx, eval_mode="test") + if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1: + self.validation_step_outputs[dataloader_idx].append(metrics) + else: + self.validation_step_outputs.append(metrics) + return metrics def test_dataloader(self): if self._test_dl is not None: @@ -837,7 +777,7 @@ def _transcribe_forward(self, batch: Any, trcfg: MultiTaskTranscriptionConfig): log_probs, encoded_len, enc_states, enc_mask = self.forward( input_signal=batch[0], input_signal_length=batch[1] ) - decoder_input_ids = batch[2].to(trcfg._internal.device) + decoder_input_ids = batch[-2].to(trcfg._internal.device) output = dict( log_probs=log_probs, encoded_lengths=encoded_len, @@ -991,7 +931,7 @@ def get_transcribe_config(cls) -> MultiTaskTranscriptionConfig: return MultiTaskTranscriptionConfig() def predict_step(self, batch, batch_idx=0, dataloader_idx=0, has_processed_signal=False): - signal, signal_len, transcript, transcript_len = batch + signal, signal_len, _, _, prompt, prompt_len = batch processed_signal = None processed_signal_length = None @@ -1006,14 +946,14 @@ def predict_step(self, batch, batch_idx=0, dataloader_idx=0, has_processed_signa input_signal_length=signal_len, processed_signal=processed_signal, processed_signal_length=processed_signal_length, - transcript=transcript, - transcript_length=transcript_len, + transcript=prompt, + transcript_length=prompt_len, ) text = self.decoding.decode_predictions_tensor( encoder_hidden_states=enc_states, encoder_input_mask=enc_mask, - decoder_input_ids=transcript, + decoder_input_ids=prompt, return_hypotheses=False, )[0] diff --git a/nemo/collections/asr/models/asr_model.py b/nemo/collections/asr/models/asr_model.py index 00b948ea9b6f0..9242b268b8e8b 100644 --- a/nemo/collections/asr/models/asr_model.py +++ b/nemo/collections/asr/models/asr_model.py @@ -30,18 +30,60 @@ class ASRModel(ModelPT, ABC): def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0): - val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean() - wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum() - wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum() - tensorboard_logs = {'val_loss': val_loss_mean, 'val_wer': wer_num / wer_denom} - return {'val_loss': val_loss_mean, 'log': tensorboard_logs} + val_loss = {} + tensorboard_logs = {} + + if 'val_loss' in outputs[0]: + val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean() + val_loss = {'val_loss': val_loss_mean} + + tensorboard_logs.update(val_loss) + + if "val_wer_num" in outputs[0]: + wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum() + wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum() + val_wer = {'val_wer': wer_num / wer_denom} + + tensorboard_logs.update(val_wer) + + if 
"val_bleu_num" in outputs[0]: + bleu_pred_len = torch.stack([x[f"val_bleu_pred_len"] for x in outputs]).sum() + bleu_target_len = torch.stack([x[f"val_bleu_target_len"] for x in outputs]).sum() + bleu_num = torch.stack([x[f"val_bleu_num"] for x in outputs]).sum(dim=0) + bleu_denom = torch.stack([x[f"val_bleu_denom"] for x in outputs]).sum(dim=0) + val_bleu = {"val_bleu": self.bleu._compute_bleu(bleu_pred_len, bleu_target_len, bleu_num, bleu_denom)} + + tensorboard_logs.update(val_bleu) + + return {**val_loss, 'log': tensorboard_logs} def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0): - val_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean() - wer_num = torch.stack([x['test_wer_num'] for x in outputs]).sum() - wer_denom = torch.stack([x['test_wer_denom'] for x in outputs]).sum() - tensorboard_logs = {'test_loss': val_loss_mean, 'test_wer': wer_num / wer_denom} - return {'test_loss': val_loss_mean, 'log': tensorboard_logs} + val_loss = {} + tensorboard_logs = {} + + if 'test_loss' in outputs[0]: + val_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean() + val_loss = {'test_loss': val_loss_mean} + + tensorboard_logs.update(val_loss) + + if "test_wer_num" in outputs[0]: + wer_num = torch.stack([x['test_wer_num'] for x in outputs]).sum() + wer_denom = torch.stack([x['test_wer_denom'] for x in outputs]).sum() + val_wer = {'test_wer': wer_num / wer_denom} + + tensorboard_logs.update(val_wer) + + if "test_bleu_num" in outputs[0]: + bleu_pred_len = torch.stack([x[f"test_bleu_pred_len"] for x in outputs]).sum() + bleu_target_len = torch.stack([x[f"test_bleu_target_len"] for x in outputs]).sum() + bleu_num = torch.stack([x[f"test_bleu_num"] for x in outputs]).sum() + bleu_denom = torch.stack([x[f"test_bleu_denom"] for x in outputs]).sum() + val_bleu = {"test_bleu": self.wer._compute_bleu(bleu_pred_len, bleu_target_len, bleu_num, bleu_denom)} + + tensorboard_logs.update(val_bleu) + + return {**val_loss, 'log': tensorboard_logs} @classmethod def list_available_models(cls) -> 'List[PretrainedModelInfo]': diff --git a/nemo/collections/common/data/lhotse/dataloader.py b/nemo/collections/common/data/lhotse/dataloader.py index c454cafabe02f..9eeb8800066ab 100644 --- a/nemo/collections/common/data/lhotse/dataloader.py +++ b/nemo/collections/common/data/lhotse/dataloader.py @@ -13,11 +13,10 @@ # limitations under the License. import logging -import random import warnings from dataclasses import dataclass from functools import partial -from typing import Any, Callable, Optional +from typing import Any, Optional import torch from lhotse import CutSet @@ -29,6 +28,8 @@ IterableDatasetWrapper, make_worker_init_fn, ) +from lhotse.lazy import LazyFlattener +from lhotse.utils import fastcopy from omegaconf import DictConfig, OmegaConf from nemo.collections.common.data.lhotse.cutset import read_cutset_from_config @@ -130,6 +131,9 @@ def get_lhotse_dataloader_from_config( # Duration filtering, same as native NeMo dataloaders. cuts = cuts.filter(DurationFilter(config.min_duration, config.max_duration)) + # Expands cuts if multiple translations are provided. + cuts = CutSet(LazyFlattener(cuts.map(_flatten_alt_text))) + # 2. Optional augmentations. # 2.a. Noise mixing. 
if config.noise_path is not None: @@ -279,3 +283,19 @@ def _normalize_loudness(cuts: CutSet, db_norm: float) -> CutSet: def _merge_supervisions(cuts: CutSet) -> CutSet: return cuts.merge_supervisions() + + +def _flatten_alt_text(cut) -> list: + ans = [cut] + if cut.custom is None or cut.custom.get("alt_text") is None: + return ans + cut = cut.move_to_memory(audio_format="wav") # performs I/O once and holds audio in memory from now on + # Popping to ease eyesight on debug. + paired_text = cut.custom.pop("alt_text") + for data in paired_text.values(): + # Copy to avoid lazy dataloading issues + data = data.copy() + text_instance = cut.map_supervisions(lambda s: fastcopy(s, text=data["text"], language=data["lang"])) + text_instance.custom = {"text": data.pop("text"), "lang": data.pop("lang"), **data} + ans.append(text_instance) + return ans diff --git a/nemo/collections/common/tokenizers/canary_tokenizer.py b/nemo/collections/common/tokenizers/canary_tokenizer.py index b812cdb46dd55..aed95c1f9312e 100644 --- a/nemo/collections/common/tokenizers/canary_tokenizer.py +++ b/nemo/collections/common/tokenizers/canary_tokenizer.py @@ -11,41 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import os from functools import cached_property from pathlib import Path -from typing import Dict +from typing import Dict, List from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model -__all__ = ['CanaryTokenizer'] - - -LANGUAGES = { - "en": "english", - "de": "german", - "es": "spanish", - "fr": "french", -} +from nemo.utils import logging -TO_LANGUAGE_CODE = { - **{language: code for code, language in LANGUAGES.items()}, -} - -SPECIAL_TOKENS = [ - "", - "<|endoftext|>", - "<|startoftranscript|>", - *[f"<|{lang}|>" for lang in list(LANGUAGES.keys())], - "<|transcribe|>", - "<|translate|>", - "<|nopnc|>", - "<|pnc|>", - "<|nospeech|>", -] +__all__ = ['CanaryTokenizer'] -UNUSED_SPECIAL_TOKENS = [f"<|spltoken{i}|>" for i in range(18)] +# Default tokens for compatibility with Canary. 
+DEFAULT_TOKENS = ["<|nospeech|>", "", "<|endoftext|>", "<|startoftranscript|>", "<|pnc|>", "<|nopnc|>"] class CanaryTokenizer(AggregateTokenizer): @@ -57,11 +36,11 @@ def __init__(self, tokenizers: Dict): super().__init__(tokenizers) # for easy access of special tokens - special_tokens: Dict[str, int] = {} - for special in SPECIAL_TOKENS: - special_tokens[special] = self.token_to_id(special, lang_id='spl_tokens') - - self.special_tokens = special_tokens + self.special_tokens = {} + for special in tokenizers['spl_tokens'].vocab: + # Search for special prompting tokens + if (special.startswith("<|") and special.endswith("|>")) or special == "": + self.special_tokens[special] = self.token_to_id(special, lang_id='spl_tokens') @cached_property def eos_id(self) -> int: @@ -71,22 +50,6 @@ def eos_id(self) -> int: def bos_id(self) -> int: return self.special_tokens["<|startoftranscript|>"] - @cached_property - def transcribe_id(self) -> int: - return self.special_tokens["<|transcribe|>"] - - @cached_property - def translate_id(self) -> int: - return self.special_tokens["<|translate|>"] - - @cached_property - def nopnc_id(self) -> int: - return self.special_tokens["<|nopnc|>"] - - @cached_property - def pnc_id(self) -> int: - return self.special_tokens["<|pnc|>"] - @cached_property def nospeech_id(self) -> int: return self.special_tokens["<|nospeech|>"] @@ -95,27 +58,35 @@ def nospeech_id(self) -> int: def pad_id(self) -> int: return self.special_tokens[""] - def to_language_id(self, language): - if token_id := self.special_tokens.get(f"<|{language}|>", None): + def spl_token_to_id(self, token): + if token_id := self.special_tokens.get(f"<|{token}|>", None): return token_id - - raise KeyError(f"Language {language} not found in tokenizer.") + raise KeyError(f"Token {token} not found in tokenizer.") @staticmethod - def build_special_tokenizer(output_dir: str | Path) -> SentencePieceTokenizer: - output_dir = Path(output_dir) + def build_special_tokenizer( + tokens: List[str], model_dir: str | Path, force_rebuild: bool = False + ) -> SentencePieceTokenizer: + if force_rebuild: + logging.info("Building special tokenizer") + # Checks for artifacts of previous build. 
+ for file in ["tokenizer.model", "tokenizer.vocab", "vocab.txt", "train_text.txt"]: + if os.path.exists(file): + os.remove(file) + tokens = DEFAULT_TOKENS + [f"<|{t}|>" for t in tokens] + output_dir = Path(model_dir) output_dir.mkdir(exist_ok=True, parents=True) text_path = output_dir / "train_text.txt" - all_tokens = SPECIAL_TOKENS + UNUSED_SPECIAL_TOKENS - train_text = "\n".join(all_tokens) + train_text = "\n".join(tokens) text_path.write_text(train_text) model_path = output_dir / "tokenizer.model" create_spt_model( str(text_path), - vocab_size=32, + vocab_size=len(tokens) + 2, sample_size=-1, do_lower_case=False, output_dir=str(output_dir), - user_defined_symbols=all_tokens, + user_defined_symbols=tokens, ) - return SentencePieceTokenizer(str(model_path)) + spl_tokenizer = SentencePieceTokenizer(str(model_path)) + return spl_tokenizer diff --git a/tests/collections/asr/test_asr_multitask_model_bpe.py b/tests/collections/asr/test_asr_multitask_model_bpe.py index 81ad6589cb5da..d250fbcf74a14 100644 --- a/tests/collections/asr/test_asr_multitask_model_bpe.py +++ b/tests/collections/asr/test_asr_multitask_model_bpe.py @@ -22,6 +22,7 @@ from nemo.collections.asr.models.aed_multitask_models import EncDecMultiTaskModel from nemo.collections.asr.parts.submodules import multitask_beam_decoding as beam_decode from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis +from nemo.collections.common.tokenizers import CanaryTokenizer @pytest.fixture() @@ -307,3 +308,22 @@ def test_transcribe_tensor(self, asr_model, test_data_dir): outputs = asr_model.transcribe(audio, batch_size=1) # assert len(outputs) == 1 # assert isinstance(outputs[0], str) + + @pytest.mark.unit + def test_build_tokenizer(self, asr_model, test_data_dir): + # Load audio file + task_tokens = ["ast", "asr"] + lang_tokens = ["en", "es", "de", "fr"] + tokens = task_tokens + lang_tokens + spl_tokenizer_from_build = CanaryTokenizer.build_special_tokenizer(tokens, test_data_dir) + + tokenizer_cfg = {'dir': os.path.join(test_data_dir), 'type': 'bpe'} + spl_tokenizer_from_load = asr_model._make_tokenizer(tokenizer_cfg, "spl_tokens")[0] + + tokens += ["<|nospeech|>", "", "<|endoftext|>", "<|startoftranscript|>", "<|pnc|>", "<|nopnc|>"] + + ids1 = [spl_tokenizer_from_build.tokens_to_ids(t)[0] for t in tokens] + ids2 = [spl_tokenizer_from_load.tokens_to_ids(t)[0] for t in tokens] + + for i, j in zip(ids1, ids2): + assert i == j diff --git a/tests/collections/asr/test_custom_tokenizer.py b/tests/collections/asr/test_custom_tokenizer.py index 79cb6255fb31a..5a033045b7099 100644 --- a/tests/collections/asr/test_custom_tokenizer.py +++ b/tests/collections/asr/test_custom_tokenizer.py @@ -19,15 +19,16 @@ from omegaconf import OmegaConf from nemo.collections.asr.parts.mixins import ASRBPEMixin -from nemo.collections.common.tokenizers.canary_tokenizer import SPECIAL_TOKENS, UNUSED_SPECIAL_TOKENS, CanaryTokenizer +from nemo.collections.common.tokenizers.canary_tokenizer import DEFAULT_TOKENS, CanaryTokenizer from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model from nemo.core import Serialization @pytest.fixture(scope="session") def special_tokenizer_path(tmp_path_factory) -> str: + tokens = ["asr", "ast", "en", "de", "fr", "es"] tmpdir = tmp_path_factory.mktemp("spl_tokens") - CanaryTokenizer.build_special_tokenizer(tmpdir) + CanaryTokenizer.build_special_tokenizer(tokens, tmpdir) return str(tmpdir) @@ -41,11 +42,14 @@ def lang_tokenizer_path(tmp_path_factory) -> str: def 
test_canary_tokenizer_build_special_tokenizer(tmp_path): - tokenizer = CanaryTokenizer.build_special_tokenizer(tmp_path) - expected_tokens = [""] + SPECIAL_TOKENS + UNUSED_SPECIAL_TOKENS + ["▁"] + tokens = ["asr", "ast", "en", "de", "fr", "es"] + tokenizer = CanaryTokenizer.build_special_tokenizer(tokens, tmp_path) + expected_tokens = DEFAULT_TOKENS + [f"<|{t}|>" for t in tokens] + ["▁", ""] tokens = [] for i in range(tokenizer.tokenizer.vocab_size()): tokens.append(tokenizer.tokenizer.IdToPiece(i)) + expected_tokens.sort(), tokens.sort() + print(expected_tokens, tokens) assert expected_tokens == tokens @@ -74,10 +78,10 @@ class DummyModel(ASRBPEMixin, Serialization): assert set(tokenizer.tokenizers_dict.keys()) == {"spl_tokens", "en"} assert isinstance(tokenizer.tokenizers_dict["spl_tokens"], SentencePieceTokenizer) - assert tokenizer.tokenizers_dict["spl_tokens"].vocab_size == 32 + assert tokenizer.tokenizers_dict["spl_tokens"].vocab_size == 14 assert isinstance(tokenizer.tokenizers_dict["en"], SentencePieceTokenizer) assert tokenizer.tokenizers_dict["en"].vocab_size == 6 - assert tokenizer.text_to_ids("<|startoftranscript|>", lang_id="spl_tokens") == [31, 3] # "_" comes first - assert tokenizer.text_to_ids("a", lang_id="en") == [32 + 1, 32 + 2] + assert tokenizer.text_to_ids("<|startoftranscript|>", lang_id="spl_tokens") == [13, 4] # "_" comes first + assert tokenizer.text_to_ids("a", lang_id="en") == [14 + 1, 14 + 2] From 5b077c43c565a70196583f167ada7478ff26c066 Mon Sep 17 00:00:00 2001 From: Adi Renduchintala Date: Sun, 25 Feb 2024 01:47:51 -0800 Subject: [PATCH 10/12] add alpha scaling to lora (#8248) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * removed pdeprecated eft model Signed-off-by: arendu * add alpha Signed-off-by: arendu * default for alpha Signed-off-by: arendu * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add alpha scaling to lora (#8483) * coldfix (#8412) Signed-off-by: George Zelenfroynd Signed-off-by: Michal Futrega * Fixed errors in the CTM gen functions (#8416) (#8420) Signed-off-by: Taejin Park Co-authored-by: Taejin Park Signed-off-by: Michal Futrega * Add change_vocabulary and save_tokenizers() support to Multitask ASR models (#8357) (#8367) * Add change_vocabulary and save_tokenizers() support * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update nemo/collections/asr/models/aed_multitask_models.py --------- Signed-off-by: smajumdar Signed-off-by: Somshubra Majumdar Co-authored-by: Somshubra Majumdar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Piotr Żelasko Signed-off-by: Michal Futrega * fix path location and branch (#8314) * fix path location and branch (#8304) * fix path location and branch Signed-off-by: Nithin Rao Koluguri * change to a floating point number Signed-off-by: Nithin Rao Koluguri --------- Signed-off-by: Nithin Rao Koluguri Co-authored-by: Nithin Rao Koluguri Co-authored-by: Somshubra Majumdar * updat ebranch in tutorial Signed-off-by: Nithin Rao Koluguri --------- Signed-off-by: Nithin Rao Koluguri Co-authored-by: Nithin Rao Co-authored-by: Somshubra Majumdar Co-authored-by: Nithin Rao Koluguri Signed-off-by: Michal Futrega * Add TP comm overlap knobs to AutocastTransformerLayer (#8290) Signed-off-by: Jaemin Choi Co-authored-by: Jaemin Choi Signed-off-by: Michal Futrega * add deallocate pipeline output optimization (#8279) 
(#8318) * add deallocate pipeline output optimization * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Jimmy Zhang Co-authored-by: JimmyZhang12 <67203904+JimmyZhang12@users.noreply.github.com> Co-authored-by: Jimmy Zhang Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Michal Futrega * remove assertion (#8302) (#8321) Signed-off-by: dimapihtar Co-authored-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Signed-off-by: Michal Futrega * Keep max_seqlen and cu_seqlens_argmin for later micro-batches when PP>1 (#8334) (#8346) Signed-off-by: Sangkug Lym Co-authored-by: Sangkug Lym Co-authored-by: Eric Harper Signed-off-by: Michal Futrega * Enable megatron core loggers for GPT pretraining (#8354) (#8384) * Logging changes tested for gpt_pretraining * Additional args * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Aishwarya Bhandare Co-authored-by: ashbhandare Co-authored-by: Aishwarya Bhandare Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Harper Signed-off-by: Michal Futrega * Fix dreambooth data sampler issue (#8400) (#8413) * Turn on drop last * Some neva fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: yaoyu-33 Co-authored-by: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Michal Futrega * add ensemble decoding fix (#8427) (#8433) Signed-off-by: Nithin Rao Koluguri Co-authored-by: Nithin Rao Signed-off-by: Michal Futrega * NeVA Tutorial Notebook (#8217) * init commit - neva tutorial Signed-off-by: Pratyush Muthukumar * NeVA tutorial notebook Signed-off-by: Pratyush Muthukumar * init commit - neva tutorial Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar * NeVA tutorial notebook Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar * requested changes Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar * add inference via script Signed-off-by: Pratyush Muthukumar * requested changes Signed-off-by: Pratyush Muthukumar * requested changes Signed-off-by: Pratyush Muthukumar * add codeblocks to run torchrun in notebook Signed-off-by: Pratyush Muthukumar --------- Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar Co-authored-by: Pratyush Muthukumar Signed-off-by: Michal Futrega * mcore customization doc minor fix (#8421) (#8437) Signed-off-by: Huiying Li Co-authored-by: Huiying Signed-off-by: Michal Futrega * Add `loop_labels` algorithm for TDT greedy decoding (#8215) * Add `loop_labels` algorithm for TDT greedy decoding Signed-off-by: Vladimir Bataev * Use `loop_labels` by default Signed-off-by: Vladimir Bataev * Loop labels greedy decoding v2 Signed-off-by: Vladimir Bataev * Add comments. 
Clean up Signed-off-by: Vladimir Bataev * Add comments Signed-off-by: Vladimir Bataev * Add comments Signed-off-by: Vladimir Bataev * Add tests for batched hypotheses Signed-off-by: Vladimir Bataev * Add tests for batched alignments Signed-off-by: Vladimir Bataev * Add comments Signed-off-by: Vladimir Bataev * Fix comment Signed-off-by: Vladimir Bataev * Fix test Signed-off-by: Vladimir Bataev * Add computer for TDT Signed-off-by: Vladimir Bataev * Fix TDT decoding algorithm Signed-off-by: Vladimir Bataev * Use loop frames by default for TDT Signed-off-by: Vladimir Bataev * Remove "loop frames" implementation for TDT Signed-off-by: Vladimir Bataev * Clean up Signed-off-by: Vladimir Bataev * Add comments Signed-off-by: Vladimir Bataev * Fix confidence. Use tensor for durations. Signed-off-by: Vladimir Bataev --------- Signed-off-by: Vladimir Bataev Signed-off-by: Michal Futrega * Add dist ckpt support for regular optimizers (#7749) (#8293) * Add dist ckpt support for regular optimizers * [tutorial] fixed missing RIR scripts file. (#8257) * fix imports * imports fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ci imports fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert asr notebook * revert asr notebook --------- Signed-off-by: Mikołaj Błaż Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Signed-off-by: dimapihtar Co-authored-by: mikolajblaz Co-authored-by: Eric Harper Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: dimapihtar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Michal Futrega * Multimodal r1.23.0 bug fix (#8315) (#8339) * Rename quick-gelu * ddpm config guard * Fix ddpm edit api * Fix insert_image_token cfg issue * neva updates * reformat * Add back jenkins * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix jenkins * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix bugs * Update default neva template --------- Signed-off-by: yaoyu-33 Co-authored-by: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Co-authored-by: Eric Harper Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Michal Futrega * mcore ds fix (#8283) (#8385) * [tutorial] fixed missing RIR scripts file. 
(#8257) * add values to en tts dict (#7879) * mcore ds fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update mcore * revert asr files * add comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add support for mcore mock dataset * update mcore version * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update gpt cfg * update mcore commit * fix Bert unit tests * update bert tests * fix bert mcore test * fix gpt jenkins tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update apex & TE commits * revert apex installation * turn off the fusion for jenkins --------- Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Signed-off-by: Mariana Graterol Fuenmayor Signed-off-by: Dmytro Pykhtar Signed-off-by: dimapihtar Co-authored-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Mariana <47233618+mgrafu@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Pablo Garay Co-authored-by: Eric Harper Signed-off-by: Michal Futrega * MCore dataset compatibility for tokenizers (#8390) (#8397) * Add unique_identifiers for all tokenizers and eod for SentencePieceTokenizer * Add generalized token aliases to TokenizerSpec to conform with MegatronTokenizer's interface. Remove now-redundant individual fixes from AutoTokenizer and SentencePieceTokenizer. --------- Signed-off-by: Valerie Sarge Co-authored-by: Valerie Sarge Co-authored-by: Pablo Garay Co-authored-by: Eric Harper Signed-off-by: Michal Futrega * Canary: inference tokenization improvements; preserving custom keys when creating tarred manifests (#8432) * Improvements for Canary: - carry over custom keys when creatin tarred manifests - selectable text field in ASR eval - get rid of prompt slicing, create proper inference prompts Signed-off-by: Piotr Żelasko * set ensure_ascii=False in tarred conversion to avoid breaking tokenizers trained on UTF-8 encoding Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko Signed-off-by: Michal Futrega * add sbert to IR (#8445) * add sbert to IR Signed-off-by: ataghibakhsh * add doc Signed-off-by: ataghibakhsh * fix the auto_tokenizer property method reset bug Signed-off-by: ataghibakhsh * addressed bot comments Signed-off-by: ataghibakhsh * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: ataghibakhsh Co-authored-by: Eric Harper Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Michal Futrega * Update readme (#8440) * update Signed-off-by: eharper * udpate Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * landing pages added * landing page added for vision * landing pages updated * some minor changes to the main readme * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * 
update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * update Signed-off-by: eharper * typo fixed * update Signed-off-by: eharper --------- Signed-off-by: eharper Co-authored-by: ntajbakhsh Signed-off-by: Michal Futrega * NeMo-Mistral to HF converter bugfix. (#8353) (#8442) Signed-off-by: Alexandros Koumparoulis Co-authored-by: akoumpa <153118171+akoumpa@users.noreply.github.com> Signed-off-by: Michal Futrega * Fixing mcore bert for TP, PP and SP (#8336) (#8443) * Fixing mcore bert for TP, PP and SP * Fixing mcore bert for TP, PP and SP * Fixing mcore version * Fixing mcore version * Update Jenkinsfile * Update Jenkinsfile * Update Jenkinsfile --------- Signed-off-by: Shanmugam Ramasamy <111910568+shanmugamr1992@users.noreply.github.com> Co-authored-by: Shanmugam Ramasamy <111910568+shanmugamr1992@users.noreply.github.com> Co-authored-by: Shanmugam Ramasamy Co-authored-by: Eric Harper Signed-off-by: Michal Futrega * Add LoRA support to all linear layers (#7988) * Added LoRA support for the Dense layer of Attention * Added LoRA MLP support to MCore and NeMo models. * Change LoRA config default to QKV. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed bug with ddp training. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * MCoreMixin chages. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * using new commit of meg-LM Signed-off-by: arendu * add cpu_offloading_num_layers to conversion script until bug in megatron is fixed Signed-off-by: Chen Cui * fix peft mixin arguments to follow mcore 0.5 Signed-off-by: Chen Cui * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update megatron commit to fix ci error Signed-off-by: Chen Cui * try to fix ci Signed-off-by: Chen Cui * try to fix ci Signed-off-by: Chen Cui * add cfg default Signed-off-by: Chen Cui --------- Signed-off-by: Adi Renduchintala Signed-off-by: Jiaqi Zeng Signed-off-by: arendu Signed-off-by: Chen Cui Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Adi Renduchintala Co-authored-by: Jiaqi Zeng Co-authored-by: arendu Co-authored-by: HeyyyyyyG <49757268+HeyyyyyyG@users.noreply.github.com> Co-authored-by: Chen Cui Co-authored-by: Eric Harper Signed-off-by: Michal Futrega * Add Neva Template for NV-DPO Models (#8358) * add/rename from nvgpt to nv_steerlm, add nv_dpo template Signed-off-by: HuiyingLi * add nv_dpo conversation to accomendate empty system message Signed-off-by: HuiyingLi * handle nv_dpo template text generation Signed-off-by: HuiyingLi * add prompt string to nvgpt Signed-off-by: HuiyingLi * bugfix for inference prompt template Signed-off-by: HuiyingLi * bug fix for grabbing clean text Signed-off-by: Huiying Li * fix code format Signed-off-by: Huiying Li --------- Signed-off-by: HuiyingLi Signed-off-by: Huiying Li Signed-off-by: Michal Futrega * Rebase scaling alpha Signed-off-by: Michal Futrega * default for alpha Signed-off-by: arendu Signed-off-by: Michal Futrega * Rebase scaling alpha Signed-off-by: Michal Futrega --------- Signed-off-by: George Zelenfroynd Signed-off-by: Michal Futrega Signed-off-by: Taejin Park Signed-off-by: smajumdar Signed-off-by: Somshubra Majumdar Signed-off-by: Nithin Rao Koluguri 
Signed-off-by: Jaemin Choi Signed-off-by: Jimmy Zhang Signed-off-by: dimapihtar Signed-off-by: Sangkug Lym Signed-off-by: Aishwarya Bhandare Signed-off-by: yaoyu-33 Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar Signed-off-by: Huiying Li Signed-off-by: Vladimir Bataev Signed-off-by: Mikołaj Błaż Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Signed-off-by: Mariana Graterol Fuenmayor Signed-off-by: Dmytro Pykhtar Signed-off-by: Valerie Sarge Signed-off-by: Piotr Żelasko Signed-off-by: ataghibakhsh Signed-off-by: eharper Signed-off-by: Alexandros Koumparoulis Signed-off-by: Shanmugam Ramasamy <111910568+shanmugamr1992@users.noreply.github.com> Signed-off-by: Adi Renduchintala Signed-off-by: Jiaqi Zeng Signed-off-by: arendu Signed-off-by: Chen Cui Signed-off-by: HuiyingLi Co-authored-by: George <37293288+Jorjeous@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Taejin Park Co-authored-by: Somshubra Majumdar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Piotr Żelasko Co-authored-by: Nithin Rao Co-authored-by: Jaemin Choi Co-authored-by: Jaemin Choi Co-authored-by: JimmyZhang12 <67203904+JimmyZhang12@users.noreply.github.com> Co-authored-by: Jimmy Zhang Co-authored-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: Sangkug Lym Co-authored-by: Eric Harper Co-authored-by: ashbhandare Co-authored-by: Aishwarya Bhandare Co-authored-by: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Co-authored-by: Pratyush Muthukumar <30813477+PannuMuthu@users.noreply.github.com> Co-authored-by: Pratyush Muthukumar Co-authored-by: Huiying Co-authored-by: Vladimir Bataev Co-authored-by: mikolajblaz Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: dimapihtar Co-authored-by: Mariana <47233618+mgrafu@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar Co-authored-by: Pablo Garay Co-authored-by: Valerie Sarge Co-authored-by: Ali Taghibakhshi <71892896+JRD971000@users.noreply.github.com> Co-authored-by: ntajbakhsh Co-authored-by: akoumpa <153118171+akoumpa@users.noreply.github.com> Co-authored-by: Shanmugam Ramasamy <111910568+shanmugamr1992@users.noreply.github.com> Co-authored-by: Shanmugam Ramasamy Co-authored-by: Tugrul Konuk Co-authored-by: Adi Renduchintala Co-authored-by: Jiaqi Zeng Co-authored-by: arendu Co-authored-by: HeyyyyyyG <49757268+HeyyyyyyG@users.noreply.github.com> Co-authored-by: Chen Cui --------- Signed-off-by: arendu Signed-off-by: George Zelenfroynd Signed-off-by: Michal Futrega Signed-off-by: Taejin Park Signed-off-by: smajumdar Signed-off-by: Somshubra Majumdar Signed-off-by: Nithin Rao Koluguri Signed-off-by: Jaemin Choi Signed-off-by: Jimmy Zhang Signed-off-by: dimapihtar Signed-off-by: Sangkug Lym Signed-off-by: Aishwarya Bhandare Signed-off-by: yaoyu-33 Signed-off-by: Pratyush Muthukumar Signed-off-by: Pratyush Muthukumar Signed-off-by: Huiying Li Signed-off-by: Vladimir Bataev Signed-off-by: Mikołaj Błaż Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Signed-off-by: Mariana Graterol Fuenmayor Signed-off-by: Dmytro Pykhtar Signed-off-by: Valerie Sarge Signed-off-by: Piotr Żelasko Signed-off-by: ataghibakhsh Signed-off-by: eharper Signed-off-by: Alexandros Koumparoulis Signed-off-by: Shanmugam Ramasamy <111910568+shanmugamr1992@users.noreply.github.com> Signed-off-by: Adi Renduchintala 
Signed-off-by: Jiaqi Zeng Signed-off-by: Chen Cui Signed-off-by: HuiyingLi Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Michal Futrega Co-authored-by: George <37293288+Jorjeous@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Taejin Park Co-authored-by: Somshubra Majumdar Co-authored-by: Piotr Żelasko Co-authored-by: Nithin Rao Co-authored-by: Jaemin Choi Co-authored-by: Jaemin Choi Co-authored-by: JimmyZhang12 <67203904+JimmyZhang12@users.noreply.github.com> Co-authored-by: Jimmy Zhang Co-authored-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: Sangkug Lym Co-authored-by: Eric Harper Co-authored-by: ashbhandare Co-authored-by: Aishwarya Bhandare Co-authored-by: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Co-authored-by: Pratyush Muthukumar <30813477+PannuMuthu@users.noreply.github.com> Co-authored-by: Pratyush Muthukumar Co-authored-by: Huiying Co-authored-by: Vladimir Bataev Co-authored-by: mikolajblaz Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: dimapihtar Co-authored-by: Mariana <47233618+mgrafu@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar Co-authored-by: Pablo Garay Co-authored-by: Valerie Sarge Co-authored-by: Ali Taghibakhshi <71892896+JRD971000@users.noreply.github.com> Co-authored-by: ntajbakhsh Co-authored-by: akoumpa <153118171+akoumpa@users.noreply.github.com> Co-authored-by: Shanmugam Ramasamy <111910568+shanmugamr1992@users.noreply.github.com> Co-authored-by: Shanmugam Ramasamy Co-authored-by: Tugrul Konuk Co-authored-by: Jiaqi Zeng Co-authored-by: HeyyyyyyG <49757268+HeyyyyyyG@users.noreply.github.com> Co-authored-by: Chen Cui --- .../tuning/conf/megatron_gpt_finetuning_config.yaml | 1 + .../modules/common/megatron/adapters/parallel_adapters.py | 6 ++++++ nemo/collections/nlp/parts/peft_config.py | 1 + 3 files changed, 8 insertions(+) diff --git a/examples/nlp/language_modeling/tuning/conf/megatron_gpt_finetuning_config.yaml b/examples/nlp/language_modeling/tuning/conf/megatron_gpt_finetuning_config.yaml index af561ffe0aad3..96752696da411 100644 --- a/examples/nlp/language_modeling/tuning/conf/megatron_gpt_finetuning_config.yaml +++ b/examples/nlp/language_modeling/tuning/conf/megatron_gpt_finetuning_config.yaml @@ -96,6 +96,7 @@ model: lora_tuning: target_modules: ['attention_qkv'] # this can either be 'attention_qkv','attention_dense','mlp_fc1','mlp_fc2', attention (qkv & dense), mlp (fc1 & fc2) adapter_dim: 32 + alpha: ${model.peft.lora_tuning.adapter_dim} adapter_dropout: 0.0 column_init_method: 'xavier' # IGNORED if linear_adapter is used, options: xavier, zero or normal row_init_method: 'zero' # IGNORED if linear_adapter is used, options: xavier, zero or normal diff --git a/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py b/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py index ac85ea7a1d2ed..9690d5d216976 100644 --- a/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py +++ b/nemo/collections/nlp/modules/common/megatron/adapters/parallel_adapters.py @@ -139,6 +139,7 @@ def __init__( input_is_parallel: bool = False, # NOTE: (@ertkonuk) we need this for LoRA adapters that are applied to RowParallelLinear layers dropout: float = 0.0, model_parallel_config: Optional[ModelParallelConfig] = None, + alpha: float | None = None, **kwargs, ): super().__init__() 
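The hunks that follow scale the adapter output by ``alpha / dim``, with ``alpha`` defaulting to ``dim`` (a scale of 1.0) so the default behaviour matches the previous, un-scaled adapter. A rough, standalone sketch of that scaling, using plain ``torch.nn.Linear`` layers rather than NeMo's ``ParallelLinearAdapter`` (class and argument names below are illustrative only):

.. code:: python

    # Toy LoRA adapter illustrating the alpha / dim output scaling added in this patch.
    # Not NeMo code: names and shapes are illustrative only.
    import torch
    import torch.nn as nn

    class ToyLoRAAdapter(nn.Module):
        def __init__(self, in_features, out_features, dim=32, alpha=None):
            super().__init__()
            self.dim = dim
            # Default alpha to dim so the scale factor is 1.0 when alpha is unset.
            self.alpha = alpha if alpha is not None else dim
            self.linear_in = nn.Linear(in_features, dim, bias=False)
            self.linear_out = nn.Linear(dim, out_features, bias=False)
            nn.init.zeros_(self.linear_out.weight)  # adapter starts as a no-op

        def forward(self, x):
            x = self.linear_out(self.linear_in(x))
            return x * (self.alpha / self.dim)

    adapter = ToyLoRAAdapter(in_features=16, out_features=16, dim=4, alpha=8.0)
    print(adapter(torch.randn(2, 16)).shape)  # torch.Size([2, 16])

Because the YAML above defaults ``alpha`` to the adapter dimension (``alpha: ${model.peft.lora_tuning.adapter_dim}``), the effective scale remains 1.0 unless a user overrides it.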
@@ -151,7 +152,9 @@ def __init__( self.activation = activation_registry[activation]() self.norm_position = norm_position self.dim = dim + self.alpha = alpha if alpha is not None else self.dim self.input_is_parallel = input_is_parallel + # megatron_gpt_peft_models will provide this arg, but deprecated ones do not. # in case this arg is not provided, use the dummy default config. if model_parallel_config is None: @@ -274,6 +277,8 @@ def forward(self, x): if self.dropout is not None: x = self.dropout(x) + x = x * (self.alpha / self.dim) + return x @@ -290,6 +295,7 @@ class ParallelLinearAdapterConfig(AdapterConfig): gather_output: bool = True input_is_parallel: bool = False dropout: float = 0.0 + alpha: float | None = None network_alpha: int | None = None _target_: str = "{0}.{1}".format(ParallelLinearAdapter.__module__, ParallelLinearAdapter.__name__) diff --git a/nemo/collections/nlp/parts/peft_config.py b/nemo/collections/nlp/parts/peft_config.py index 815ad4d9e952a..97305991d0b32 100644 --- a/nemo/collections/nlp/parts/peft_config.py +++ b/nemo/collections/nlp/parts/peft_config.py @@ -182,6 +182,7 @@ def _create_lora_config(self, cfg, lora_cfg, in_features, out_features, adapter_ "row_init_method": lora_cfg.get("row_init_method", "zero"), "gather_output": False, "dropout": lora_cfg.adapter_dropout, + "alpha": lora_cfg.get("alpha", lora_cfg.adapter_dim), } if lora_cfg.weight_tying: From 2fbfceb2d145d9961a9639a203d51fb885a44fe8 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Sun, 25 Feb 2024 13:00:54 -0500 Subject: [PATCH 11/12] Update PEFT Doc (#8501) * update peft doc Signed-off-by: Chen Cui * remove old prompt learning doc and notebook Signed-off-by: Chen Cui * fix table Signed-off-by: Chen Cui * fix table Signed-off-by: Chen Cui * fix table Signed-off-by: Chen Cui * revert accidental commit Signed-off-by: Chen Cui * revert accidental commit Signed-off-by: Chen Cui --------- Signed-off-by: Chen Cui --- README.rst | 12 +- .../nlp/nemo_megatron/peft/landing_page.rst | 16 +- .../nlp/nemo_megatron/peft/quick_start.rst | 6 +- .../nlp/nemo_megatron/prompt_learning.rst | 390 --------- .../nlp/Multitask_Prompt_and_PTuning.ipynb | 786 ------------------ 5 files changed, 18 insertions(+), 1192 deletions(-) delete mode 100644 docs/source/nlp/nemo_megatron/prompt_learning.rst delete mode 100644 tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb diff --git a/README.rst b/README.rst index 3135bdbfabdd1..ea35f9bc96abc 100644 --- a/README.rst +++ b/README.rst @@ -57,10 +57,10 @@ such as FSDP, Mixture-of-Experts, and RLHF with TensorRT-LLM to provide speedups Introduction ------------ -NVIDIA NeMo Framework is a generative AI framework built for researchers and pytorch developers +NVIDIA NeMo Framework is a generative AI framework built for researchers and pytorch developers working on large language models (LLMs), multimodal models (MM), automatic speech recognition (ASR), and text-to-speech synthesis (TTS). -The primary objective of NeMo is to provide a scalable framework for researchers and developers from industry and academia +The primary objective of NeMo is to provide a scalable framework for researchers and developers from industry and academia to more easily implement and design new generative AI models by being able to leverage existing code and pretrained models. For technical documentation, please see the `NeMo Framework User Guide `_. @@ -68,8 +68,8 @@ For technical documentation, please see the `NeMo Framework User Guide `_ and training is automatically scalable to 1000s of GPUs. 
-When applicable, NeMo models take advantage of the latest possible distributed training techniques, -including parallelism strategies such as +When applicable, NeMo models take advantage of the latest possible distributed training techniques, +including parallelism strategies such as * data parallelism * tensor parallelism @@ -84,7 +84,7 @@ and mixed precision training recipes with bfloat16 and FP8 training. NeMo's Transformer based LLM and Multimodal models leverage `NVIDIA Transformer Engine `_ for FP8 training on NVIDIA Hopper GPUs and leverages `NVIDIA Megatron Core `_ for scaling transformer model training. -NeMo LLMs can be aligned with state of the art methods such as SteerLM, DPO and Reinforcement Learning from Human Feedback (RLHF), +NeMo LLMs can be aligned with state of the art methods such as SteerLM, DPO and Reinforcement Learning from Human Feedback (RLHF), see `NVIDIA NeMo Aligner `_ for more details. NeMo LLM and Multimodal models can be deployed and optimized with `NVIDIA Inference Microservices (Early Access) `_. @@ -93,7 +93,7 @@ NeMo ASR and TTS models can be optimized for inference and deployed for producti For scaling NeMo LLM and Multimodal training on Slurm clusters or public clouds, please see the `NVIDIA Framework Launcher `_. The NeMo Framework launcher has extensive recipes, scripts, utilities, and documentation for training NeMo LLMs and Multimodal models and also has an `Autoconfigurator `_ -which can be used to find the optimal model parallel configuration for training on a specific cluster. +which can be used to find the optimal model parallel configuration for training on a specific cluster. To get started quickly with the NeMo Framework Launcher, please see the `NeMo Framework Playbooks `_ The NeMo Framework Launcher does not currently support ASR and TTS training but will soon. diff --git a/docs/source/nlp/nemo_megatron/peft/landing_page.rst b/docs/source/nlp/nemo_megatron/peft/landing_page.rst index c90dcdfff1c59..4461feffb5db2 100644 --- a/docs/source/nlp/nemo_megatron/peft/landing_page.rst +++ b/docs/source/nlp/nemo_megatron/peft/landing_page.rst @@ -12,14 +12,14 @@ fraction of the computational and storage costs. NeMo supports four PEFT methods which can be used with various transformer-based models. -==================== ===== ===== ========= == -\ GPT 3 NvGPT LLaMa 1/2 T5 -==================== ===== ===== ========= == -Adapters (Canonical) ✅ ✅ ✅ ✅ -LoRA ✅ ✅ ✅ ✅ -IA3 ✅ ✅ ✅ ✅ -P-Tuning ✅ ✅ ✅ ✅ -==================== ===== ===== ========= == +==================== ===== ======== ========= ====== == +\ GPT 3 Nemotron LLaMa 1/2 Falcon T5 +==================== ===== ======== ========= ====== == +LoRA ✅ ✅ ✅ ✅ ✅ +P-Tuning ✅ ✅ ✅ ✅ ✅ +Adapters (Canonical) ✅ ✅ ✅ ✅ +IA3 ✅ ✅ ✅ ✅ +==================== ===== ======== ========= ====== == Learn more about PEFT in NeMo with the :ref:`peftquickstart` which provides an overview on how PEFT works in NeMo. Read about the supported PEFT methods diff --git a/docs/source/nlp/nemo_megatron/peft/quick_start.rst b/docs/source/nlp/nemo_megatron/peft/quick_start.rst index 000e242b95087..fd46444eee541 100644 --- a/docs/source/nlp/nemo_megatron/peft/quick_start.rst +++ b/docs/source/nlp/nemo_megatron/peft/quick_start.rst @@ -62,7 +62,7 @@ Base model classes PEFT in NeMo is built with a mix-in class that does not belong to any model in particular. This means that the same interface is available to different NeMo models. 
Currently, NeMo supports PEFT for GPT-style -models such as GPT 3, NvGPT, LLaMa 1/2 (``MegatronGPTSFTModel``), as +models such as GPT 3, Nemotron, LLaMa 1/2 (``MegatronGPTSFTModel``), as well as T5 (``MegatronT5SFTModel``). Full finetuning vs PEFT @@ -78,11 +78,13 @@ PEFT. trainer = MegatronTrainerBuilder(config).create_trainer() model_cfg = MegatronGPTSFTModel.merge_cfg_with(config.model.restore_from_path, config) + ### Training API ### model = MegatronGPTSFTModel.restore_from(restore_path, model_cfg, trainer) # restore from pretrained ckpt - + peft_cfg = LoRAPEFTConfig(model_cfg) + + peft_cfg = LoraPEFTConfig(model_cfg) + model.add_adapter(peft_cfg) trainer.fit(model) # saves adapter weights only + ### Inference API ### # Restore from base then load adapter API model = MegatronGPTSFTModel.restore_from(restore_path, trainer, model_cfg) + model.load_adapters(adapter_save_path, peft_cfg) diff --git a/docs/source/nlp/nemo_megatron/prompt_learning.rst b/docs/source/nlp/nemo_megatron/prompt_learning.rst deleted file mode 100644 index 8fe481019a6f1..0000000000000 --- a/docs/source/nlp/nemo_megatron/prompt_learning.rst +++ /dev/null @@ -1,390 +0,0 @@ -.. _promptlearning: - -Prompt Learning ---------------- - -Within NeMo we refer to **p-tuning** and **prompt tuning** methods collectively as prompt learning. Both methods are parameter efficient alternatives to fine-tuning pretrained language models. Our NeMo implementation makes it possible to use one pretrained GPT model on many downstream tasks without needing to tune the model's full set of parameters. It also allows for adding new tasks to your model without overwriting or disrupting previous tasks for which the model has already been p-tuned/prompt-tuned. Because the original model parameters are frozen and never altered by either method, p-tuning/prompt-tuning also avoids catastrophic forgetting issues often encountered when fine-tuning models. - -Instead of selecting discrete text prompts in a manual or automated fashion, prompt tuning and p-tuning utilize virtual prompt embeddings that can be optimized via gradient descent. The only difference between prompt tuning and p-tuning within NeMo-Megatron is the architecture used to tune the soft prompt tokens during training. - -- Our prompt tuning implementation is based off Lester et. al’s EMNLP 2021 paper "`The Power of Scale for Parameter-Efficient Prompt Tuning `_" -- Our p-tuning implementation is based off Liu et al's paper "`GPT Understands, Too `_" - -Our continuous learning capability for combined p-tuning and prompt tuning with GPT style models is a NeMo specific extension of the author's original work. - -Please also checkout our `prompt learning tutorial notebook. `_ - - -Terminology -^^^^^^^^^^^ -We will be using the terms ``continuous``, ``soft``, and ``virtual`` token interchangeably to refer to embeddings inserted into the model prompt that have no concrete mapping to strings or characters within the model’s vocabulary. These virtual token embeddings exist in contrast to the ``discrete``, ``hard``, or ``real`` tokens that do make up the model’s vocabulary. Virtual tokens are purely 1D vectors with dimensionality equal to that of each real token embedding, matching the ``hidden_size`` hyperparameter. In training and inference, continuous token embeddings are inserted among discrete token embeddings according to a template you provide in the model's config. We will demonstrate how to do this below. 
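As a rough, standalone illustration of this idea (not NeMo's implementation; the names and shapes below are invented for the example), virtual token embeddings are simply learned vectors spliced in among the ordinary vocabulary embeddings before the frozen model sees the sequence:

.. code:: python

    # Standalone sketch: splice learned "virtual" embeddings among real token embeddings.
    # NeMo does this according to the prompt template; everything here is illustrative only.
    import torch
    import torch.nn as nn

    hidden_size = 8
    vocab_size = 100
    total_virtual_tokens = 3

    word_embeddings = nn.Embedding(vocab_size, hidden_size)  # frozen during prompt learning
    virtual_embeddings = nn.Parameter(torch.randn(total_virtual_tokens, hidden_size))  # tuned

    discrete_ids = torch.tensor([5, 17, 42])            # some real (discrete) token ids
    discrete_embs = word_embeddings(discrete_ids)       # shape: (3, hidden_size)

    # Template "<|VIRTUAL_PROMPT_0|> {sentence}": virtual tokens first, then the real tokens.
    model_input = torch.cat([virtual_embeddings, discrete_embs], dim=0)
    print(model_input.shape)  # torch.Size([6, 8]) == (total_virtual_tokens + seq_len, hidden_size)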
- -When referring to p-tuning and prompt tuning together, we will be using the phrase prompt learning for simplicity. - -Prompt Tuning -^^^^^^^^^^^^^ - -In prompt-tuning a pretrained GPT model, soft prompt embeddings are initialized as a 2D matrix of size ``total_virtual_tokens X hidden_size``. Each task the model is prompt-tuned to perform has its own 2D embedding matrix associated with it. Tasks do not share any parameters during training or inference. All GPT model parameters are frozen and only the embedding parameters for each task are updated during training. - -In prompt tuning you can specify how the embeddings are initialized for each task. You can either - -- Initialize embedding parameters according to some random distribution -- Initialize embedding parameters from existing vocabulary embeddings (recommended) - -If you choose to initialize virtual token embeddings from existing embedding weights, you can provide the string of words you want to use for initialization in the model's config. This string will be tokenized and tiled or truncated to match the specified number of virtual tokens you would like to use (``total_virtual_tokens``). Vocab embeddings are copied and used to initialize the soft prompt embedding matrix for each task. The vocab embeddings themselves are not updated or changed during prompt tuning. - -P-Tuning -^^^^^^^^ - -In p-tuning, an LSTM model is used to predict virtual token embeddings. We refer to this LSTM model as our ``prompt_encoder``. LSTM parameters are randomly initialized at the start of p-tuning. All GPT model parameters are frozen, and only the LSTM weights are updated at each training step. LSTM parameters are shared between all tasks that are p-tuned at the same time, but the LSTM model outputs unique virtual token embeddings for each task. The virtual tokens predicted by the LSTM are inserted among the discrete token input in the exact same manner as with prompt-tuning. You still specify the number of virtual tokens you want to use by setting ``total_virtual_tokens`` and each virtual token embedding is still a 1D vector of size ``hidden_size``. - -Using Both Prompt and P-Tuning -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A single pretrained GPT model can use both p-tuning and prompt-tuning. While you must decide to use either p-tuning or prompt-tuning for each task you want your model to perform, you can p-tune your model on a set of tasks *A*, then prompt tune your same model on a different set of tasks *B*, then finally run inference on tasks from both *A* and *B* at the same time. During prompt-tuning or p-tuning, tasks tuned at the same time must use the same number of virtual tokens. During inference, tasks using differing amounts of virtual tokens can be run at the same time. - -When p-tuning completes, prompt tuned virtual tokens from the p-tuning ``prompt_encoder`` are automatically moved to the ``prompt_table`` where all prompt tuned and p-tuned soft prompts are stored. The LSTM ``prompt_encoder`` is then removed from the model. This allows us to preserve previously p-tuned soft prompts while still maintaining the ability to add new p-tuned or prompt-tuned soft prompts in the future. The ``prompt_table`` uses the ``taskname`` as a key to look up the correct virtual tokens for a specified task. The ``prompt_table``'s hash table data structure also makes it possible for each task to flexibly use a different number of virtual tokens. 
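A minimal sketch of the mechanism described above, using a plain LSTM to produce the virtual token embeddings (illustrative only, assuming a 2-layer LSTM; the real ``prompt_encoder`` is part of NeMo's prompt learning model and is more involved):

.. code:: python

    # Toy p-tuning prompt encoder: an LSTM predicts the virtual token embeddings.
    # Only the LSTM and its learned input would be trained; the GPT weights stay frozen.
    import torch
    import torch.nn as nn

    hidden_size = 16
    total_virtual_tokens = 10

    prompt_seed = nn.Parameter(torch.randn(1, total_virtual_tokens, hidden_size))  # learned LSTM input
    prompt_encoder = nn.LSTM(hidden_size, hidden_size, num_layers=2, batch_first=True)

    virtual_token_embeddings, _ = prompt_encoder(prompt_seed)
    print(virtual_token_embeddings.shape)  # torch.Size([1, 10, 16]); these are inserted into the prompt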
- -P-tuning usually requires fewer virtual tokens per task to achieve good results but uses a higher number of parameters compared to prompt-tuning. For example, if you prompt tune a 125M parameter GPT model (with hidden size 768) on two tasks, using 100 virtual tokens per task, the total parameters tuned during prompt tuning would equal 153k (~.1% of the pre-trained model size). If you p-tune the same 125M GPT model on 2 tasks, using an LSTM with two layers and 10 tokens per task, you will be tuning 8.3M parameters (~6.6% of the pre-trained model size). The increased number of parameters used during p-tuning is mitigated by our ``prompt_table``. When p-tuned soft prompts are placed in the prompt table, only the parameters for the predicted virtual tokens are saved. This allows us to keep the benefit of tuning a larger number of parameters during training, while also preserving the parameter efficiency of prompt-tuning during inference and storing of the model. - -Because p-tuning shares parameters between tasks during training, p-tuning your model on multiple tasks that are similar might allow your model to share insight between tasks. In the same vein, p-tuning on many very different tasks at once might perform worse than prompt tuning, which tunes a distinct set of parameters per task. **Generally we recommend using p-tuning over prompt tuning.** - -Users can also optionally tune the model's full parameters in addition to the soft prompt parameters. See ``model.lm_finetune`` in the Prompt Learning Config section for details on how to configure this. - -Dataset Preprocessing -^^^^^^^^^^^^^^^^^^^^^ - -The prompt learning dataset accepts a list of json/dictionary objects or a list of json file names where each json file contains a collection of json objects. Each json object must include the field ``taskname`` which is a string identifier for the task the data example corresponds to. They should also include one or more fields corresponding to different sections of the discrete text prompt. The input data might look like: - -.. code:: - - [ - {"taskname": "squad", "context": [CONTEXT_PARAGRAPH_TEXT1], "question": [QUESTION_TEXT1], "answer": [ANSWER_TEXT1]}, - {"taskname": "squad", "context": [CONTEXT_PARAGRAPH_TEXT2], "question": [QUESTION_TEXT2], "answer": [ANSWER_TEXT2]}, - {"taskname": "intent_and_slot", "utterance": [UTTERANCE_TEXT1], "label": [INTENT_TEXT1][SLOT_TEXT1]}, - {"taskname": "intent_and_slot", "utterance": [UTTERANCE_TEXT2], "label": [INTENT_TEXT2][SLOT_TEXT2]}, - {"taskname": "sentiment", "sentence": [SENTENCE_TEXT1], "label": [SENTIMENT_LABEL1]}, - {"taskname": "sentiment", "sentence": [SENTENCE_TEXT2], "label": [SENTIMENT_LABEL2]}, - ] - -These additional fields can be unlimited in number and will be used to help map different parts of the discrete text input to a prompt template that you define. We show how this mapping works and how to construct your prompt template in the Prompt Formatting section. Data examples for each dataset can all be passed to the dataset class in one file, or in separate ``.jsonl`` files in a list. - -.. _data-example-label: - -Prompt Formatting -^^^^^^^^^^^^^^^^^ - -To customize different prompts for different tasks, we simply need to specify the prompt task template in the config file at ``model.task_templates``. The virtual token markers ``<|VIRTUAL_PROMPT_#|>`` signify where you want virtual tokens to be placed in the template string. 
``<|VIRTUAL_PROMPT_0|>``, ``<|VIRTUAL_PROMPT_1|>``, and ``<|VIRTUAL_PROMPT_2|>`` indicate where a number of virtual tokens matching the values given at ``virtual_token_splits[0]``, ``virtual_token_splits[1]`` and ``virtual_token_splits[2]`` will be placed. The other variable fields ``{var}`` refer to the fields in the data json. - -For example, given: - -- the data json ``{"sentence1": "And he said, Mama, I'm home.", "sentence2": "He didn't say a word."}`` -- virtual token splits set to ``virtual_token_splits = [3, 3, 3]`` -- a prompt template set to ``prompt_template = "<|VIRTUAL_PROMPT_0|> Hypothesis: [sentence1], <|VIRTUAL_PROMPT_1|> Premise: [sentence2] <|VIRTUAL_PROMPT_2|> Answer:"`` - -the input will be translated into ``VVV Hypothesis: And he said, Mama, I'm home. VVV Premise: He didn't say a word. VVV Answer:``, where ``VVV`` are three virtual tokens. - -**We recommend you first try prompt learning by placing all virtual tokens at the very beginning of your prompt template** like we do with the ``sentiment`` task example below. We've found this gives strong performance. -.. code:: - - config.model.task_templates = [ - { - "taskname": "sentiment", - "prompt_template": "<|VIRTUAL_PROMPT_0|> {sentence} sentiment: {label}", - "total_virtual_tokens": 10, - "virtual_token_splits": [10], - "truncate_field": "sentence", - "answer_only_loss": False, - }, - { - "taskname": "intent_and_slot", - "prompt_template": "<|VIRTUAL_PROMPT_0|> Predict intent and slot <|VIRTUAL_PROMPT_1|> :\n{utterance}{label}", - "total_virtual_tokens": 10, - "virtual_token_splits": [7, 3], - "truncate_field": None, - "answer_only_loss": True, - "answer_field": "label" - } - ] - -.. _prompt-formatting-label: - -``model.task_templates`` Config Parameters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. list-table:: - :widths: 15 15 25 - :header-rows: 1 - - * - **Parameter** - - **Data type** - - **Description** - * - **taskname** - - string - - Short string denoting the task, used to lookup task specific virtual tokens from the ``prompt_table``. Refers to the same ``taskname`` in the dataset json objects. - * - **prompt_template** - - string - - a string showing the model where to place virtual tokens and how to map dataset json fields to where they belong in the model prompt - * - **total_virtual_tokens** - - int - - specifies the total number of virtual tokens that will be inserted into the model prompt - * - **virtual_token_splits** - - list of ints - - specifies the number of virtual tokens that belong at each ``<|VIRTUAL_PROMPT_#|>`` marker. ``virtual_token_splits`` values should add up to ``total_virtual_tokens``. The number of ``virtual_token_splits`` should match the number of ``<|VIRTUAL_PROMPT_#|>`` markers. - * - **answer_only_loss** - - bool - - Whether to limit loss calculation to only the answer portion of the prompt during tuning. Strongly recommended for long prompts. - * - **answer_field** - - string - - The field in the data json corresponding to the answer. The loss will only be calculated on this portion of the prompt if ``answer_only_loss`` is ``True``. The answer field must be at the end of the prompt template. - * - **truncate_field** - - string - - specifies which field in the data json to truncate if the length of the input exceeds the maximum sequence length of the model. If ``truncate_field`` is set to ``None``, examples that are too long are simply dropped from the dataset. - -Prompt Learning Specific Config Values -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. 
list-table:: - :widths: 15 15 25 - :header-rows: 1 - - * - **Parameter** - - **Data type** - - **Description** - * - **model.nemo_path** - - string - - Path to where you want to save your model after prompt tuning/p-tuning, must end in `.nemo` - * - **model.virtual_prompt_style** - - string - - one of 'prompt-tuning', 'p-tuning', or 'inference' - * - **model.language_model_path** - - string - - Path to the GPT language model .nemo file you want to use for prompt learning, not needed if ``restore_path`` is set - * - **model.restore_path** - - string - - Path to a .nemo file of existing ``MegatronGPTPromptLearningModel`` that has already been prompt tuned or p-tuned on at least one task. P-tuned or prompt tuned in this training session will be added to this model's `prompt_table`. Should be set to ``null`` if none. - * - **model.new_tasks** - - list of strings - - List of new tasknames to be prompt or p-tuned, - * - **model.existing_tasks** - - list of strings - - List of tasks the model has already been p-tuned/prompt-tuned for, needed when a restore path is given. Should be set to ``[]`` if None. - * - **model.task_templates** - - list - - See the ``model.task_templates`` Config Parameters Table above - * - **model.prompt_tuning.new_prompt_init_methods** - - list of strings - - List of 'text' or 'random', should correspond to the order of tasks listed in ``model.new_tasks``. Only needed if `virtual_prompt_style='prompt-tuning'` - * - **model.prompt_tuning.new_prompt_init_text** - - list of strings - - The text you want to use for soft prompt initalization if ``model.prompt_tuning.new_prompt_init_methods`` is set to 'text' for a task. Should correspond to the order of tasks listed in ``model.new_tasks``. The text is tokenized and clipped or tiled to match ``total_virtual_tokens`` in ``model.task_templates``. The vocab embeddings associated with each token are copied and use to initialize the soft prompts before tuning. - * - **model.p_tuning.dropout** - - float - - LSTM prompt encoder dropout prob - * - **model.p_tuning.num_layers** - - int - - Num layers in LSTM prompt encoder - * - **model.tensor_model_parallel_size** - - int - - intra-layer model parallelism, must match the ``tensor_model_parallel_size`` of the GPT model given at ``language_model_path`` - * - **model.batch_size** - - int - - global batch size - * - **model.data.train_ds** - - list of strings - - list of ``.json`` or ``.jsonl`` training dataset files with json ojects that have the dataset format described above - * - **model.data.validation_ds** - - list of strings - - list of ``.json`` or ``.jsonl`` validation dataset files with json ojects that have the dataset format described above - * - **model.data.add_eos** - - bool - - Whether to add an EOS token at the end of each training example (recommended). - -An example config file can be found at https://github.com/NVIDIA/NeMo/tree/stable/examples/nlp/language_modeling/conf/megatron_gpt_prompt_learning_config.yaml - -Setting New Tasks -^^^^^^^^^^^^^^^^^ - -After you p-tune or prompt-tune your model, you can always go back and p-tune or prompt-tune your model on more tasks without over writing the virtual prompts who've trained already. You can also use a different number of ``total_virtual_tokens`` between each training session as long as tasks ptuned or prompt tuned at the same time have the same number of ``total_virtual_tokens``. 
For this reason, when you ptune on a new task, you need to tell your model which of your tasks are new and which ones already exist (and thus you don't want to tune them). You do this by setting the ``new_tasks`` and ``existing_tasks`` values in the config file. - -Example Multi-Task Prompt Tuning Config and Command -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -First define a config called ``multitask-prompt-learning.yaml`` demonstrated below. **In the** ``exp_manager`` **portion of the config,** ``save_nemo_on_train_end`` **should be set to** ``False`` **to avoid unnecessarily saving the incorrect model weights.** This is already done in the example `megatron_gpt_prompt_learning_config.yaml config `_ that you should use as your starting point. The correct prompt learning model will be saved at the ``model.nemo_path`` you set. - -.. code:: - - name: multitask_prompt_tuning - trainer: ... - exp_manager: ... - model: - seed: 1234 - nemo_path: ${name}.nemo - virtual_prompt_style: "prompt-tuning" - encoder_seq_length: 2048 - tensor_model_parallel_size: 1 - pipeline_model_parallel_size: 1 - global_batch_size: 16 - micro_batch_size: 4 - - restore_path: null - language_model_path: models/megatron_125M_gpt.nemo - existing_tasks: [] - new_tasks: ["sentiment", "intent_and_slot"] - - task_templates: - - taskname: "sentiment" - prompt_template: "<|VIRTUAL_PROMPT_0|> {sentence} sentiment: {label}" - total_virtual_tokens: 100 - virtual_token_splits: [100] - truncate_field: null - answer_only_loss: False - - - taskname: "intent_and_slot" - prompt_template: "<|VIRTUAL_PROMPT_0|> Predict intent and slot <|VIRTUAL_PROMPT_1|> :\n{utterance}{label}" - total_virtual_tokens: 100 - virtual_token_splits: [80, 20] - truncate_field: null - answer_only_loss: True - answer_field: "label" - - prompt_tuning: - new_prompt_init_methods: ["text", "text"] - new_prompt_init_text: ["financial sentiment analysis postive neutral negative", "intent and slot classification virtual assistant task bot please"] - - data: - train_ds: ["data/financial_phrase_bank_train.jsonl", "data/assistent_train.jsonl"] - validation_ds: ["data/financial_phrase_bank_val.jsonl", "data/assistent_val.jsonl"] - add_eos: True - shuffle: True - num_workers: 1 - pin_memory: True - - optim: ... - -(See https://github.com/NVIDIA/NeMo/tree/stable/examples/nlp/language_modeling/conf/megatron_gpt_prompt_learning_config.yaml for what should go in the ``trainer``, ``exp_manager``, and ``optim`` sections.) - -Then run the command - -.. code:: - - python megatron_gpt_prompt_learning.py --config-name=multitask-prompt-learning.yaml - - -Example Multi-Task P-Tuning Config and Command After Prompt-Tuning -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Update ``multitask-prompt-learning.yaml`` from the example above with p-tuning parameters for the new task. Be sure to update ``model.existing_tasks`` with the tasknames from previous prompt learning runs and to use the ``.nemo`` file saved at the end of your last prompt learning session. Values different from the config above have stars commented next to them. - -In this example, the SQuAD task includes the question context as part of the prompt. Because the context is long, we recommend setting ``answer_only_loss`` to ``True`` for this task, and any task where a significant portion of the prompt is not a part of the answer. ``answer_only_loss`` tells the model to only calculate the cross-entropy loss on the answer portion of the training example. 
Though we recommend placing all virtual tokens at the beginning of the prompt, we place them throughout the prompt in this example to demonstrate how to do so. - -.. code:: - - name: multitask_p_tuning # *** - trainer: ... - exp_manager: ... - model: - seed: 1234 - nemo_path: ${name}.nemo - virtual_prompt_style: "p-tuning" # *** - encoder_seq_length: 2048 - tensor_model_parallel_size: 1 - pipeline_model_parallel_size: 1 - global_batch_size: 16 - micro_batch_size: 4 - - restore_path: multitask_prompt_tuning.nemo # *** - language_model_path: models/megatron_125M_gpt.nemo - existing_tasks: ["sentiment", "intent_and_slot"] # *** - new_tasks: ["squad"] - - task_templates: - - taskname: "sentiment" - prompt_template: "<|VIRTUAL_PROMPT_0|> {sentence} sentiment: {label}" - total_virtual_tokens: 100 - virtual_token_splits: [100] - truncate_field: null - answer_only_loss: False - - - taskname: "intent_and_slot" - prompt_template: "<|VIRTUAL_PROMPT_0|> Predict intent and slot <|VIRTUAL_PROMPT_1|> :\n{utterance}{label}" - total_virtual_tokens: 100 - virtual_token_splits: [80, 20] - truncate_field: null - answer_only_loss: True - answer_field: "label" - - - taskname: "squad" # *** - prompt_template: "<|VIRTUAL_PROMPT_0|> Answer the question from the context {question} {context} Answer: {answer}" # *** - total_virtual_tokens: 9 # *** - virtual_token_splits: [9] # *** - truncate_field: context # *** - answer_only_loss: True # *** - answer_field: "answer" # *** - - p_tuning: # *** - dropout: 0.0 # *** - num_layers: 2 # *** - - data: - train_ds: ["data/squad_train.jsonl"] # *** - validation_ds: ["data/squad_val.jsonl"] # *** - add_eos: True - shuffle: True - num_workers: 1 - pin_memory: True - - optim: ... - -Then run the command again: - -.. code:: - - python megatron_gpt_prompt_learning.py --config-name=multitask-prompt-learning.yaml - - -Example Multi-Task Inference -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The inference file can contain a mix of prompts from all the tasks the model has been prompt tuned on. - -.. code:: - - python megatron_gpt_prompt_learning_eval.py \ - virtual_prompt_model_file=PATH_TO_NEMO_PROMPT_LEARNING_MODEL_FILE \ - gpt_model_file=PATH_TO_FROZEN_GPT_MODEL_FILE \ - inference.greedy=True \ - inference.add_BOS=False \ - trainer.devices=1 \ - trainer.num_nodes=1 \ - tensor_model_parallel_size=1 \ - pipeline_model_parallel_size=1 \ - prompts=[prompt1,prompt2] - -``virtual_prompt_model_file`` should be a path to a .nemo file saved after p-tuning/prompt tuning and ``model_file`` is still the path to the gpt model's .nemo file. - -prompts in this case should be a list of .json or .jsonl files containing json objects similar to the ones used during prompt learning. They should have keys that match the fields specified in the prompt template. Fields can be dropped from the prompt dict and their corresponding section of the prompt template will be automatically removed. - -For example, say the prompt template during p-tuning/prompt-tuning looked like: - -.. code:: - - '<|VIRTUAL_PROMPT_0|> Context: {context} Question: {question} Answer: {answer}' - -but you don't want to include the answer field during inference. Just don't include the answer field in the prompt dict like below: - -.. code:: - - {"taskname": "squad", "context": "some paragraph", "question": "question related to paragraph"} - {"taskname": "squad", "context": "another paragraph", "question": "a different question related to paragraph"} - - -And the dataset class will automatically format your input to have the form: - -.. 
code:: - - [ - '<|VIRTUAL_PROMPT_0|> Context: some paragraph Question: question related to paragraph Answer: ', - '<|VIRTUAL_PROMPT_0|> Context: another paragraph Question: a different question related to paragraph Answer: ' - ] - -Generally prompt learning inference is just like running inference with a GPT model. The only difference is you need to add ``virtual_prompt_model_file=PATH_TO_NEMO_PROMPT_LEARNING_MODEL_FILE`` to your command if you're using a p-tuned/prompt-tuned model. - -Example prompt learning script: `NeMo/examples/nlp/language_modeling/megatron_gpt_prompt_learning.py.py `__. - -Example prompt tuned inference script: `NeMo/examples/nlp/language_modeling/megatron_gpt_eval.py `__. diff --git a/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb b/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb deleted file mode 100644 index 076a8ffad3df7..0000000000000 --- a/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb +++ /dev/null @@ -1,786 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "b7a434f4", - "metadata": {}, - "outputs": [], - "source": [ - "BRANCH='main'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "developmental-gibraltar", - "metadata": {}, - "outputs": [], - "source": [ - "\"\"\"\n", - "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", - "4. Run this cell to set up dependencies.\n", - "\"\"\"\n", - "# If you're using Google Colab and not running locally, run this cell\n", - "\n", - "# install NeMo\n", - "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" - ] - }, - { - "cell_type": "markdown", - "id": "42daf8bf", - "metadata": {}, - "source": [ - "# Introduction\n", - "\n", - "In this notebook we demonstrate how to use p-tuning and prompt tuning within NeMo-Megatron. Both methods are parameter efficient alternatives to fine-tuning pretrained language models. Our NeMo implementation makes it possible to use one pretrained GPT model on many downstream tasks without needing to tune the model’s full set of parameters. It also allows for adding new tasks to your model without overwriting or disrupting previous tasks for which the model has already been p-tuned/prompt-tuned. Because the original model parameters are frozen and never altered by either method, p-tuning/prompt-tuning also avoid catastrophic forgetting issues often encountered when fine-tuning models.\n", - "\n", - "- Our prompt tuning implementation is based off Lester et. al’s EMNLP 2021 paper [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691)\n", - "\n", - "- Our p-tuning implementation is based off Liu et al's paper [GPT Understands, Too](https://arxiv.org/abs/2103.10385).\n", - "\n", - "- Command line usage examples and API documentation can be found in [our user docs](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/nemo_megatron/prompt_learning.html). 
\n", - "\n", - "\"Prompt\n", - "\n", - "Our continuous learning capability for combined p-tuning and prompt tuning with GPT style models is a NeMo specific extension of the author’s original work.\n", - "\n", - "# The Plan\n", - "\n", - "We are going to show you how to:\n", - " \n", - " 1. P-Tune/Prompt Tune a model on multiple tasks at the same time\n", - " 2. Add a new task to a model that has already been P-Tuned/Prompt Tuned previously\n", - " \n", - "We will first p-tune a GPT model on sentiment analysis, and intent and slot classification tasks. Then we will show how to add the squad question answering task to the same model we already p-tuned once.\n", - "\n", - "\n", - "# Technical Overview\n", - "Instead of selecting discrete text prompts in a manual or automated fashion, prompt tuning and p-tuning utilize virtual prompt embeddings that can be optimized via gradient decent. The only difference between prompt tuning and p-tuning within NeMo-Megatron is the architecture used to tune the soft prompt tokens during training.\n", - "\n", - "### Terminology\n", - "We will be using the terms `continuous`, `soft`, and `virtual` token interchangeably to refer to embeddings inserted into the model prompt that have no concrete mapping to strings or characters within the model’s vocabulary. These virtual token embeddings exist in contrast to the `discrete`, `hard`, or `real` tokens that do make up the model’s vocabulary. Virtual tokens are purely 1D vectors with dimensionality equal to that of each real token embedding, matching the `hidden_size` hyperparameter. In training and inference, continuous token embeddings are inserted among discrete token embeddings according to a template you provide in the model’s config. We will demonstrate how to do this below.\n", - "\n", - "When referring to p-tuning and prompt tuning together, we will be using the phrase prompt learning for simplicity.\n", - "\n", - "### Prompt-Tuning\n", - "In prompt-tuning a pretrained GPT model, soft prompt embeddings are initialized as a 2D matrix of size `total_virtual_tokens X hidden_size`. Each task the model is prompt-tuned to perform has its own 2D embedding matrix associated with it. Tasks do not share any parameters during training or inference. All GPT model parameters are frozen and only the embedding parameters for each task are updated during training.\n", - "\n", - "In prompt tuning you can specify how the embeddings are initialized for each task. You can either\n", - "\n", - "1. Initialize embedding parameters according to some random distribution\n", - "2. Initialize embedding parameters from existing vocabulary embeddings (recommended)\n", - "\n", - "If you choose to initialize virtual token embeddings from existing embedding weights, you can provide the string of words you want to use for initialization in the model’s config. This string will be tokenized and tiled or truncated to match the specified number of virtual tokens you would like to use (`total_virtual_tokens`). Vocab embeddings are copied and used to initialize the soft prompt embedding matrix for each task. The vocab embeddings themselves are not updated or changed during prompt tuning.\n", - "\n", - "\n", - "### P-Tuning\n", - "In p-tuning, an LSTM model is used to predict virtual token embeddings. We refer to this LSTM model as our `prompt_encoder`. LSTM parameters are randomly initialized at the start of p-tuning. All GPT model parameters are frozen, and only the LSTM weights are updated at each training step. 
LSTM parameters are shared between all tasks that are p-tuned at the same time, but the LSTM model outputs unique virtual token embeddings for each task. The virtual tokens predicted by the LSTM are inserted among the discrete token input in the exact same manner as with prompt-tuning. You still specify the number of virtual tokens you want to use by setting `total_virtual_tokens` and each virtual token embedding is still a 1D vector of size `hidden_size`.\n", - "\n", - "\n", - "\n", - "# The Best of Both\n", - "A single pretrained GPT model can use both p-tuning and prompt-tuning. While you must decide to use either p-tuning or prompt-tuning for each task you want your model to perform, you can p-tune your model on a set of tasks A, then prompt tune your same model on a different set of tasks B, then finally run inference on tasks from both A and B at the same time. During prompt-tuning or p-tuning, tasks tuned at the same time must use the same number of virtual tokens. During inference, tasks using differing amounts of virtual tokens can be run at the same time.\n", - "\n", - "Please see our [docs for more comparisons between prompt and p-tuning](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/nemo_megatron/prompt_learning.html). \n", - "\n", - "With all that covered, let's get started!\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "31c27562", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import wget" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "0bfc7709", - "metadata": {}, - "source": [ - "# Tasks and Datasets\n", - "We will be using p-tuning to teach our GPT model to do **Question Answering**.\n", - "\n", - "We will be using the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) reading comprehension dataset, consisting of questions posed by crowd workers on a set of Wikipedia articles, where the answer to every question is a segment of text. More information on [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) can be found on their website or in their paper by Rajpurkar et. al \"[Know What You Don’t Know: Unanswerable Questions for SQuAD](https://arxiv.org/pdf/1806.03822.pdf)\"." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "e0b0072a", - "metadata": {}, - "source": [ - "# Data Preparation\n", - "\n", - "The prompt learning dataset loader accepts a list of json/dictionary objects or a list of json file names where each json file contains a collection of json objects. Each json object must include the field `taskname` which is a string identifier for the task the data example corresponds to. They should also include one or more fields corresponding to different sections of the discrete text prompt. The input data might look like:\n", - "\n", - "```\n", - "[\n", - " {\"taskname\": \"squad\", \"context\": [CONTEXT_PARAGRAPH_TEXT1], \"question\": [QUESTION_TEXT1], \"answer\": [ANSWER_TEXT1]},\n", - " {\"taskname\": \"squad\", \"context\": [CONTEXT_PARAGRAPH_TEXT2], \"question\": [QUESTION_TEXT2], \"answer\": [ANSWER_TEXT2]},\n", - "]\n", - "```\n", - "\n", - "These additional fields can be unlimited in number and will be used to help map different parts of the discrete text input to a prompt template that you define. We will show how this mapping works and how to construct your prompt template in the `Prompt Formatting` section. 
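The following is an illustrative sketch (not part of the original notebook) of how you might write such examples to a `.jsonl` file yourself; the file name and field values are placeholders.

```python
# Illustrative only: write prompt-learning examples in the format the dataset loader
# expects, one json object per line, each carrying a "taskname" plus the prompt fields.
import json

examples = [
    {"taskname": "squad", "context": "Some paragraph of text ...", "question": "A question about it?", "answer": "its answer"},
    {"taskname": "squad", "context": "Another paragraph ...", "question": "Another question?", "answer": "another answer"},
]

with open("my_task_examples.jsonl", "w") as f:  # placeholder path
    for example in examples:
        f.write(json.dumps(example) + "\n")
```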
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0dbd41fd", - "metadata": {}, - "outputs": [], - "source": [ - "# You can replace DATA_DIR and NEMO_DIR with your own locations\n", - "DATA_DIR = \"data\"\n", - "NEMO_DIR = '.'\n", - "\n", - "os.makedirs(DATA_DIR, exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "504a7b40", - "metadata": {}, - "source": [ - "\n", - "For each dataset we have preprocessing scripts pre-written in NeMo's example directory located in `examples/nlp`. Let's download those now. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e72a1dc1", - "metadata": {}, - "outputs": [], - "source": [ - "# download the preprocessing scripts from github for the purpose of this tutorial\n", - "wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py', NEMO_DIR)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "71813919", - "metadata": {}, - "source": [ - "Now let's down load and process the dataset." - ] - }, - { - "cell_type": "markdown", - "id": "816791de", - "metadata": {}, - "source": [ - "### SQuAD Dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fa16d8ac", - "metadata": {}, - "outputs": [], - "source": [ - "SQUAD_DIR = os.path.join(DATA_DIR, \"SQuAD\")\n", - "os.makedirs(SQUAD_DIR, exist_ok=True)\n", - "\n", - "# Download the SQuAD dataset\n", - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json\n", - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json\n", - "!mv train-v1.1.json {SQUAD_DIR}\n", - "!mv dev-v1.1.json {SQUAD_DIR}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "64e3e25b", - "metadata": {}, - "outputs": [], - "source": [ - "# Preprocess squad data\n", - "!python $NEMO_DIR/prompt_learning_squad_preprocessing.py --data-dir {SQUAD_DIR}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b562d1de", - "metadata": {}, - "outputs": [], - "source": [ - "# What the squad dataset looks like after processing\n", - "!head -4 $SQUAD_DIR/squad_train.jsonl" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "a385d319", - "metadata": {}, - "source": [ - "We made a `.jsonl` file for each of the train, validation, and testing splits of the squad data. Every `.jsonl` file contains json objects with the fields `taskname`, `context`, `question`, and `answer`. The preprocessing script is called `prompt_learning_squad_preprocessing.py`. It should be in your `NEMO_DIR` and at `scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py` in the NeMo repo. \n", - "\n", - "The SQuAD dataset consists of various topics like `Beyoncé`, `IPod`, and `Symbiosis`. Each topic has several paragraphs associated with it, and each paragraph has several questions and answers related to it. When we separated the train/validation/test splits, we separated them on the topic level. For example, if the training set contains paragraphs and questions about the topic `Beyoncé`, neither the validation nor test sets will contain any questions on this topic. All questions about a certain topic are isolated to one split of the data. \n", - "\n", - "Like the Financial PhraseBank Dataset, we randomly selected 80% of the questions for training, 10% for validation, and 10% for test. This resulted in `69125` test examples, `8952` validation examples, and `8744` testing examples. 
The `answer` field was removed from test examples.\n", - "\n", - "Training on the full train split could take a lot of time, so we are going to clip the train split to 2k examples for the sake of this tutorial, and limit the validation dataset to 200 samples." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f1473ba", - "metadata": {}, - "outputs": [], - "source": [ - "! head -2000 $SQUAD_DIR/squad_train.jsonl > $SQUAD_DIR/squad_short_train.jsonl\n", - "! head -200 $SQUAD_DIR/squad_val.jsonl > $SQUAD_DIR/squad_short_val.jsonl\n" - ] - }, - { - "cell_type": "markdown", - "id": "2e19c8dc", - "metadata": {}, - "source": [ - "# P-Tuning Model Config Setup\n", - "\n", - "Now we will begin setting up the config file used for prompt/p-tuning our GPT models! GPT Prompt learning within NeMo uses a class called `MegatronGPTPromptLearningModel` which has its own config file. We will start by loading an example prompt learning config file, then make changes to it to fit our tasks and training plans. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5749c387", - "metadata": {}, - "outputs": [], - "source": [ - "from omegaconf import OmegaConf\n", - "\n", - "CONFIG_DIR = os.path.join(NEMO_DIR, \"conf\")\n", - "os.makedirs(CONFIG_DIR, exist_ok=True)\n", - "\n", - "# Download the example config file\n", - "wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/language_modeling/conf/megatron_gpt_prompt_learning_config.yaml', CONFIG_DIR)\n", - "\n", - "# Load the example config file so we can start editing it\n", - "CONFIG_PATH = os.path.join(CONFIG_DIR, \"megatron_gpt_prompt_learning_config.yaml\")\n", - "config = OmegaConf.load(CONFIG_PATH)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "ce966bcf", - "metadata": {}, - "source": [ - "First let's set the datasets we've created in the config. We are going to start by p-tuning a GPT model on a small subset of the **Squad** task. We do this by setting the following config params below: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6bb1590f", - "metadata": {}, - "outputs": [], - "source": [ - "config.model.data.train_ds = [f\"{SQUAD_DIR}/squad_short_train.jsonl\"]\n", - "config.model.data.validation_ds = [f\"{SQUAD_DIR}/squad_short_val.jsonl\"]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "4e021b24", - "metadata": {}, - "source": [ - "### Prompt Formatting\n", - "Now that we have our dataset, lets define what we want the prompt to look like. \n", - "\n", - "The squad dataset json files contain fields named \"context\", \"question\" and \"answer\". The prompt formatting template allows us to arrange these fields and decide where to insert virtual prompts. We can add the `<|VIRTUAL_PROMPT_0|>` token anywhere between the fields (although we recommend simply adding it in the leftmost position will be sufficient).\n", - "\n", - "For example, given a data jsonl file with examples like this: \n", - "\n", - "\n", - "**{\"taskname\": \"squad\", \"context\": \"Super Bowl 50 was an American football ga... numerals 50.\", \"question\": \"What does AFC stand for?\", \"answer\": \"American Football Conference\"}**. 
\n", - "\n", - "\n", - "We can create a prompt template set to `prompt_template = \"<|VIRTUAL_PROMPT_0|> Context: {context}\\n\\nquestion: {question}\\n\\nanswer: {answer}\"` other options are also possible, for example the `\\n` can be replaced with whitespace or the other of the context and question can be swapped. The answer however, should be at the end.\n", - "\n", - "Let's configure the prompt template for the task below:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f935b411", - "metadata": {}, - "outputs": [], - "source": [ - "config.model.task_templates = [\n", - " \n", - " {\n", - " \"taskname\": \"squad\",\n", - " \"prompt_template\": \"<|VIRTUAL_PROMPT_0|> Context: {context}\\n\\nQuestion: {question}\\n\\nAnswer:{answer}\",\n", - " \"total_virtual_tokens\": 15,\n", - " \"virtual_token_splits\": [15],\n", - " \"truncate_field\": \"context\",\n", - " \"answer_only_loss\": True,\n", - " \"answer_field\": \"answer\",\n", - " },\n", - " \n", - "]" - ] - }, - { - "cell_type": "markdown", - "id": "dcc438b5", - "metadata": {}, - "source": [ - "Note each `task_template` item has 5 fields. \n", - "\n", - "- **`prompt_template`** is a string showing the model where to place virtual tokens and how to map dataset json fields to where they belong in the model prompt. \n", - "\n", - "\n", - "- **`taskname`** refers to the same `taskname` in the dataset json objects. \n", - "\n", - "\n", - "- **`total_virtual_tokens`** specifies the total number of virtual tokens that will be inserted into the model prompt.\n", - "\n", - "\n", - "- **`virtual_token_splits`** specifies the number of virtual tokens that belong at each `<|VIRTUAL_PROMPT_#|>` marker. `virtual_token_splits` values should add up to `total_virtual_tokens`. The number of `virtual_token_splits` should match the number of `<|VIRTUAL_PROMPT_#|>` markers. \n", - "\n", - "\n", - "- **`truncate_field`** specifies which field in the data json to truncate if the length of the input exceeds the maximum sequence length of the model. If `truncate_field` is set to `None`, examples that are too long are simply dropped from the dataset.\n", - "\n", - "\n", - "- **`answer_only_loss`** Whether to limit loss calculation to only the answer portion of the prompt during tuning. `True` Strongly recommended for long prompts, but shorter prompts with single word answers seem to benefit from setting this to `False`. \n", - "\n", - "\n", - "- **`answer_field`** The field in the data json corresponding to the answer. The loss will only be calculated on this portion of the prompt if `answer_only_loss` is `True`. The answer field must be at the end of the prompt template.\n", - "\n", - "In the `task_templates` we set above, `squad` has a different number of virtual tokens than `sentiment` and `intent_and_slot`. This is because we will be p-tuning on `squad` after we p-tune on the other two tasks and **we do not need to use the same number of virtual tokens between sessions**. We also set the `truncate` field for squad because the context can sometimes be longer than the model's max sequence length, and we want that field to be truncated if the example is too long. Lastly, we set `answer_only_loss` to true for `squad` due to the longer prompt. We've found `answer_only_loss=True` to work significantly better for this task." 
- ] - }, - { - "cell_type": "markdown", - "id": "84579c7a", - "metadata": {}, - "source": [ - "### Setting New Tasks\n", - "After you p-tune your model this time, you can always go back and p-tune or prompt-tune your model on more tasks without overwriting the virtual prompts you've trained this time. You can also use a different number of `total_virtual_tokens` between each training session as long as tasks p-tuned or prompt-tuned at the same time have the same number of `total_virtual_tokens`. For this reason, when you p-tune on a new task, you need to tell your model which of your tasks are new and which ones already exist (and thus you don't want to tune them). \n", - "\n", - "You do this by setting the `new_tasks` and `existing_tasks` values in the config file. Because we are p-tuning a model with no existing tasks, you should set `existing_tasks=[]` and `new_tasks=[\"squad\"]` as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "57a73e01", - "metadata": {}, - "outputs": [], - "source": [ - "config.model.existing_tasks = []\n", - "config.model.new_tasks = [\"squad\"]" - ] - }, - { - "cell_type": "markdown", - "id": "3b77e88c", - "metadata": {}, - "source": [ - "After p-tuning and/or prompt tuning is complete, you can run inference on all tasks at the same time, regardless of their `total_virtual_tokens` value." - ] - }, - { - "cell_type": "markdown", - "id": "a0d5017e", - "metadata": {}, - "source": [ - "### Setting The Pre-Trained GPT Model\n", - "We still need to set which GPT model we want to p-tune/prompt tune. Prompt learning methods work best with large GPT language models (5B or above), but for the purposes of this tutorial, we are going to download a 345M parameter GPT model from NVIDIA NGC." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "48cdf868", - "metadata": {}, - "outputs": [], - "source": [ - "# Check what GPT .nemo models we have available on NGC\n", - "from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel\n", - "MegatronGPTModel.list_available_models()" - ] - }, - { - "cell_type": "markdown", - "id": "ede350ed", - "metadata": {}, - "source": [ - "If we wanted to use the GPT model class directly, we could instantiate a trainer and then download the model by calling \n", - "`gpt_model = MegatronGPTModel.from_pretrained(model_name=\"megatron_gpt_345m\", trainer=trainer).cuda()`. But we just need the `.nemo` file in our working NeMo directory in this tutorial, so we will download it using `wget`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "364439a1", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Download the model from NGC\n", - "gpt_file_name = \"megatron_gpt_345m.nemo\"\n", - "!wget -nc --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/nemo/megatron_gpt_345m/versions/1/files/megatron_gpt_345m.nemo -O {NEMO_DIR}/{gpt_file_name}" - ] - }, - { - "cell_type": "markdown", - "id": "1d6a8a67", - "metadata": {}, - "source": [ - "Now that we have a `.nemo` GPT file to work with, we need to add its path to our prompt learning config. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2778a5fa", - "metadata": {}, - "outputs": [], - "source": [ - "# Set GPT model path on prompt learning config\n", - "config.model.language_model_path = gpt_file_name" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "943a9c83", - "metadata": {}, - "source": [ - "We can also set where we want the final prompt tuned model to be saved by setting `model.nemo_path`. By default the tuned prompt learning model will be saved in your current working directory to a `.nemo` file with the same name as your experiment (`config.name`). Let's change the save name to be `p_tuned_gpt.nemo`. **Your model path must end in `.nemo`.**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a278cbdf", - "metadata": {}, - "outputs": [], - "source": [ - "config.exp_manager.checkpoint_callback_params.save_nemo_on_train_end= True\n", - "config.exp_manager.checkpoint_callback_params.always_save_nemo= True\n", - "config.exp_manager.checkpoint_callback_params.save_best_model= True" - ] - }, - { - "cell_type": "markdown", - "id": "378a73e7", - "metadata": {}, - "source": [ - "### Setting P-Tuning Specific Params\n", - "Within the config file, p-tuning and prompt-tuning each have a couple of hyperparameters specific to them. We first need to tell the model that we want to do p-tuning, not prompt-tuning. To do this, we set the **`model.virtual_prompt_style`** hyperparameter like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "68763763", - "metadata": {}, - "outputs": [], - "source": [ - "from nemo.collections.nlp.modules.common import VirtualPromptStyle\n", - "config.model.virtual_prompt_style = VirtualPromptStyle.P_TUNING" - ] - }, - { - "cell_type": "markdown", - "id": "947dec63", - "metadata": {}, - "source": [ - "Then we can set the 2 p-tuning specific parameters. Reminder, p-tuning uses an LSTM prompt encoder to predict virtual tokens. \n", - "\n", - "- **`p_tuning.dropout`** the LSTM prompt encoder dropout probability \n", - "- **`p_tuning.num_layers`** the number of LSTM layers you want your p-tuning prompt encoder to have\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "03f893ef", - "metadata": {}, - "outputs": [], - "source": [ - "config.model.p_tuning.dropout = 0.0\n", - "config.model.p_tuning.num_layers = 2\n", - "config.model.global_batch_size = 2\n", - "config.model.micro_batch_size = 1" - ] - }, - { - "cell_type": "markdown", - "id": "a988d16e", - "metadata": {}, - "source": [ - "Let's have a look at all the values we've set in the model config. You can change any of these values in the same manner we've been using above. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12a37ada", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Final model config\n", - "print(OmegaConf.to_yaml(config.model))" - ] - }, - { - "cell_type": "markdown", - "id": "6b4bc7f3", - "metadata": {}, - "source": [ - "### Setting Prompt-Tuning Specific Params\n", - "\n", - "Though we are not using prompt tuning in this training session, let's go over the prompt tuning specific parameters we would use if we were. 
\n", - "\n", - "- **`prompt_tuning.new_prompt_init_methods`** Whether you want to initialize virtual token embeddings from the embeddings of existing parts of the model's vocabulary (either 'text' or 'random')\n", - "- **`prompt_tuning.new_prompt_init_text`** The text you want to use if you have 'text' in the list above, should be None otherwise. \n", - "\n", - "Each of the above hyperparameters are a list of strings. \n", - "\n", - "`new_prompt_init_methods` would look like `[\"text\", \"random\", \"text\", \"text\"]` if you were prompt tuning on 4 tasks at once, and you wanted the second task in `new_tasks` to use random initialization. \n", - "\n", - "`new_prompt_init_text` might look like `[\"some text I want to use\", None, \"some other text\", \"task text goes here\"]` for those four new tasks. \n", - "\n", - "The order of both should correspond to the order of the tasks you have listed in `model.new_tasks`. " - ] - }, - { - "cell_type": "markdown", - "id": "4c048852", - "metadata": {}, - "source": [ - "# Building the PyTorch Lightning Trainer\n", - "NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem.\n", - "\n", - "Let's first instantiate a Trainer object" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "90f85b2a", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import pytorch_lightning as pl\n", - "from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategyNotebook\n", - "from pytorch_lightning.plugins.environments import TorchElasticEnvironment\n", - "\n", - "# let's modify some trainer configs\n", - "# check if we have GPU available and uses it\n", - "accelerator = 'gpu' if torch.cuda.is_available() else 'cpu'\n", - "config.trainer.accelerator = accelerator\n", - "config.trainer.devices = 1\n", - "config.trainer.max_epochs = 4\n", - "config.trainer.val_check_interval = 1.0\n", - "\n", - "# for PyTorch Native AMP set precision=16\n", - "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n", - "\n", - "# setup cluster environment parameters\"\n", - "# use torch elastic cluster environment so `create_process_externally` is True\n", - "# the launcher is set to None. 
It will not try to spawn new processes.\n", - "# It won't create the misconfiguration error because of the `interactive session`\n", - "os.environ[\"LOCAL_RANK\"] = '0'\n", - "os.environ[\"RANK\"] = '0'\n", - "os.environ[\"WORLD_SIZE\"] = '1'\n", - "\n", - "strategy = NLPDDPStrategyNotebook(find_unused_parameters=False, no_ddp_communication_hook=True)\n", - "plugins = [TorchElasticEnvironment()]\n", - "trainer = pl.Trainer(plugins= plugins, strategy=strategy, **config.trainer)\n", - "\n", - "print(\"Trainer config - \\n\")\n", - "print(OmegaConf.to_yaml(config.trainer))" - ] - }, - { - "cell_type": "markdown", - "id": "4d0124c1", - "metadata": {}, - "source": [ - "# Setting up a NeMo Experiment\n", - "\n", - "NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f2c943ba", - "metadata": {}, - "outputs": [], - "source": [ - "from nemo.utils.exp_manager import exp_manager\n", - "\n", - "# Set name of the experiment \n", - "config.name = 'p_tuning'\n", - "config.exp_manager.resume_if_exists = False\n", - "\n", - "# Init the experiment manager and view the exp_dir\n", - "exp_dir = exp_manager(trainer, config.get(\"exp_manager\", None))\n", - "exp_dir = str(exp_dir)\n", - "print(exp_dir)" - ] - }, - { - "cell_type": "markdown", - "id": "5860bd90", - "metadata": {}, - "source": [ - "We can also set learning hyperparameters as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4c4ec542", - "metadata": {}, - "outputs": [], - "source": [ - "# Set some of the learning parameters\n", - "config.model.optim.lr = 1e-4\n", - "config.model.precision = config.trainer.precision" - ] - }, - { - "cell_type": "markdown", - "id": "298b3dce", - "metadata": {}, - "source": [ - "# First P-Tuning Session\n", - "The only thing left to do is load up the model and begin p-tuning!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b4bda19b", - "metadata": {}, - "outputs": [], - "source": [ - "from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import MegatronGPTPromptLearningModel\n", - "\n", - "model = MegatronGPTPromptLearningModel(cfg=config.model, trainer=trainer)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2d99f433", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Training set to 2 epochs by default in a cell above\n", - "# Each epoch will take around 1min 15sec, but training time can vary\n", - "trainer.fit(model)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6aab09d4", - "metadata": {}, - "source": [ - "# Inference After P-Tuning\n", - "One way to run inference after p-tuning or prompt-tuning your model is to call `model.generate()`. `model.generate()` takes in \n", - "\n", - "- `inputs` which can be either a list of dictionary objects or `.jsonl` files containing dictionary objects, \n", - "- `length_params`\n", - "- `sampling_params`\n", - "\n", - "as arguments. More information about the [text generation API can be found here](https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/transformer/text_generation.py).\n", - "\n", - "If `length_params` and `sampling_params` are set to `None`, the model generates output with a greedy decoding strategy and generates up to `30` new tokens. Most predictive downstream tasks (not text generation tasks), use greedy sampling. 
To see other ways to run inference with your prompt learning model and more details on how to define various inference parameters, visit `examples/nlp/language_modeling/megatron_gpt_eval.py`.\n", - "\n", - "Below are some randomly selected test examples from the sentiment classification and intent and slot classification test files. Notice that the `label` field is dropped from all test examples. The `MegatronPromptLearningDataset` called within `.generate()` automatically leaves fields in the prompt template empty when they are not provided in the data json. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dc95e764", - "metadata": {}, - "outputs": [], - "source": [ - "test_examples = [\n", - " {\"taskname\": \"squad\", \"context\": \"The build was released for download later in the day in standard 32-bit and 64-bit versions, plus a special 64-bit version which included SDKs and developer tools (Visual Studio Express and Expression Blend) for developing Metro-style apps. The Windows Store was announced during the presentation, but was not available in this build. According to Microsoft, there were about 535,000 downloads of the developer preview within the first 12 hours of its release. Originally set to expire on March 11, 2012, in February 2012 the Developer Preview's expiry date was changed to January 15, 2013.\", \"question\": \"When was the Developer preview initially intended to expire?\"},\n", - " {\"taskname\": \"squad\", \"context\": \"The structures of most federal governments incorporate mechanisms to protect the rights of component states. One method, known as 'intrastate federalism', is to directly represent the governments of component states in federal political institutions. Where a federation has a bicameral legislature the upper house is often used to represent the component states while the lower house represents the people of the nation as a whole. A federal upper house may be based on a special scheme of apportionment, as is the case in the senates of the United States and Australia, where each state is represented by an equal number of senators irrespective of the size of its population.\", \"question\": \"What is a bicameral legislature?\"},\n", - " {\"taskname\": \"squad\", \"context\": \"Imported mystery religions, which offered initiates salvation in the afterlife, were a matter of personal choice for an individual, practiced in addition to carrying on one's family rites and participating in public religion. The mysteries, however, involved exclusive oaths and secrecy, conditions that conservative Romans viewed with suspicion as characteristic of \\\"magic\\\", conspiratorial (coniuratio), or subversive activity. 
Sporadic and sometimes brutal attempts were made to suppress religionists who seemed to threaten traditional morality and unity, as with the senate's efforts to restrict the Bacchanals in 186 BC.\", \"question\": \"What was the practice of religion to the Romans?\"}\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74a5a358", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "response = model.generate(inputs=test_examples, length_params=None)\n", - "\n", - "print('The prediction results of some sample queries with the trained model:')\n", - "for result in response['sentences']:\n", - " print(result)\n", - " print(\"-\" * 30)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 - } From a7709b833674a85874daabe18719aebca8033320 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 25 Feb 2024 20:59:00 -0700 Subject: [PATCH 12/12] release updates (#8394) * release updates (#8378) * [tutorial] fixed missing RIR scripts file. (#8257) Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> * add values to en tts dict (#7879) Signed-off-by: Mariana Graterol Fuenmayor * mcore ds fix Signed-off-by: Dmytro Pykhtar * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update mcore Signed-off-by: dimapihtar * revert asr files Signed-off-by: dimapihtar * add comments Signed-off-by: dimapihtar * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add support for mcore mock dataset Signed-off-by: dimapihtar * update mcore version Signed-off-by: dimapihtar * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update gpt cfg Signed-off-by: dimapihtar * update mcore commit Signed-off-by: dimapihtar * fix Bert unit tests Signed-off-by: dimapihtar * update bert tests Signed-off-by: dimapihtar * fix bert mcore test Signed-off-by: dimapihtar * fix gpt jenkins tests Signed-off-by: dimapihtar * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add support for dict data input type Signed-off-by: dimapihtar * add mock ds test Signed-off-by: dimapihtar * add test for dict data input type Signed-off-by: dimapihtar * mcore ds fix Signed-off-by: dimapihtar * data input fix Signed-off-by: dimapihtar --------- Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Signed-off-by: Mariana Graterol Fuenmayor Signed-off-by: Dmytro Pykhtar Signed-off-by: dimapihtar Signed-off-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Mariana <47233618+mgrafu@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Pablo Garay * Update megatron_gpt_model.py Signed-off-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> --------- Signed-off-by: Xuesong Yang 
<1646669+XuesongYang@users.noreply.github.com> Signed-off-by: Mariana Graterol Fuenmayor Signed-off-by: Dmytro Pykhtar Signed-off-by: dimapihtar Signed-off-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Mariana <47233618+mgrafu@users.noreply.github.com> Co-authored-by: Dmytro Pykhtar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Pablo Garay Co-authored-by: Eric Harper --- Jenkinsfile | 56 +++++++++---------- .../language_modeling/megatron_gpt_model.py | 5 +- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0fc492961c61b..cea7083f5718f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -3698,7 +3698,7 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' model.activations_checkpoint_method='block' \ model.activations_checkpoint_granularity='full' \ model.activations_checkpoint_num_layers=1 \ - model.data.data_prefix=[.5,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document,.5,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document] \ + model.data.data_prefix='{train:[1.0,/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document],validation:[/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document],test:[/home/TestData/nlp/megatron_gpt/data/gpt/simple_wiki_gpt_preproc_text_document]}' \ model.data.index_mapping_dir=examples/nlp/language_modeling/gpt_index_mappings" // commented out to save time on github ci @adithyare //sh "python examples/nlp/language_modeling/megatron_gpt_pretraining.py \ @@ -5243,34 +5243,34 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' } } failFast true - //parallel { - //stage('MockGPTDataset') { - // steps { - // sh "python examples/nlp/language_modeling/megatron_gpt_pretraining.py \ - // trainer.max_steps=10 \ - // trainer.limit_val_batches=7 \ - // trainer.val_check_interval=10 \ - // exp_manager.exp_dir=examples/nlp/language_modeling/gpt_pretrain_results \ - // model.data.data_impl=mock \ - // model.data.data_prefix=[] \ - // " - // sh "rm -rf examples/nlp/language_modeling/gpt_pretrain_results" - // } - //} - //stage('MockT5Dataset') { - steps { - sh "python examples/nlp/language_modeling/megatron_t5_pretraining.py \ - trainer.max_steps=10 \ - trainer.limit_val_batches=3 \ - trainer.val_check_interval=10 \ - exp_manager.exp_dir=examples/nlp/language_modeling/t5_pretrain_results \ - model.data.data_impl=mock \ - model.data.data_prefix=[] \ - " - sh "rm -rf examples/nlp/language_modeling/t5_pretrain_results" + parallel { + stage('MockGPTDataset') { + steps { + sh "python examples/nlp/language_modeling/megatron_gpt_pretraining.py \ + trainer.max_steps=10 \ + trainer.limit_val_batches=7 \ + trainer.val_check_interval=10 \ + exp_manager.exp_dir=examples/nlp/language_modeling/gpt_pretrain_results \ + model.data.data_impl=mock \ + model.data.data_prefix=[] \ + " + sh "rm -rf examples/nlp/language_modeling/gpt_pretrain_results" + } + } + stage('MockT5Dataset') { + steps { + sh "python examples/nlp/language_modeling/megatron_t5_pretraining.py \ + trainer.max_steps=10 \ + trainer.limit_val_batches=3 \ + trainer.val_check_interval=10 \ + exp_manager.exp_dir=examples/nlp/language_modeling/t5_pretrain_results \ + model.data.data_impl=mock \ + 
model.data.data_prefix=[] \ + " + sh "rm -rf examples/nlp/language_modeling/t5_pretrain_results" + } + } } - //} - //} } stage('L2: TTS Fast dev runs 1') { diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index 6a4ab113877d9..9937b1b84bd34 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -1204,12 +1204,11 @@ def build_train_valid_test_datasets(self): # Setting N = 1 we force E to be 1 as well train_valid_test_num_samples = [max_train_steps * global_batch_size, 1, 1] - mock_dataset = self.cfg.data.get("mock_dataset", False) + mock_dataset = True if self.cfg.data.get("data_impl", "mmap") == "mock" else False kwargs = { "is_built_on_rank": is_dataset_built_on_rank, "random_seed": self.cfg.seed, "sequence_length": self.cfg.data.seq_length, - "split": self.cfg.data.splits_string, "path_to_cache": self.cfg.data.index_mapping_dir, "tokenizer": self.tokenizer, "reset_position_ids": self.reset_position_ids, @@ -1218,11 +1217,13 @@ def build_train_valid_test_datasets(self): "mock": mock_dataset, } + # support for dict data input type if isinstance(self.cfg.data.data_prefix, DictConfig): _pref = self.cfg.data.data_prefix kwargs['blend_per_split'] = [_pref['train'], _pref['validation'], _pref['test']] else: kwargs['blend'] = self.cfg.data.data_prefix + kwargs["split"] = self.cfg.data.splits_string if self.cfg.data.get('add_fim', False): dataset_config = GPTFIMDatasetConfig(self.tokenizer, self.cfg.data.fim, **kwargs)
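For context on the data-input change in the hunk above, here is a small, hypothetical sketch of the two `model.data.data_prefix` layouts the updated `build_train_valid_test_datasets` logic accepts; the paths are placeholders and the configs are illustrative, not taken from the repository.

```python
# Hypothetical configs showing the two accepted data_prefix layouts (paths are placeholders).
from omegaconf import DictConfig, OmegaConf

# 1) Classic weighted blend shared by all splits; splits_string carves out train/val/test.
blend_style = OmegaConf.create(
    {
        "data_prefix": [0.5, "/data/corpusA_text_document", 0.5, "/data/corpusB_text_document"],
        "splits_string": "95,4,1",
    }
)

# 2) Dict form added here: an explicit blend per split, forwarded as blend_per_split;
#    as the hunk above suggests, splits_string is only passed along in the list form.
per_split_style = OmegaConf.create(
    {
        "data_prefix": {
            "train": [1.0, "/data/corpusA_text_document"],
            "validation": ["/data/corpusB_text_document"],
            "test": ["/data/corpusB_text_document"],
        }
    }
)

print(isinstance(per_split_style.data_prefix, DictConfig))  # True -> takes the blend_per_split path
```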