Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions recipes/SLURP/direct/hparams/train.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,7 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter
limit: !ref <number_of_epochs>

# Models
asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams
source: speechbrain/asr-crdnn-rnnlm-librispeech
run_opts: {"device":"cuda:0"}
asr_model_source: speechbrain/asr-crdnn-rnnlm-librispeech

slu_enc: !new:speechbrain.nnet.containers.Sequential
input_shape: [null, null, !ref <ASR_encoder_dim>]
Expand Down
8 changes: 8 additions & 0 deletions recipes/SLURP/direct/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -340,6 +340,14 @@ def text_pipeline(semantics):
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(device=run_opts["device"])

# Download pretrained ASR model
from speechbrain.pretrained import EncoderDecoderASR

hparams["asr_model"] = EncoderDecoderASR.from_hparams(
source=hparams["asr_model_source"],
run_opts={"device": run_opts["device"]},
)

# Brain class initialization
slu_brain = SLU(
modules=hparams["modules"],
Expand Down
4 changes: 1 addition & 3 deletions recipes/fluent-speech-commands/direct/hparams/train.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,7 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter
limit: !ref <number_of_epochs>

# Models
asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams
source: speechbrain/asr-crdnn-rnnlm-librispeech
run_opts: {"device":"cuda:0"}
asr_model_source: speechbrain/asr-crdnn-rnnlm-librispeech

slu_enc: !new:speechbrain.nnet.containers.Sequential
input_shape: [null, null, !ref <ASR_encoder_dim>]
Expand Down
8 changes: 8 additions & 0 deletions recipes/fluent-speech-commands/direct/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,6 +322,14 @@ def text_pipeline(semantics):
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(device=run_opts["device"])

# Download pretrained ASR model
from speechbrain.pretrained import EncoderDecoderASR

hparams["asr_model"] = EncoderDecoderASR.from_hparams(
source=hparams["asr_model_source"],
run_opts={"device": run_opts["device"]},
)

# Brain class initialization
slu_brain = SLU(
modules=hparams["modules"],
Expand Down
4 changes: 1 addition & 3 deletions recipes/timers-and-such/direct/hparams/train.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -66,9 +66,7 @@ epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter
limit: !ref <number_of_epochs>

# Models
asr_model: !apply:speechbrain.pretrained.EncoderDecoderASR.from_hparams
source: speechbrain/asr-crdnn-rnnlm-librispeech
run_opts: {"device":"cuda:0"}
asr_model_source: speechbrain/asr-crdnn-rnnlm-librispeech

slu_enc: !new:speechbrain.nnet.containers.Sequential
input_shape: [null, null, !ref <ASR_encoder_dim>]
Expand Down
8 changes: 8 additions & 0 deletions recipes/timers-and-such/direct/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,14 @@ def text_pipeline(semantics):
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(device=run_opts["device"])

# Download pretrained ASR model
from speechbrain.pretrained import EncoderDecoderASR

hparams["asr_model"] = EncoderDecoderASR.from_hparams(
source=hparams["asr_model_source"],
run_opts={"device": run_opts["device"]},
)

# Brain class initialization
slu_brain = SLU(
modules=hparams["modules"],
Expand Down
12 changes: 12 additions & 0 deletions tests/recipes/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,18 @@ If you like to test for all recipes belonging to one dataset:
python -c 'from tests.utils.recipe_tests import run_recipe_tests; print("TEST FAILED!") if not(run_recipe_tests(filters_fields=["Dataset"], filters=[["CommonLanguage", "LibriSpeech"]], do_checks=False, run_opts="--device=cuda")) else print("TEST PASSED")'
```

You can run the recipe on the CPU just by setting the run_opts properly:
```
python -c 'from tests.utils.recipe_tests import run_recipe_tests; print("TEST FAILED!") if not(run_recipe_tests(filters_fields=["Dataset"], filters=[["CommonLanguage", "LibriSpeech"]], do_checks=False, run_opts="--device=cpu")) else print("TEST PASSED")'
```

In some cases, you might want to test the recipe on a non-default GPU (e.g., cuda:1). This helps detect issues in recipes where the device was hard-coded. You can do that simply with:

```
python -c 'from tests.utils.recipe_tests import run_recipe_tests; print("TEST FAILED!") if not(run_recipe_tests(filters_fields=["Dataset"], filters=[["CommonLanguage", "LibriSpeech"]], do_checks=False, run_opts="--device=cuda:0")) else print("TEST PASSED")'
```


To target a specific recipe (here by its hparam yaml):
```
python -c 'from tests.utils.recipe_tests import run_recipe_tests; print("TEST FAILED!") if not(run_recipe_tests(filters_fields=["Hparam_file"], filters=[["recipes/TIMIT/ASR/transducer/hparams/train_wav2vec.yaml"]], do_checks=False, run_opts="--device=cuda")) else print("TEST PASSED")'
Expand Down