| | |
| | | # Input as audio signal |
| | | if isinstance(speech, np.ndarray): |
| | | speech = torch.tensor(speech) |
| | | |
| | | |
| | | if(speech.dim()==3): |
| | | speech = torch.squeeze(speech, 2) |
| | | #speech = speech.unsqueeze(0).to(getattr(torch, self.dtype)) |
| | | speech = speech.to(getattr(torch, self.dtype)) |
| | | # lengths: (1,)
| | |
| | | output_dir_v2: Optional[str] = None, |
| | | fs: dict = None, |
| | | param_dict: dict = None, |
| | | **kwargs, |
| | | ): |
| | | # 3. Build data-iterator |
| | | if data_path_and_name_and_type is None and raw_inputs is not None: |
| | |
| | | data_path_and_name_and_type, |
| | | dtype=dtype, |
| | | batch_size=batch_size, |
| | | fs=fs, |
| | | mc=True, |
| | | key_file=key_file, |
| | | num_workers=num_workers, |
| | | preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False), |