| | |
| | | data = { |
| | | "text": torch.unsqueeze(torch.from_numpy(mini_sentence_id), 0), |
| | | "text_lengths": torch.from_numpy(np.array([len(mini_sentence_id)], dtype='int32')), |
| | | "vad_indexes": torch.from_numpy(np.array([len(cache)], dtype='int32')), |
| | | } |
| | | data = to_device(data, self.device) |
| | | y, _ = self.wrapped_model(**data) |
| | |
| | | **kwargs, |
| | | ): |
| | | assert check_argument_types() |
| | | logging.basicConfig( |
| | | level=log_level, |
| | | format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", |
| | | ) |
| | | ncpu = kwargs.get("ncpu", 1) |
| | | torch.set_num_threads(ncpu) |
| | | |
| | | if ngpu >= 1 and torch.cuda.is_available(): |
| | | device = "cuda" |