From 38b69b021a17d1ef4190fb829fde9f739ba00785 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 18 May 2023 22:58:17 +0800
Subject: [PATCH] Merge pull request #529 from alibaba-damo-academy/langgz-patch-1

---
 README.md |   26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/README.md b/README.md
index 996cde4..8d75908 100644
--- a/README.md
+++ b/README.md
@@ -94,22 +94,18 @@
 ## Citations
 
 ``` bibtex
-@inproceedings{gao2022paraformer,
-  title={Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition},
-  author={Gao, Zhifu and Zhang, Shiliang and McLoughlin, Ian and Yan, Zhijie},
+@inproceedings{gao2023funasr,
+  author={Zhifu Gao and Zerui Li and Jiaming Wang and Haoneng Luo and Xian Shi and Mengzhe Chen and Yabin Li and Lingyun Zuo and Zhihao Du and Zhangyu Xiao and Shiliang Zhang},
+  title={FunASR: A Fundamental End-to-End Speech Recognition Toolkit},
+  year={2023},
   booktitle={INTERSPEECH},
-  year={2022}
 }
-@inproceedings{gao2020universal,
-  title={Universal ASR: Unifying Streaming and Non-Streaming ASR Using a Single Encoder-Decoder Model},
-  author={Gao, Zhifu and Zhang, Shiliang and Lei, Ming and McLoughlin, Ian},
-  booktitle={arXiv preprint arXiv:2010.14099},
-  year={2020}
-}
-@inproceedings{Shi2023AchievingTP,
-  title={Achieving Timestamp Prediction While Recognizing with Non-Autoregressive End-to-End ASR Model},
-  author={Xian Shi and Yanni Chen and Shiliang Zhang and Zhijie Yan},
-  booktitle={arXiv preprint arXiv:2301.12343}
-  year={2023}
+@inproceedings{gao22b_interspeech,
+  author={Zhifu Gao and Shiliang Zhang and Ian McLoughlin and Zhijie Yan},
+  title={{Paraformer}: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition},
+  year=2022,
+  booktitle={Proc. Interspeech 2022},
+  pages={2063--2067},
+  doi={10.21437/Interspeech.2022-9996}
 }
 ```

--
Gitblit v1.9.1