Newer
Older
urbanLifeline_YanAn / src / views / voice / recordPage / index.vue
@zhangqy zhangqy on 24 Oct 15 KB 语音优化
<template>
<div id="as">
<!-- <div class="record-page"> -->

<!-- Clickable voice bar; Record() drives the four-state machine shown below -->
<div class="recordBtmBox" @click="Record()">
<!-- Four visual states -->
<!-- State 0: idle, waiting for the user to click -->
<img
class="DongImg"
v-show="process == 0"
src="@/assets/images/voice/ceshi1.png"
style="opacity: 0.3; bottom: 5px"
alt=""
/>
<el-tooltip content="点击开启智能语音" effect="customized">
<Transition name="slideYY-fade">
<img
v-show="process == 0"
class="MKF"
src="@/assets/images/voice/MaiKeFeng.png"
alt=""
/>
</Transition>
</el-tooltip>
<!-- State 1: capturing, recording the user's voice -->
<img
class="DongImg"
v-show="process == 1"
src="@/assets/images/voice/ceshi2.png"
alt=""
/>
<!-- State 2: waiting while speech recognition runs -->
<img
class="DongImg"
v-show="process == 2"
src="@/assets/images/voice/ceshi3.png"
alt=""
/>
<!-- State 3: success — shows the result; clicking can trigger recognition again -->
<img
class="DongImg"
v-show="process == 3"
src="@/assets/images/voice/ceshi1.png"
alt=""
/>
<!-- Status / result text overlay -->
<div class="FontBox">{{ nowword }}</div>
</div>
</div>
</template>
  50.  
  51. <script setup name="as">
  52. import { ref, reactive, toRefs, onMounted } from "vue";
  53. import lamejs from "lamejs";
  54.  
  55. const { proxy } = getCurrentInstance();
  56. import Recorder from "js-audio-recorder";
  57. import useUserStore from "@/store/modules/user";
  58. const userStore = useUserStore();
  59. const lockReconnect = ref(null);
  60. const timeoutnum = ref(null);
  61. import bus from "@/bus";
  62. import { parseTime } from "@/utils/ruoyi";
  63.  
  64. const ComShowID = ref(true); //当前大屏的菜单id
  65. const props = defineProps({
  66. ComShowID: {
  67. type: Number,
  68. },
  69. });
  70. watch(
  71. () => props.ComShowID,
  72. () => {
  73. ComShowID.value = props.ComShowID;
  74. },
  75. { immediate: true }
  76. );
  77. var recorder = new Recorder({
  78. sampleBits: 16, // 采样位数,支持 8 或 16,默认是16
  79. sampleRate: 11025, // 采样率,支持 11025、16000、22050、24000、44100、48000,根据浏览器默认值,我的chrome是48000
  80. numChannels: 1, // 声道,支持 1 或 2, 默认是1
  81. // compiling: false,(0.x版本中生效,1.x增加中) // 是否边录边转换,默认是false
  82. });
  83. // 功能3
  84.  
  85. // const shibieword = ref("");
  86. const nowword = ref(""); //当前指令进度位置
  87. const process = ref(0); //0 :未开始 1:录音中 2:识别中 3:识别结束
  88. const data = reactive({
  89. recordStatus: null, //录音进程的当前位置
  90. recognizeWs: null,
  91. });
  92. onMounted(() => {
  93. // 绑定事件-打印的是当前录音数据
  94. initRecognizeWs();
  95. // initWs1();
  96. });
  97. onBeforeUnmount(() => {
  98. data.recognizeWs && data.recognizeWs.close();
  99. });
  100. // 语音识别流程方法
  101. const Record = () => {
  102. if (process.value == 0) {
  103. // 正常流程
  104. // process.value = 1;
  105. // 录音中
  106. startRecorder("begin");
  107. } else if (process.value == 3) {
  108. // 语音识别结果展示,并在2秒后回到初始化状态
  109. setTimeout(() => {
  110. process.value = 0;
  111. nowword.value = "";
  112. }, 2000);
  113. } else {
  114. // 中途的瞎操作就不管了
  115. }
  116. };
  117. //初始化语音调度websocket服务
  118. function initRecognizeWs() {
  119. if (data.recognizeWs) {
  120. data.recognizeWs.onclose();
  121. }
  122. let wsuri;
  123. if (window.location.protocol.includes("https")) {
  124. //线上环境
  125. wsuri = `wss://${window.location.host}/websocket/voiceWebsocket`;
  126. } else {
  127. //本地环境
  128. // wsuri = `ws://192.168.20.145:13002/voiceWebsocket`;
  129. wsuri = `wss://server2.wh-nf.cn:8088/websocket/voiceWebsocket`;
  130. // wsuri = `wss://jingkai.wh-nf.cn:8986/voicesWebocket`;
  131. }
  132. data.recognizeWs = new WebSocket(wsuri);
  133.  
  134. //连接建立
  135. data.recognizeWs.onopen = function (e) {
  136. console.log("连接成功", e);
  137. };
  138.  
  139. //连接建立失败
  140. data.recognizeWs.onerror = function (evt) {
  141. console.log("连接失败", evt);
  142. reconnect();
  143. };
  144.  
  145. data.recognizeWs.onmessage = function (e) {
  146. if (e.data != "客户端连接成功") {
  147. let data = JSON.parse(e.data);
  148. let params = data.data;
  149. console.log(parseTime(new Date()), "Websocket接收值", data);
  150. console.log("接收的data内部的data", params);
  151.  
  152. // // 表明进入了文字播报的转义功能 不走指令等功能
  153. // if (wordbusinessSourceCode.value == data.type) {
  154. // // 将返回的type数据与文字播报的业务code进行精准匹配 如果匹配上了 才说明是发送的这条数据
  155. // if (params.audioFilePath) {
  156. // wordaudioFilePath.value = pathToUrl(params.audioFilePath);
  157. // }
  158. // }
  159. if (params.recognitionState == 1) {
  160. // 将返回的type数据与语音指令的业务code进行精准匹配 如果匹配上了 才说明是发送的这条数据
  161. // shibieword.value = params.recognitionResult;
  162. if (params.recognitionActionCode == "error") {
  163. nowword.value = `指令未识别,请您再说一遍`;
  164. setTimeout(() => {
  165. process.value = 0;
  166. nowword.value = "";
  167. // Record();
  168. }, 2000);
  169. } else {
  170. nowword.value = `成功识别语音,${params.recognitionResult}`;
  171. if (process.value == 2) {
  172. process.value = 3;
  173. Record();
  174. }
  175. if (params.recognitionActionCode == "open") {
  176. // 打开的操作
  177. // if (params.recognitionDataSourceCode == "pump" && params.recognitionDataId) {
  178. // // 例如是泵站的操作 具体业务书写..... recognitionDataId 对象的唯一id 泵站:泵站id 站点:站点id
  179. // // pumpdia.value = true;
  180. // } else if ("其它业务") {
  181. // }
  182.  
  183. switch (params.recognitionDataSourceCode) {
  184. // 图层控制
  185. case "YuShuiFenQu":
  186. // 先清空
  187. bus.emit("clearAllLayer");
  188. bus.emit("SetLayerShow", ["雨水分区"]);
  189. break;
  190. case "RanqiGuanWang":
  191. // 先清空
  192. bus.emit("clearAllLayer");
  193. bus.emit("SetLayerShow", ["燃气管网"]);
  194. break;
  195. case "fangZhenMap":
  196. bus.emit("SetLayerShow", ["仿真地图"]);
  197. break;
  198. case "buildingLayer":
  199. bus.emit("SetLayerShow", ["三维建筑"]);
  200. break;
  201. case "ranqiRisk":
  202. bus.emit("SetLayerShow", ["燃气风险评估"]);
  203. break;
  204. case "paishuiRisk":
  205. bus.emit("SetLayerShow", ["排水风险评估"]);
  206. break;
  207. // 专题切换
  208. case "Menu_FXPL":
  209. bus.emit("ChangeZhuanTiTu", {
  210. MenuData1: {
  211. name: "排水安全",
  212. id: 3,
  213. },
  214. MenuData2: {
  215. name: "防汛排涝",
  216. id: 7,
  217. },
  218. num: 2,
  219. });
  220. break;
  221. case "Menu_ZHSD":
  222. bus.emit("ChangeZhuanTiTu", {
  223. MenuData1: {
  224. name: "隧道安全",
  225. id: 5,
  226. },
  227. MenuData2: null,
  228. num: 1,
  229. });
  230. break;
  231. case "Menu_ZHQL":
  232. bus.emit("ChangeZhuanTiTu", {
  233. MenuData1: {
  234. name: "桥梁安全",
  235. id: 4,
  236. },
  237. MenuData2: null,
  238. num: 1,
  239. });
  240. break;
  241. case "Menu_ZHHM":
  242. bus.emit("ChangeZhuanTiTu", {
  243. MenuData1: {
  244. name: "智慧海绵",
  245. id: 6,
  246. },
  247. MenuData2: null,
  248. num: 1,
  249. });
  250.  
  251. break;
  252. case "Menu_ZHPS":
  253. bus.emit("ChangeZhuanTiTu", {
  254. MenuData1: {
  255. name: "排水安全",
  256. id: 3,
  257. },
  258. MenuData2: null,
  259. num: 1,
  260. });
  261. break;
  262. case "Menu_ZHRQ":
  263. bus.emit("ChangeZhuanTiTu", {
  264. MenuData1: {
  265. name: "燃气安全",
  266. id: 2,
  267. },
  268. MenuData2: null,
  269. num: 1,
  270. });
  271. break;
  272. case "Menu_ZTGL":
  273. bus.emit("ChangeZhuanTiTu", {
  274. MenuData1: {
  275. name: "总体概览",
  276. id: 1,
  277. },
  278. MenuData2: null,
  279. num: 1,
  280. });
  281. break;
  282. case "Menu_JCFX":
  283. bus.emit("ChangeZhuanTiTu", {
  284. MenuData1: {
  285. name: "监测分析",
  286. id: 61,
  287. },
  288. MenuData2: null,
  289. num: 1,
  290. });
  291. break;
  292. case "Menu_JSPG":
  293. bus.emit("ChangeZhuanTiTu", {
  294. MenuData1: {
  295. name: "建设评估",
  296. id: 62,
  297. },
  298. MenuData2: null,
  299. num: 1,
  300. });
  301. break;
  302. case "Open_XQHG":
  303. // 汛情回顾
  304. // 只在排水专题触发
  305. if (ComShowID.value == 7) {
  306. bus.emit("openUserDialog");
  307. } else {
  308. debugger;
  309. if (userStore.ZhuanTiType == 1) {
  310. bus.emit("ChangeZhuanTiTu", {
  311. MenuData1: {
  312. name: "排水安全",
  313. id: 3,
  314. },
  315. MenuData2: {
  316. name: "防汛排涝",
  317. id: 7,
  318. },
  319. num: 2,
  320. });
  321. setTimeout(() => {
  322. bus.emit("openUserDialog");
  323. }, 1000);
  324. }
  325. }
  326. break;
  327. }
  328. } else if (params.recognitionActionCode == "close") {
  329. // 关闭的操作
  330. } else if (params.recognitionActionCode == "detail") {
  331. // 查看的操作
  332. } else {
  333. nowword.value = `指令未识别,请您再说一遍`;
  334. }
  335. }
  336. }
  337. }
  338. };
  339. //关闭连接
  340. data.recognizeWs.onclose = function (e) {
  341. console.log("断开连接");
  342. };
  343. //重新连接
  344. function reconnect() {
  345. if (lockReconnect.value) {
  346. return;
  347. }
  348.  
  349. lockReconnect.value = true;
  350. //没连接上会一直重连,设置延迟避免请求过多
  351. timeoutnum.value && clearTimeout(timeoutnum.value);
  352. timeoutnum.value = setTimeout(() => {
  353. lockReconnect.value = false;
  354. }, 5000);
  355. }
  356. }
  357. // 结束录音并自动上传
  358. function stopRecorderAndupload(val) {
  359. nowword.value = "智能识别中...";
  360. console.log(`上传录音`, parseTime(new Date()));
  361. data.recordStatus = val;
  362. recorder.stop();
  363. uploadaudioformwebSocket();
  364. }
  365. /**
  366. * 录音的具体操作功能
  367. * */
  368. // 开始录音
  369. function startRecorder(val) {
  370. data.recordStatus = val;
  371. // 获取麦克风权限
  372. Recorder.getPermission().then(
  373. () => {
  374. // proxy.$modal.msgSuccess("获取权限成功,开始录音");
  375. recorder.start().then(() => {
  376. process.value = 1;
  377. nowword.value = "开始录音";
  378. console.log(`开始录音`, parseTime(new Date()));
  379. //5秒钟后语音识别结束,并进入到下一步骤
  380. setTimeout(() => {
  381. process.value = 2;
  382. if (process.value == 2) {
  383. // 如果手动提前点击了结束进度到下一步,那么这就可以不用走了
  384. stopRecorderAndupload("stop"); //这里面返回结果进行监听,做了处理process.value=3,且回调Record()
  385. }
  386. }, 5000);
  387. });
  388. },
  389. (error) => {
  390. proxy.$modal.msgError("请先允许该网页使用麦克风");
  391. // console.log(`${error.name} : ${error.message}`);
  392. }
  393. );
  394. }
  395. // 将获取到的音频文件上传到服务器[通过webSocket方式]
  396. function uploadaudioformwebSocket() {
  397. const mp3Blob = convertToMp3(recorder.getWAV());
  398. // recorder.download(mp3Blob, "recorder", "mp3");
  399. mp3ToBase64(mp3Blob).then((stream) => {
  400. // console.log('语音打印', stream)
  401. // 下面发送数据
  402. let parms = {
  403. createBy: userStore.userInfo.userName,
  404. voiceType: "mp3",
  405. data: stream,
  406. businessSourceCode: "dpyysb",
  407. };
  408. data.recognizeWs.send(JSON.stringify(parms));
  409. });
  410. }
// Convert the recorder's WAV DataView into an MP3 Blob using lamejs.
function convertToMp3(wavDataView) {
// Read channel count / sample rate from the WAV header.
const wav = lamejs.WavHeader.readHeader(wavDataView); // could instead trust the Recorder config, which carries the same values
const { channels, sampleRate } = wav;
const mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
// Pull raw PCM per channel from the recorder.
const result = recorder.getChannelData();
const buffer = [];
// Reinterpret the raw bytes as 16-bit PCM samples.
// NOTE(review): assumes result.left is present — leftData.length below would
// throw if it is not; verify against js-audio-recorder's getChannelData().
const leftData =
result.left && new Int16Array(result.left.buffer, 0, result.left.byteLength / 2);
const rightData =
result.right && new Int16Array(result.right.buffer, 0, result.right.byteLength / 2);
// NOTE(review): for stereo this sums both channels, so the loop runs past the
// end of leftData (subarray then yields empty chunks) — presumably harmless,
// but confirm against the lamejs encoding examples.
const remaining = leftData.length + (rightData ? rightData.length : 0);
const maxSamples = 1152; // lamejs encodes in 1152-sample frames
for (let i = 0; i < remaining; i += maxSamples) {
const left = leftData.subarray(i, i + maxSamples);
let right = null;
let mp3buf = null;
if (channels === 2) {
right = rightData.subarray(i, i + maxSamples);
mp3buf = mp3enc.encodeBuffer(left, right);
} else {
mp3buf = mp3enc.encodeBuffer(left);
}
if (mp3buf.length > 0) {
buffer.push(mp3buf);
}
}
// Flush the encoder's internal buffer to get the final frames.
const enc = mp3enc.flush();
if (enc.length > 0) {
buffer.push(enc);
}
return new Blob(buffer, { type: "audio/mp3" });
}
  446. function mp3ToBase64(blob) {
  447. return new Promise((resolve, reject) => {
  448. const fileReader = new FileReader();
  449. fileReader.onload = (e) => {
  450. resolve(e.target.result);
  451. };
  452. fileReader.readAsDataURL(blob);
  453. fileReader.onerror = () => {
  454. reject(new Error("blobToBase64 error"));
  455. };
  456. });
  457. }
// Close the recording dialog: reset the prompt text and stop any active capture.
function closedia() {
nowword.value = "你好,请点击【开始录制】,进行语音录制!";
stopRecorder(); // note: called without an argument, so recordStatus becomes undefined
}
  462. // 结束录音
  463. function stopRecorder(val) {
  464. nowword.value = "录音结束";
  465. data.recordStatus = val;
  466. recorder.stop();
  467. }
  468. </script>
  469.  
<style lang="scss" scoped>
/* Voice record bar: bottom-centered absolute overlay (400px wide, centered via negative margin). */
#as {
position: absolute;
bottom: 0px;
left: 50%;
z-index: 999;
margin-left: -200px;

.recordBtmBox {
position: relative;
z-index: 1000;
width: 400px;
height: 40px;
cursor: pointer;

/* Animated state image filling the bar. */
.DongImg {
position: absolute;
width: 400px;
height: 30px;
left: 0;
bottom: 0;
}

/* Microphone icon centered on the bar (idle state). */
.MKF {
position: absolute;
width: 20px;
height: 20px;
left: 190px;
top: 10px;
z-index: 999;
}
/* Status / result text overlay. */
.FontBox {
width: 100%;
height: 30px;
position: absolute;
left: 0;
top: 0;
text-align: center;
line-height: 30px;
color: #ffffff;
}
}

/*
Enter and leave animations can use different
durations and easing curves.
*/
.slideYY-fade-enter-active {
transition: all 0.3s ease-out;
}

.slideYY-fade-leave-active {
transition: all 0.3s cubic-bezier(1, 0.5, 0.8, 1);
}

.slideYY-fade-enter-from,
.slideYY-fade-leave-to {
transform: translateY(-80px);
opacity: 0;
}
}
</style>
  532.  
<style>
/* Global (unscoped) skin for the el-tooltip `effect="customized"` used in the template. */
.el-popper.is-customized {
/* Set padding to ensure the height is 32px */
padding: 6px 12px;
background: linear-gradient(90deg, rgb(103, 226, 226), rgb(3, 168, 168));
color: rgb(255, 255, 255);
}

/* Match the tooltip arrow to the gradient background. */
.el-popper.is-customized .el-popper__arrow::before {
background: linear-gradient(90deg, rgb(103, 226, 199), rgb(3, 168, 168));
right: 0;
color: rgb(255, 255, 255);
}
</style>