<!-- urbanLifeline_YanAn / src / views / voice / recordPage / index.vue -->
<!-- Last change: zhangqy, 22 Oct — 调整语音识别 (voice-recognition adjustments) -->
<template>
  <div id="as">
    <!-- <div class="record-page"> -->

    <div class="recordBtmBox" @click="Record()">
      <!-- Four visual states, toggled by `process` -->
      <!-- 0 — idle: waiting for the user to click -->
      <img
        class="DongImg"
        v-show="process == 0"
        src="@/assets/images/voice/ceshi1.png"
        style="opacity: 0.3; bottom: 5px"
        alt=""
      />
      <el-tooltip content="点击开启智能语音" effect="customized">
        <Transition name="slideYY-fade">
          <img
            v-show="process == 0"
            class="MKF"
            src="@/assets/images/voice/MaiKeFeng.png"
            alt=""
          />
        </Transition>
      </el-tooltip>
      <!-- 1 — capturing: recording the user's voice -->
      <img
        class="DongImg"
        v-show="process == 1"
        src="@/assets/images/voice/ceshi2.png"
        alt=""
      />
      <!-- 2 — waiting: speech recognition in progress -->
      <img
        class="DongImg"
        v-show="process == 2"
        src="@/assets/images/voice/ceshi3.png"
        alt=""
      />
      <!-- 3 — done: result shown; clicking can trigger recognition again -->
      <img
        class="DongImg"
        v-show="process == 3"
        src="@/assets/images/voice/ceshi1.png"
        alt=""
      />
      <div class="FontBox">{{ nowword }}</div>
    </div>
  </div>
</template>

<script setup name="as">
// BUG FIX: `watch`, `onBeforeUnmount` and `getCurrentInstance` were used below
// but never imported — that only works with an auto-import plugin and breaks
// without one. Import everything explicitly and group the imports together.
import {
  ref,
  reactive,
  toRefs,
  watch,
  onMounted,
  onBeforeUnmount,
  getCurrentInstance,
} from "vue";
import lamejs from "lamejs";
import Recorder from "js-audio-recorder";
import useUserStore from "@/store/modules/user";
import bus from "@/bus";
import { parseTime } from "@/utils/ruoyi";

const { proxy } = getCurrentInstance();
const userStore = useUserStore();

// Reconnect guard state for the recognition websocket (see initRecognizeWs).
const lockReconnect = ref(null);
const timeoutnum = ref(null);

const ComShowID = ref(true); // id of the currently shown big-screen menu
const props = defineProps({
  ComShowID: {
    type: Number,
  },
});
// Mirror the prop into the local ref so handlers can read it synchronously.
watch(
  () => props.ComShowID,
  () => {
    ComShowID.value = props.ComShowID;
  },
  { immediate: true }
);

// Single shared recorder instance.
var recorder = new Recorder({
  sampleBits: 16, // sample size: 8 or 16 (default 16)
  sampleRate: 48000, // sample rate; browser dependent (Chrome defaults to 48000)
  numChannels: 1, // channels: 1 or 2 (default 1)
  // compiling: false, // (0.x only; being added in 1.x) transcode while recording
});

// const shibieword = ref("");
const nowword = ref(""); // prompt text for the current recognition step
const process = ref(0); // 0: idle  1: recording  2: recognizing  3: done
const data = reactive({
  recordStatus: null, // current position in the recording workflow
  recognizeWs: null, // recognition websocket instance
});

onMounted(() => {
  // Open the recognition websocket as soon as the component mounts.
  initRecognizeWs();
});
onBeforeUnmount(() => {
  // Close the socket so the server can release the session.
  data.recognizeWs && data.recognizeWs.close();
});
// Voice-recognition workflow entry point (bound to the record button).
const Record = () => {
  switch (process.value) {
    case 0:
      // Idle -> start recording.
      process.value = 1;
      startRecorder("begin");
      break;
    case 3:
      // Result is on screen: fall back to the idle state after five seconds.
      setTimeout(() => {
        process.value = 0;
        nowword.value = "";
      }, 5000);
      break;
    default:
      // Clicks during intermediate states (1/2) are deliberately ignored.
      break;
  }
};
// Initialise (or re-initialise) the voice-command websocket connection.
function initRecognizeWs() {
  if (data.recognizeWs) {
    // BUG FIX: the old code invoked the onclose *handler* directly
    // (`onclose()`); actually close the previous socket so we never hold
    // two live connections after a reconnect.
    data.recognizeWs.close();
  }
  let wsuri;
  if (window.location.protocol.includes("https")) {
    // production environment
    wsuri = `wss://${window.location.host}/websocket/voiceWebsocket`;
  } else {
    // local environment
    // wsuri = `ws://192.168.20.145:13002/voiceWebsocket`;
    wsuri = `wss://server2.wh-nf.cn:8088/websocket/voiceWebsocket`;
    // wsuri = `wss://jingkai.wh-nf.cn:8986/voicesWebocket`;
  }
  data.recognizeWs = new WebSocket(wsuri);

  // Connection established.
  data.recognizeWs.onopen = function (e) {
    console.log("连接成功", e);
  };

  // Connection failed — schedule a reconnect.
  data.recognizeWs.onerror = function (evt) {
    console.log("连接失败", evt);
    reconnect();
  };

  data.recognizeWs.onmessage = function (e) {
    // The server acks the connection with a plain-text greeting; skip it.
    if (e.data == "客户端连接成功") return;
    // Named `msg` to avoid shadowing the reactive `data` defined above.
    const msg = JSON.parse(e.data);
    const params = msg.data;
    console.log(parseTime(new Date()), "Websocket接收值", msg);
    console.log("接收的data内部的data", params);

    // Only act on completed recognition results.
    if (params.recognitionState != 1) return;

    if (params.recognitionActionCode == "error") {
      // Unrecognised command: prompt and return to idle after 3 s.
      nowword.value = `指令未识别,请您再说一遍`;
      setTimeout(() => {
        process.value = 0;
        nowword.value = "";
      }, 3000);
      return;
    }

    nowword.value = `成功识别语音,${params.recognitionResult}`;
    if (process.value == 2) {
      // Advance to "done" and let Record() schedule the reset to idle.
      process.value = 3;
      Record();
    }
    if (params.recognitionActionCode == "open") {
      // "Open" commands: layer toggles and theme-page switches.
      switch (params.recognitionDataSourceCode) {
        // layer control
        case "YuShuiFenQu":
          // clear all layers first
          bus.emit("clearAllLayer");
          bus.emit("SetLayerShow", ["雨水分区"]);
          break;
        case "RanqiGuanWang":
          // clear all layers first
          bus.emit("clearAllLayer");
          bus.emit("SetLayerShow", ["燃气管网"]);
          break;
        case "fangZhenMap":
          bus.emit("SetLayerShow", ["仿真地图"]);
          break;
        case "buildingLayer":
          bus.emit("SetLayerShow", ["三维建筑"]);
          break;
        case "ranqiRisk":
          bus.emit("SetLayerShow", ["燃气风险评估"]);
          break;
        case "paishuiRisk":
          bus.emit("SetLayerShow", ["排水风险评估"]);
          break;
        // theme-page switching
        case "Menu_FXPL":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "排水安全",
              id: 3,
            },
            MenuData2: {
              name: "防汛排涝",
              id: 7,
            },
            num: 2,
          });
          break;
        case "Menu_ZHSD":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "隧道安全",
              id: 5,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_ZHQL":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "桥梁安全",
              id: 4,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_ZHHM":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "智慧海绵",
              id: 6,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_ZHPS":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "排水安全",
              id: 3,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_ZHRQ":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "燃气安全",
              id: 2,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_ZTGL":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "总体概览",
              id: 1,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_JCFX":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "监测分析",
              id: 61,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Menu_JSPG":
          bus.emit("ChangeZhuanTiTu", {
            MenuData1: {
              name: "建设评估",
              id: 62,
            },
            MenuData2: null,
            num: 1,
          });
          break;
        case "Open_XQHG":
          // Flood review — only available on the drainage theme.
          if (ComShowID.value == 7) {
            bus.emit("openUserDialog");
          } else {
            // BUG FIX: removed a stray `debugger;` statement left here.
            if (userStore.ZhuanTiType == 1) {
              // Switch to the drainage theme first, then open the dialog
              // once the theme has had a second to mount.
              bus.emit("ChangeZhuanTiTu", {
                MenuData1: {
                  name: "排水安全",
                  id: 3,
                },
                MenuData2: {
                  name: "防汛排涝",
                  id: 7,
                },
                num: 2,
              });
              setTimeout(() => {
                bus.emit("openUserDialog");
              }, 1000);
            }
          }
          break;
      }
    } else if (params.recognitionActionCode == "close") {
      // "close" actions: not implemented yet
    } else if (params.recognitionActionCode == "detail") {
      // "detail" actions: not implemented yet
    } else {
      nowword.value = `指令未识别,请您再说一遍`;
    }
  };
  // Connection closed.
  data.recognizeWs.onclose = function (e) {
    console.log("断开连接");
  };
  // Schedule a single, debounced reconnect attempt.
  function reconnect() {
    if (lockReconnect.value) {
      return;
    }
    lockReconnect.value = true;
    // Delay so repeated failures do not hammer the server.
    timeoutnum.value && clearTimeout(timeoutnum.value);
    timeoutnum.value = setTimeout(() => {
      // BUG FIX: the old code only released the lock here and never
      // actually reconnected, so one error permanently killed the socket.
      initRecognizeWs();
      lockReconnect.value = false;
    }, 5000);
  }
}
// Halt recording and immediately hand the clip off for recognition.
function stopRecorderAndupload(val) {
  data.recordStatus = val; // remember where the workflow stopped
  nowword.value = "智能识别中...";
  console.log(`上传录音`, parseTime(new Date()));
  recorder.stop(); // finalise the buffered audio
  uploadaudioformwebSocket(); // then ship it over the websocket
}
/**
 * Recording controls.
 */
// Ask for microphone permission, start recording, and automatically stop
// and upload after three seconds.
function startRecorder(val) {
  data.recordStatus = val;
  // Request microphone permission first.
  Recorder.getPermission().then(
    () => {
      nowword.value = "开始录音";
      recorder.start();
      // Recording auto-stops after three seconds.
      setTimeout(() => {
        // BUG FIX: the old code set `process.value = 2` *before* testing it,
        // so the "skip if the user already moved on" guard could never fail.
        // Only advance when we are still in the recording state (1).
        if (process.value == 1) {
          process.value = 2;
          // The websocket reply handler sets process to 3 and calls Record().
          stopRecorderAndupload("stop");
        }
      }, 3000);
    },
    (error) => {
      proxy.$modal.msgError("请先允许该网页使用麦克风");
      // console.log(`${error.name} : ${error.message}`);
    }
  );
}
// Encode the captured WAV as MP3, base64 it, and send it over the
// recognition websocket.
function uploadaudioformwebSocket() {
  const wavData = recorder.getWAV();
  const mp3Blob = convertToMp3(wavData);
  // recorder.download(mp3Blob, 'recorder', 'mp3');
  mp3ToBase64(mp3Blob).then((base64Audio) => {
    const payload = JSON.stringify({
      createBy: userStore.userInfo.userName,
      voiceType: "mp3",
      data: base64Audio,
      businessSourceCode: "dpyysb",
    });
    data.recognizeWs.send(payload);
  });
}
// Re-encode the recorder's WAV output as a 128 kbps MP3 blob.
function convertToMp3(wavDataView) {
  // The WAV header tells us the channel count / sample rate actually
  // recorded (could also come from the recorder config).
  const { channels, sampleRate } = lamejs.WavHeader.readHeader(wavDataView);
  const encoder = new lamejs.Mp3Encoder(channels, sampleRate, 128);

  // Raw per-channel PCM, reinterpreted as 16-bit samples.
  const pcm = recorder.getChannelData();
  const left =
    pcm.left && new Int16Array(pcm.left.buffer, 0, pcm.left.byteLength / 2);
  const right =
    pcm.right && new Int16Array(pcm.right.buffer, 0, pcm.right.byteLength / 2);

  const total = left.length + (right ? right.length : 0);
  const frameSize = 1152; // samples fed to the encoder per MP3 frame
  const chunks = [];
  for (let offset = 0; offset < total; offset += frameSize) {
    const leftSlice = left.subarray(offset, offset + frameSize);
    const encoded =
      channels === 2
        ? encoder.encodeBuffer(leftSlice, right.subarray(offset, offset + frameSize))
        : encoder.encodeBuffer(leftSlice);
    if (encoded.length > 0) {
      chunks.push(encoded);
    }
  }
  // Flush whatever the encoder still buffers.
  const tail = encoder.flush();
  if (tail.length > 0) {
    chunks.push(tail);
  }
  return new Blob(chunks, { type: "audio/mp3" });
}
// Turn an MP3 blob into a base64 data-URL string.
function mp3ToBase64(blob) {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    // Register both handlers before kicking off the (async) read.
    reader.onerror = () => reject(new Error("blobToBase64 error"));
    reader.onload = (event) => resolve(event.target.result);
    reader.readAsDataURL(blob);
  });
}
// Reset the prompt text and stop any in-progress recording.
// NOTE(review): no caller is visible in this file — presumably wired to a
// dialog close event elsewhere; confirm before removing.
function closedia() {
  nowword.value = "你好,请点击【开始录制】,进行语音录制!";
  stopRecorder();
}
// Stop recording without uploading anything.
function stopRecorder(val) {
  data.recordStatus = val; // undefined when invoked from closedia()
  nowword.value = "录音结束";
  recorder.stop();
}
</script>

<style lang="scss" scoped>
/* Anchor the record bar to the bottom centre of the screen. */
#as {
  position: absolute;
  bottom: 0px;
  left: 50%;
  z-index: 999;
  margin-left: -200px; /* half of the 400px bar width, to centre it */

  .recordBtmBox {
    position: relative;
    z-index: 1000;
    width: 400px;
    height: 40px;
    cursor: pointer;

    /* animated state image (ceshi1/2/3.png) */
    .DongImg {
      position: absolute;
      width: 400px;
      height: 30px;
      left: 0;
      bottom: 0;
    }

    /* microphone icon shown in the idle state */
    .MKF {
      position: absolute;
      width: 20px;
      height: 20px;
      left: 190px;
      top: 10px;
      z-index: 999;
    }
    /* prompt text overlay */
    .FontBox {
      width: 100%;
      height: 30px;
      position: absolute;
      left: 0;
      top: 0;
      text-align: center;
      line-height: 30px;
      color: #ffffff;
    }
  }

  /*
    Enter and leave animations may use different
    durations and easing curves.
  */
  .slideYY-fade-enter-active {
    transition: all 0.3s ease-out;
  }

  .slideYY-fade-leave-active {
    transition: all 0.3s cubic-bezier(1, 0.5, 0.8, 1);
  }

  .slideYY-fade-enter-from,
  .slideYY-fade-leave-to {
    transform: translateY(-80px);
    opacity: 0;
  }
}
</style>

<style>
/* Global (unscoped) theme for the el-tooltip effect="customized" above. */
.el-popper.is-customized {
  /* Set padding to ensure the height is 32px */
  padding: 6px 12px;
  background: linear-gradient(90deg, rgb(103, 226, 226), rgb(3, 168, 168));
  color: rgb(255, 255, 255);
}

.el-popper.is-customized .el-popper__arrow::before {
  background: linear-gradient(90deg, rgb(103, 226, 199), rgb(3, 168, 168));
  right: 0;
  color: rgb(255, 255, 255);
}
</style>