参考文章:https://www.jb51.net/article/162517.htm
前言
因为项目的需求,需要实现一个录音的功能,我本着想要Vue来写这个项目,但是无奈,网上的vue实现录音的教程并不多,如果我不能解决的话,那么我将面临的是写原生js来写这个项目,好吧,我真的懒得写原生,只能迎难而上了。不过好在,最后是可以实现的.下面来一起看看吧
下面来看看核心js文件
// 兼容
window.URL = window.URL || window.webkitURL
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia
let HZRecorder = function (stream, config) {
config = config || {}
config.sampleBits = config.sampleBits || 8 // 采样数位 8, 16
config.sampleRate = config.sampleRate || (44100 / 6) // 采样率(1/6 44100)
let context = new (window.webkitAudioContext || window.AudioContext)()
let audioInput = context.createMediaStreamSource(stream)
let createScript = context.createScriptProcessor || context.createJavaScriptNode
this.source = context.createBufferSource();
let recorder = createScript.apply(context, [4096, 1, 1])
let audioData = {
size: 0, // 录音文件长度
buffer: [], // 录音缓存
inputSampleRate: context.sampleRate, // 输入采样率
inputSampleBits: 16, // 输入采样数位 8, 16
outputSampleRate: config.sampleRate, // 输出采样率
oututSampleBits: config.sampleBits, // 输出采样数位 8, 16
input: function (data) {
this.buffer.push(new Float32Array(data))
this.size += data.length
},
compress: function () { // 合并压缩
// 合并
let data = new Float32Array(this.size)
let offset = 0
for (let i = 0; i < this.buffer.length; i++) {
data.set(this.buffer[i], offset)
offset += this.buffer[i].length
}
// 压缩
let compression = parseInt(this.inputSampleRate / this.outputSampleRate)
let length = data.length / compression
let result = new Float32Array(length)
let index = 0; let j = 0
while (index < length) {
result[index] = data[j]
j += compression
index++
}
return result
},
encodeWAV: function () {
let sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate)
let sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits)
let bytes = this.compress()
let dataLength = bytes.length * (sampleBits / 8)
let buffer = new ArrayBuffer(44 + dataLength)
let data = new DataView(buffer)
let channelCount = 1// 单声道
let offset = 0
let writeString = function (str) {
for (let i = 0; i < str.length; i++) {
data.setUint8(offset + i, str.charCodeAt(i))
}
}
// 资源交换文件标识符
writeString('RIFF'); offset += 4
// 下个地址开始到文件尾总字节数,即文件大小-8
data.setUint32(offset, 36 + dataLength, true); offset += 4
// WAV文件标志
writeString('WAVE'); offset += 4
// 波形格式标志
writeString('fmt '); offset += 4
// 过滤字节,一般为 0x10 = 16
data.setUint32(offset, 16, true); offset += 4
// 格式类别 (PCM形式采样数据)
data.setUint16(offset, 1, true); offset += 2
// 通道数
data.setUint16(offset, channelCount, true); offset += 2
// 采样率,每秒样本数,表示每个通道的播放速度
data.setUint32(offset, sampleRate, true); offset += 4
// 波形数据传输率 (每秒平均字节数) 单声道×每秒数据位数×每样本数据位/8
data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4
// 快数据调整数 采样一次占用字节数 单声道×每样本的数据位数/8
data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2
// 每样本数据位数
data.setUint16(offset, sampleBits, true); offset += 2
// 数据标识符
writeString('data'); offset += 4
// 采样数据总数,即数据总大小-44
data.setUint32(offset, dataLength, true); offset += 4
// 写入采样数据
if (sampleBits === 8) {
for (let i = 0; i < bytes.length; i++ , offset++) {
let s = Math.max(-1, Math.min(1, bytes[i]))
let val = s < 0 ? s * 0x8000 : s * 0x7FFF
val = parseInt(255 / (65535 / (val + 32768)))
data.setInt8(offset, val, true)
}
} else {
for (let i = 0; i < bytes.length; i++ , offset += 2) {
let s = Math.max(-1, Math.min(1, bytes[i]))
data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
}
}
return new Blob([data], { type: 'audio/mp3' })
}
}
// 开始录音
this.start = function () {
audioInput.connect(recorder)
recorder.connect(context.destination)
}
// 停止
this.stop = function () {
recorder.disconnect()
}
// 获取音频文件
this.getBlob = function () {
this.stop()
return audioData.encodeWAV()
}
// 回放
this.play = function (audio) {
let downRec = document.getElementById('downloadRec')
downRec.href = window.URL.createObjectURL(this.getBlob())
downRec.download = new Date().toLocaleString() + '.mp3'
audio.src = window.URL.createObjectURL(this.getBlob())
}
// 上传
this.upload = function (url, callback) {
let fd = new FormData()
fd.append('audioData', this.getBlob())
let xhr = new XMLHttpRequest()
/* eslint-disable */
if (callback) {
xhr.upload.addEventListener('progress', function (e) {
callback('uploading', e)
}, false)
xhr.addEventListener('load', function (e) {
callback('ok', e)
}, false)
xhr.addEventListener('error', function (e) {
callback('error', e)
}, false)
xhr.addEventListener('abort', function (e) {
callback('cancel', e)
}, false)
}
/* eslint-disable */
xhr.open('POST', url)
xhr.send(fd)
}
// 音频采集
HZRecorder.recorder = recorder;
// recorder.onaudioprocess = function (e) { //开始处理音频
// var buffer = e.inputBuffer.getChannelData(0); //获得缓冲区的输入音频,转换为包含了PCM通道数据的32位浮点数组
// // 创建变量并迭代来获取最大的buffer中的音量值
// var maxVal = 0;
// for (var i = 0; i < buffer.length; i++) {
// if (maxVal < buffer[i]) {
// maxVal = buffer[i];
// }
// }
// //显示音量值 在这获取到音量值
// // 但是我怎么让外面的test.vue获取到呢
// // console.log(maxVal * 10000)
// };
}
// 抛出异常
HZRecorder.throwError = function (message) {
alert(message)
throw new function () { this.toString = function () { return message } }()
}
// 是否支持录音
HZRecorder.canRecording = (navigator.getUserMedia != null)
// 获取录音机
HZRecorder.get = function (callback, config) {
if (callback) {
if (navigator.getUserMedia) {
navigator.getUserMedia(
{ audio: true } // 只启用音频
, function (stream) {
let rec = new HZRecorder(stream, config);
callback(rec)
}
, function (error) {
switch (error.code || error.name) {
case 'PERMISSION_DENIED':
case 'PermissionDeniedError':
HZRecorder.throwError('用户拒绝提供信息。')
break
case 'NOT_SUPPORTED_ERROR':
case 'NotSupportedError':
HZRecorder.throwError('浏览器不支持硬件设备。')
break
case 'MANDATORY_UNSATISFIED_ERROR':
case 'MandatoryUnsatisfiedError':
HZRecorder.throwError('无法发现指定的硬件设备。')
break
default:
HZRecorder.throwError('无法打开麦克风。异常信息:' + (error.code || error.name))
break
}
})
} else {
HZRecorder.throwErr('当前浏览器不支持录音功能。'); return
}
}
}
export default HZRecorder
HZRecorder.recorder = recorder;
我在这一步,把recorder给导出了 ,因为我要做声纹的功能,如果不导出的话,我再外面是拿不到音量值
Recorder.vue
<template>
<div class="layouts-recorder">
<div class="circle">
<div class="mask" @click="beginOrStopRecorder"></div>
</div>
<button class="stop" @click="stop">stop</button>
<div class="spectrum">
<div class="spectrum-left">
<span :class="{red:this.voice>=1}"></span>
<span :class="{red:this.voice>=2}"></span>
<span :class="{red:this.voice>=3}"></span>
<span :class="{red:this.voice>=4}"></span>
<span :class="{red:this.voice>=5}"></span>
<span :class="{red:this.voice>=6}"></span>
<span :class="{red:this.voice>=7}"></span>
<span :class="{red:this.voice>=8}"></span>
<span :class="{red:this.voice>=9}"></span>
<span :class="{red:this.voice>=10}"></span>
</div>
<div class="spectrum-right">
<span :class="{red:this.voice>=1}"></span>
<span :class="{red:this.voice>=2}"></span>
<span :class="{red:this.voice>=3}"></span>
<span :class="{red:this.voice>=4}"></span>
<span :class="{red:this.voice>=5}"></span>
<span :class="{red:this.voice>=6}"></span>
<span :class="{red:this.voice>=7}"></span>
<span :class="{red:this.voice>=8}"></span>
<span :class="{red:this.voice>=9}"></span>
<span :class="{red:this.voice>=10}"></span>
</div>
</div>
</div>
</template>
<script>
import recording from "../assets/js/record.js";
export default {
watch: {
DB(newDb) {
if (newDb >= 1 && newDb <= 20) {
// 显示一格音谱
this.voice = 1;
} else if (newDb >= 21 && newDb <= 40) {
this.voice = 2;
// 显示二格音谱
} else if (newDb >= 41 && newDb <= 60) {
this.voice = 3;
// 显示三格音谱
} else if (newDb >= 71 && newDb <= 90) {
this.voice = 4;
// 显示四格音谱
} else if (newDb >= 91 && newDb <= 120) {
this.voice = 5;
// 显示五格音谱
} else if (newDb >= 121 && newDb <= 140) {
this.voice = 6;
// 显示六格音谱
} else if (newDb >= 141 && newDb <= 160) {
this.voice = 7;
// 显示七格音谱
} else if (newDb >= 161 && newDb <= 180) {
this.voice = 8;
// 显示八格音谱
} else if (newDb >= 181 && newDb <= 200) {
this.voice = 9;
// 显示九格音谱
} else if (newDb > 200) {
this.voice = 10;
// 显示全部音谱
}
}
},
methods: {
clearTimer() {
if (this.interval) {
this.num = 60;
clearInterval(this.interval);
}
},
beginOrStopRecorder() {
if (this.interval) {
// 说明已经开始了录音
this.stop();
} else {
this.clearTimer();
this.startTime = new Date().getTime()
var that = this;
// 开始录音
recording.get(rec => {
// 当首次按下时,要获取浏览器的麦克风权限,所以这时要做一个判断处理
if (rec) {
// 首次按下,只调用一次
if (this.flag) {
this.mouseEnd();
this.flag = false;
} else {
this.recorder = rec;
this.interval = setInterval(() => {
if (this.num <= 0) {
this.recorder.stop();
this.num = 60;
this.clearTimer();
} else {
this.num--;
this.time = "松开结束(" + this.num + "秒)";
this.recorder.start();
// 音频采集
recording.recorder.onaudioprocess = function(e) {
//开始处理音频
var buffer = e.inputBuffer.getChannelData(0); //获得缓冲区的输入音频,转换为包含了PCM通道数据的32位浮点数组
// 创建变量并迭代来获取最大的buffer中的音量值
var maxVal = 0;
for (var i = 0; i < buffer.length; i++) {
if (maxVal < buffer[i]) {
maxVal = buffer[i];
}
}
//显示音量值 在这获取到音量值
that.DB = parseInt(maxVal * 10000);
};
}
}, 1000);
}
}
});
}
},
// 停止录音的方法
stop() {
this.clearTimer();
this.endTime = new Date().getTime();
this.recorderTime = Math.round((this.endTime -this.startTime)/1000);
if(this.recorderTime<1){
console.log('录音时间过短,请重新录入');
return ;
}
if (this.recorder) {
this.recorder.stop();
// 重置说话时间
this.num = 60;
this.time = "按住说话(" + this.num + "秒)";
// 获取语音二进制文件
let blob = this.recorder.getBlob();
let url = URL.createObjectURL(blob);
// 把得到的url给audio
this.form.audioUrl = url;
this.voice = 0;
}
}
},
data() {
return {
DB: 0,
form: {
time: "按住说话(60秒)",
audioUrl: ""
},
num: 60, // 按住说话时间
recorder: null,
interval: "",
audioFileList: [], // 上传语音列表
startTime: "", // 语音开始时间
endTime: "", // 语音结束
recorderTime: 0, // 录音时间
voice: 0
};
}
};
</script>
<style lang="less">
.red {
background-color: red;
}
.spectrum-left {
span {
display: inline-block;
}
span:nth-child(1) {
width: 6px;
height: 14px;
border: 1px solid #000;
}
span:nth-child(2) {
width: 6px;
height: 16px;
border: 1px solid #000;
}
span:nth-child(3) {
width: 6px;
height: 18px;
border: 1px solid #000;
}
span:nth-child(4) {
width: 6px;
height: 20px;
border: 1px solid #000;
}
span:nth-child(5) {
width: 6px;
height: 15px;
border: 1px solid #000;
}
span:nth-child(6) {
width: 6px;
height: 15px;
border: 1px solid #000;
}
span:nth-child(7) {
width: 6px;
height: 15px;
border: 1px solid #000;
}
span:nth-child(8) {
width: 6px;
height: 18px;
border: 1px solid #000;
}
span:nth-child(9) {
width: 6px;
height: 12px;
border: 1px solid #000;
}
span:nth-child(10) {
width: 6px;
height: 14px;
border: 1px solid #000;
}
}
.layouts-recorder {
margin-top: 30px;
}
.stop {
margin-left: 130px;
}
.circle {
border-radius: 50%;
width: 50px;
height: 50px;
display: flex;
justify-content: center;
align-items: center;
border: 1px solid #000;
}
.mask {
border-radius: 50%;
width: 30px;
height: 30px;
background-color: red;
}
.active .mask {
border-radius: 2px;
}
</style>
因为我再recorder.js中导出了,所以就可以在Recorder.vue中拿到 并绑定了事件 这样我就可以在Recorder.vue中拿到我想要的音量值
拿到音量值就可以进行区分了,实现效果
2.png最终实现的效果
好啦 到底demo就算完成啦 后期的样式自己的设计稿来修改 但是功能已经实现了!
网友评论