波形图:https://www.jb51.net/article/188545.htm
废话:想不到我的第一篇博客是关于前端,作为一名后端的小菜,前端方面肯定还有很多不足之处,如果文章有任何问题欢迎指正。感谢大家。好了!废话不多说下面讲一下需求。
需求:公司要求实现web端的录音并通过websocket实时上传至java后台,而且能通过vlc实时播放,简单一点讲就是我用网页在那一边讲话,一个大喇叭就能实时把我的话播出去,这样是不是通俗易懂呀,而且呢公司要求用mp3格式。当然啦!为了知道自己在讲话需要一个波形图,这里主要实现前半部分功能,后半部分臣妾也做不到呀!后半部分的vlc播放呢如果大家想知道,可以留言,届时可以给大家指条明路
前端实现:
引入:
这个跟大佬的js有点不一样,我在里面加了一点东西,而且在这个js里面引入了两个另外的js,lame.min.js和worker-realtime.js,这俩在大佬的代码里有
页面:
测试
recordmp3.js
(function (exports) {
    /**
     * MP3Recorder — 通过 getUserMedia 采集麦克风音频,交给 Web Worker(lamejs,
     * worker-realtime.js)实时编码为 MP3,同时用 AnalyserNode 在画布上绘制简易频谱波形。
     *
     * config:
     *   sampleRate {number}   期望采样率,默认 44100(最终以 AudioContext 实际采样率为准)
     *   bitRate    {number}   MP3 码率,默认 128
     *   debug      {boolean}  为 true 时输出调试日志
     *   funOk      {function} Worker 初始化成功回调
     *   funCancel  {function} 失败回调,参数为错误描述
     *
     * 依赖页面全局(由引用本文件的页面提供,此处未定义 —— NOTE(review) 需确认):
     *   canvasCtx —— 波形图 canvas 的 2D 上下文
     *   sendData() / useWebSocket() —— WebSocket 实时上传相关函数
     */
    var MP3Recorder = function (config) {
        var recorder = this;
        config = config || {};
        config.sampleRate = config.sampleRate || 44100;
        config.bitRate = config.bitRate || 128;
        // 统一各浏览器前缀的 getUserMedia(旧式 API,新浏览器建议 navigator.mediaDevices)
        navigator.getUserMedia = navigator.getUserMedia ||
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia ||
            navigator.msGetUserMedia;
        if (navigator.getUserMedia) {
            navigator.getUserMedia({
                audio: true
            },
            function (stream) {
                // 兼容旧版 WebKit 的 AudioContext 前缀
                var AudioCtx = window.AudioContext || window.webkitAudioContext;
                var context = new AudioCtx(),
                    microphone = context.createMediaStreamSource(stream),
                    // bufferSize 大小, 输入 channel 数, 输出 channel 数
                    processor = context.createScriptProcessor(16384, 1, 1),
                    mp3ReceiveSuccess, currentErrorCallback;
                var height = 100;
                var width = 400;
                var analyser = context.createAnalyser();
                analyser.fftSize = 1024;
                // 连接到音频源
                microphone.connect(analyser);
                analyser.connect(context.destination);
                // frequencyBinCount 返回的是 analyser 的 fftSize 的一半
                var bufferLength = analyser.frequencyBinCount;
                var dataArray = new Uint8Array(bufferLength);
                // BUG FIX: 原代码写成 window.requestAnimationFrame(draw) || window.webkitRequestAnimationFrame(draw),
                // 在只有 webkit 前缀的浏览器上第一个调用会直接抛 TypeError;应先选出可用的函数再调用。
                var requestAnimFrame = window.requestAnimationFrame || window.webkitRequestAnimationFrame;
                function draw() {
                    canvasCtx.clearRect(0, 0, width, height); // 清除画布
                    analyser.getByteFrequencyData(dataArray); // 将当前频率数据复制到传入其中的 Uint8Array
                    requestAnimFrame(draw); // 下一帧继续绘制
                    canvasCtx.fillStyle = '#000130';
                    canvasCtx.fillRect(0, 0, width, height);
                    var barWidth = (width / bufferLength) * 2;
                    var barHeight;
                    var x = 0;
                    var c = 2; // 最小条高,静音时仍显示一条基线
                    for (var i = 0; i < bufferLength; i++) {
                        barHeight = c + (dataArray[i] / 400) * height;
                        canvasCtx.fillStyle = 'rgb(0, 255, 30)';
                        // 以水平中线为基准上下对称绘制
                        canvasCtx.fillRect(x, height / 2 - barHeight / 2, barWidth, barHeight);
                        x += barWidth + 1;
                    }
                }
                draw();
                useWebSocket();
                // 以 AudioContext 的实际采样率为准,保证编码参数与采集一致
                config.sampleRate = context.sampleRate;
                processor.onaudioprocess = function (event) {
                    // 边录音边转换:把当前 PCM 块交给 Worker 编码,并触发上传
                    var array = event.inputBuffer.getChannelData(0);
                    realTimeWorker.postMessage({cmd: 'encode', buf: array});
                    sendData();
                };
                var realTimeWorker = new Worker('/js/recorder/worker-realtime.js');
                realTimeWorker.onmessage = function (e) {
                    switch (e.data.cmd) {
                        case 'init':
                            log('初始化成功');
                            if (config.funOk) {
                                config.funOk();
                            }
                            break;
                        case 'end':
                            // log 现在支持多参数,能真正打印出大小
                            log('MP3大小:', e.data.buf.length);
                            if (mp3ReceiveSuccess) {
                                mp3ReceiveSuccess(new Blob(e.data.buf, {type: 'audio/mp3'}));
                            }
                            break;
                        case 'error':
                            log('错误信息:' + e.data.error);
                            if (currentErrorCallback) {
                                currentErrorCallback(e.data.error);
                            }
                            break;
                        default:
                            log('未知信息:', e.data);
                    }
                };
                /**
                 * 结束编码并取回完整 MP3。
                 * @param onSuccess {function(Blob)} 收到 audio/mp3 Blob 时回调
                 * @param onError   {function(*)}    Worker 报错时回调
                 */
                recorder.getMp3Blob = function (onSuccess, onError) {
                    currentErrorCallback = onError;
                    mp3ReceiveSuccess = onSuccess;
                    realTimeWorker.postMessage({cmd: 'finish'});
                };
                // 开始录音:把麦克风接入 ScriptProcessor,onaudioprocess 开始回调
                recorder.start = function () {
                    if (processor && microphone) {
                        microphone.connect(processor);
                        processor.connect(context.destination);
                        log('开始录音');
                    }
                };
                // 停止录音:断开节点,onaudioprocess 停止回调
                recorder.stop = function () {
                    if (processor && microphone) {
                        microphone.disconnect();
                        processor.disconnect();
                        log('录音结束');
                    }
                };
                realTimeWorker.postMessage({
                    cmd: 'init',
                    config: {
                        sampleRate: config.sampleRate,
                        bitRate: config.bitRate
                    }
                });
            },
            function (error) {
                // 统一映射各浏览器的错误码/错误名(修正了原文“麦客风”的错别字)
                var msg;
                switch (error.code || error.name) {
                    case 'PERMISSION_DENIED':
                    case 'PermissionDeniedError':
                        msg = '用户拒绝访问麦克风';
                        break;
                    case 'NOT_SUPPORTED_ERROR':
                    case 'NotSupportedError':
                        msg = '浏览器不支持麦克风';
                        break;
                    case 'MANDATORY_UNSATISFIED_ERROR':
                    case 'MandatoryUnsatisfiedError':
                        msg = '找不到麦克风设备';
                        break;
                    default:
                        msg = '无法打开麦克风,异常信息:' + (error.code || error.name);
                        break;
                }
                if (config.funCancel) {
                    config.funCancel(msg);
                }
            });
        } else {
            if (config.funCancel) {
                config.funCancel('当前浏览器不支持录音功能');
            }
        }
        // 调试日志;BUG FIX: 原实现只打印第一个参数,这里转发全部参数
        function log() {
            if (config.debug) {
                console.log.apply(console, arguments);
            }
        }
    };
    exports.MP3Recorder = MP3Recorder;
})(window);
后端 WebSocket:
这里实现的是在录音期间持续接收音频字节并缓存,连接关闭时一次性保存为 mp3 文件
package com.jetosend.common.socket;
import com.jetosend.common.utils.Utils;
import org.springframework.stereotype.Component;
import javax.websocket.*;
import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.Hashtable;
import java.util.Map;
@ServerEndpoint("/send/{key}")
@Component
public class ServerSocket {
private static final Map connections = new Hashtable<>();
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
/***
* @Description:打开连接
* @Param: [id, 保存对方平台的资源编码
* session]
* @Return: void
* @Author: Liting
* @Date: 2019-10-10 09:22
*/
@OnOpen
public void onOpen(@PathParam("key") String id, Session session) {
System.out.println(id + "连上了");
connections.put(id, session);
}
/**
* 接收消息
*/
@OnMessage
public void onMessage(@PathParam("key") String id, InputStream inputStream) {
System.out.println("来自" + id);
try {
int rc = 0;
byte[] buff = new byte[100];
while ((rc = inputStream.read(buff, 0, 100)) > 0) {
byteArrayOutputStream.write(buff, 0, rc);
}
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* 异常处理
*
* @param throwable
*/
@OnError
public void onError(Throwable throwable) {
throwable.printStackTrace();
//TODO 日志打印异常
}
/**
* 关闭连接
*/
@OnClose
public void onClose(@PathParam("key") String id) {
System.out.println(id + "断开");
BufferedOutputStream bos = null;
FileOutputStream fos = null;
File file = null;
try {
file = new File("D:\\testtest.mp3");
//输出流
fos = new FileOutputStream(file);
//缓冲流
bos = new BufferedOutputStream(fos);
//将字节数组写出
bos.write(byteArrayOutputStream.toByteArray());
} catch (Exception e) {
e.printStackTrace();
} finally {
if (bos != null) {
try {
bos.close();
} catch (IOException e) {
e.printStackTrace();
}
}
if (fos != null) {
try {
fos.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
connections.remove(id);
}
实现效果:
总结
到此这篇关于js实现mp3录音通过websocket实时传送+简易波形图效果的文章就介绍到这了,更多相关js实现mp3录音内容请搜索自学php网以前的文章或继续浏览下面的相关文章希望大家以后多多支持自学php网!