Pure JavaScript Audio Merging and Concatenation Demo Page
Demo:
//zxx: concatenate first, then merge (the merge button stays disabled until the clips have been concatenated)
Code:
HTML:
<h4>Original audio</h4>
<p>Paragraph 1: <audio src="./assets/1.wav" controls></audio></p>
<p>Paragraph 2: <audio src="./assets/2.wav" controls></audio></p>
<p>Background music: <audio src="./assets/bgmusic.wav" controls></audio></p>
<h4>Operation and preview</h4>
<button id="audioConcat">Concatenate the two clips</button>
<div id="outputConcat" class="output"></div>
<button id="audioMerge" disabled>Merge with background music</button>
<div id="outputMerge" class="output"></div>
JavaScript:
// Audio clip URLs
const audioSrc = [
'./assets/1.wav',
'./assets/2.wav'
];
const bgAudioSrc = './assets/bgmusic.wav';
// AudioContext
const audioContext = new AudioContext();
// Fetch an audio file and decode it into an AudioBuffer
// (errors from fetch() or decodeAudioData() propagate through the returned promise)
const getAudioBuffer = (src) => {
    return fetch(src)
        .then(response => response.arrayBuffer())
        .then(arrayBuffer => audioContext.decodeAudioData(arrayBuffer));
};
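// Note (an addition, not part of the original demo): browsers suspend an
// AudioContext created before any user gesture. This demo decodes only after
// button clicks, so it usually works, but a defensive sketch looks like this:
const ensureAudioContextRunning = () => {
    // resume() resolves immediately if the context is already running
    return audioContext.state === 'suspended' ? audioContext.resume() : Promise.resolve();
};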
// Concatenate a list of AudioBuffers end to end
const concatAudio = (audioBufferList) => {
    // Highest channel count among the inputs
    const maxChannelNumber = Math.max(...audioBufferList.map(audioBuffer => audioBuffer.numberOfChannels));
    // Total length in sample frames
    const totalLength = audioBufferList.map(buffer => buffer.length).reduce((lenA, lenB) => lenA + lenB, 0);
    // Create the output AudioBuffer (assumes all inputs share the first buffer's sample rate)
    const newAudioBuffer = audioContext.createBuffer(maxChannelNumber, totalLength, audioBufferList[0].sampleRate);
    // Copy each input's channel data into the output, one clip after another
    let offset = 0;
    audioBufferList.forEach(audioBuffer => {
        for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
            newAudioBuffer.getChannelData(channel).set(audioBuffer.getChannelData(channel), offset);
        }
        offset += audioBuffer.length;
    });
    return newAudioBuffer;
};
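// Note (assumption, not part of the original demo): concatAudio() copies raw
// sample frames, so it implicitly assumes all inputs share one sample rate.
// If they might differ, a minimal resampling sketch using OfflineAudioContext:
const resampleAudioBuffer = (audioBuffer, targetSampleRate) => {
    const offlineContext = new OfflineAudioContext(
        audioBuffer.numberOfChannels,
        Math.ceil(audioBuffer.duration * targetSampleRate),
        targetSampleRate
    );
    const source = offlineContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(offlineContext.destination);
    source.start();
    // startRendering() resolves with a new AudioBuffer at targetSampleRate
    return offlineContext.startRendering();
};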
// Mix a list of AudioBuffers so they play simultaneously
const mergeAudio = (audioBufferList) => {
    // Longest duration among the inputs, in seconds
    const maxDuration = Math.max(...audioBufferList.map(audioBuffer => audioBuffer.duration));
    // Highest channel count among the inputs
    const maxChannelNumber = Math.max(...audioBufferList.map(audioBuffer => audioBuffer.numberOfChannels));
    // Create the output AudioBuffer, long enough for the longest input
    // (createBuffer() needs an integer frame count; assumes a shared sample rate)
    const newAudioBuffer = audioContext.createBuffer(maxChannelNumber, Math.ceil(audioBufferList[0].sampleRate * maxDuration), audioBufferList[0].sampleRate);
    // Sum each input's samples into the output; getChannelData() returns a live
    // view of the buffer's data, so writing into outputData is all that is needed
    audioBufferList.forEach(audioBuffer => {
        for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
            const outputData = newAudioBuffer.getChannelData(channel);
            const bufferData = audioBuffer.getChannelData(channel);
            for (let i = bufferData.length - 1; i >= 0; i--) {
                outputData[i] += bufferData[i];
            }
        }
    });
    return newAudioBuffer;
};
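// Note (assumption, not part of the original demo): summing two full-scale
// signals can push samples outside [-1, 1]; bufferToWave() clamps them, which
// audibly distorts. A simple safeguard is to attenuate one buffer beforehand:
const applyGain = (audioBuffer, gain) => {
    for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
        const data = audioBuffer.getChannelData(channel);
        for (let i = 0; i < data.length; i++) {
            data[i] *= gain;
        }
    }
    return audioBuffer;
};
// e.g. mergeAudio([concatAudioBuffer, applyGain(bgAudioBuffer, 0.5)])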
// Kept in the outer scope because the merge step needs it later
let concatAudioBuffer = null;
// Concatenate button (elements are referenced via their id globals)
audioConcat.onclick = async function () {
    if (this.classList.contains('loading')) {
        return;
    }
    this.classList.add('loading');
    // Fetch and decode both clips, then concatenate them
    const arrBufferList = await Promise.all(audioSrc.map(src => getAudioBuffer(src)));
    concatAudioBuffer = concatAudio(arrBufferList);
    // Convert to a WAV blob and preview it
    const newAudioSrc = URL.createObjectURL(bufferToWave(concatAudioBuffer, concatAudioBuffer.length));
    outputConcat.innerHTML = `<audio src="${newAudioSrc}" controls></audio>`;
    // Remove the loading state
    this.classList.remove('loading');
    // Enable the merge button
    audioMerge.disabled = false;
};
// Merge button
audioMerge.onclick = async function () {
    if (!concatAudioBuffer || this.classList.contains('loading')) {
        return;
    }
    this.classList.add('loading');
    // Fetch and decode the background music
    const bgAudioBuffer = await getAudioBuffer(bgAudioSrc);
    // Mix the concatenated clips with the background music
    const newAudioBuffer = mergeAudio([concatAudioBuffer, bgAudioBuffer]);
    // Convert to a WAV blob and create an object URL
    const newAudioSrc = URL.createObjectURL(bufferToWave(newAudioBuffer, newAudioBuffer.length));
    // Preview the merged audio
    outputMerge.innerHTML = `<audio src="${newAudioSrc}" controls></audio>`;
    // Remove the loading state
    this.classList.remove('loading');
};
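// Note (an addition, not part of the original demo): every click creates a new
// object URL, and the underlying Blob stays alive until that URL is revoked.
// A sketch that releases the previous preview before storing a new one:
const objectUrls = { concat: null, merge: null };
const swapObjectUrl = (key, url) => {
    if (objectUrls[key]) {
        URL.revokeObjectURL(objectUrls[key]);
    }
    objectUrls[key] = url;
    return url;
};
// e.g. const newAudioSrc = swapObjectUrl('merge', URL.createObjectURL(blob));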
// Convert an AudioBuffer to a WAV (16-bit PCM) Blob; len is the frame count
function bufferToWave(abuffer, len) {
    var numOfChan = abuffer.numberOfChannels,
        length = len * numOfChan * 2 + 44,
        buffer = new ArrayBuffer(length),
        view = new DataView(buffer),
        channels = [], i, sample,
        offset = 0,
        pos = 0;
    // write WAVE header
    setUint32(0x46464952); // "RIFF"
    setUint32(length - 8); // file length - 8
    setUint32(0x45564157); // "WAVE"
    setUint32(0x20746d66); // "fmt " chunk
    setUint32(16);         // chunk length = 16
    setUint16(1);          // PCM (uncompressed)
    setUint16(numOfChan);
    setUint32(abuffer.sampleRate);
    setUint32(abuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
    setUint16(numOfChan * 2); // block-align
    setUint16(16);            // 16-bit (hardcoded in this demo)
    setUint32(0x61746164);    // "data" chunk
    setUint32(length - pos - 4); // chunk length
    // write interleaved data
    for (i = 0; i < numOfChan; i++)
        channels.push(abuffer.getChannelData(i));
    while (pos < length) {
        // interleave channels
        for (i = 0; i < numOfChan; i++) {
            // clamp
            sample = Math.max(-1, Math.min(1, channels[i][offset]));
            // scale to 16-bit signed int
            sample = (sample < 0 ? sample * 32768 : sample * 32767) | 0;
            // write 16-bit sample
            view.setInt16(pos, sample, true);
            pos += 2;
        }
        // next source sample frame
        offset++;
    }
    // create Blob
    return new Blob([buffer], { type: "audio/wav" });

    function setUint16(data) {
        view.setUint16(pos, data, true);
        pos += 2;
    }
    function setUint32(data) {
        view.setUint32(pos, data, true);
        pos += 4;
    }
}
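The Blob returned by bufferToWave() can be saved to disk as well as previewed. A minimal sketch via a temporary download link (the saveWave helper and the file name are illustrative, not part of the original demo):

function saveWave(blob, fileName) {
    const link = document.createElement('a');
    link.href = URL.createObjectURL(blob);
    link.download = fileName;
    document.body.appendChild(link);
    link.click();
    link.remove();
    URL.revokeObjectURL(link.href);
}
// e.g. saveWave(bufferToWave(newAudioBuffer, newAudioBuffer.length), 'merged.wav');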