canvas图片序列加声音合成mp4视频实例页面

回到相关文章 »

效果:

canvas效果

canvas和audio非必须可见,为了方便大家学习才展示出来的。

音频素材

下载

mp4视频绘制时间:s,生成时间和播放一致(等音频流完整播放):s

代码:

CSS代码:
canvas, video {
	width: 300px;
	height: 200px;
}
HTML代码:
<canvas id="canvas" width="600" height="400"></canvas>
<h4>音频素材</h4>
<p>
    <audio src="./happy-and-bright.mp3" controls preload="auto"></audio>
</p>

<p class="flex">
    <button id="generate">生成MP4视频</button>
    <a id="download" download="muxer-with-audio.mp4">下载</a>
</p>

<div class="view">
    <video id="video" width="600" height="400" controls></video>
    <p class="time">视频绘制时间:<output id="output1"></output>s,生成时间和播放一致(等音频流完整播放):<output id="output2"></output>s</p>
</div>
JS代码:
// Muxer and video/audio encoder instances, created fresh for each generation run
var muxer = null;
var videoEncoder = null;
var audioEncoder = null;

// Finish encoding: flush both encoders, finalize the MP4 container, and
// point the <video> preview and the download link at the resulting bytes.
const endEncoding = async () => {
    await videoEncoder?.flush();
    await audioEncoder?.flush();
    muxer.finalize();

    let { buffer } = muxer.target;

    // Give the Blob an explicit MIME type so the downloaded file and the
    // <video> element see a proper MP4 instead of an untyped binary blob.
    var blobUrl = URL.createObjectURL(new Blob([buffer], { type: 'video/mp4' }));
    video.src = blobUrl;
    download.href = blobUrl;

    // Drop references so a future run starts from a clean state
    videoEncoder = null;
    audioEncoder = null;
    muxer = null;
};

// Off-screen canvas that serves as the frame source for the video track
var canvas = document.createElement('canvas');
canvas.width = 600;
canvas.height = 400;

// Build the MP4 container writer (mp4-muxer): one H.264 video track and
// one mono 48 kHz AAC audio track. 'offset' shifts the first timestamp
// to zero so both tracks start together.
muxer = new Mp4Muxer.Muxer({
    target: new Mp4Muxer.ArrayBufferTarget(),
    video: {
        codec: 'avc',
        width: canvas.width,
        height: canvas.height,
        frameRate: 30
    },
    audio: {
        codec: 'aac',
        sampleRate: 48000,
        numberOfChannels: 1
    },
    firstTimestampBehavior: 'offset'
});

// Video/audio encoders built on the WebCodecs API.
// Every encoded chunk is handed straight to the muxer; errors are logged.
videoEncoder = new VideoEncoder({
    output: (chunk, meta) => muxer.addVideoChunk(chunk, meta),
    error: e => console.error(e)
});
videoEncoder.configure({
    codec: 'avc1.42001f', // H.264 Baseline profile, level 3.1
    width: canvas.width,
    height: canvas.height,
    bitrate: 1e6
});
// Audio encoder; configure() is deferred until the source sample rate is known
audioEncoder = new AudioEncoder({
    output: (chunk, meta) => muxer.addAudioChunk(chunk, meta),
    error: e => console.error(e)
});

// Decoded-audio placeholder (kept for compatibility; the decode callback
// below shadows it with its own parameter)
var audioData = null;
// Raw bytes of the audio file. The original assigned to an undeclared
// `audioBuffer`, creating an implicit global — declare it explicitly.
var audioBuffer = null;

// Fetch the complete audio file as an ArrayBuffer up front; it is
// decoded later, when the user starts a generation run.
fetch(audio.src)
    .then(response => response.arrayBuffer())
    .then(buffer => {
        audioBuffer = buffer;
    })
    .catch(e => console.error(e));


// Generate the MP4 when the button is clicked
generate.onclick = async function () {
    // Wall-clock start, used to report the drawing time afterwards
    var startTime = document.timeline.currentTime;
    var frameCounter = 0;
    // handleDraw drives the per-frame drawing (source viewable on the page);
    // the 1st callback runs once per drawn frame, the 2nd when drawing is done.
    handleDraw(canvas, function () {
        // VideoFrame timestamps are in microseconds: index * (1000/30) ms * 1000
        let frame = new VideoFrame(canvas, {
            timestamp: (frameCounter * 1000 / 30) * 1000
        });

        // Use the 30th frame (about 1 s in) as the video poster image
        if (frameCounter === 30) {
            canvas.toBlob(function (blob) {
                video.poster = URL.createObjectURL(blob);
            }, 'image/jpeg', 0.95);
        }

        frameCounter++;
        // Request a key frame once every 30 frames (once per second)
        videoEncoder.encode(frame, { keyFrame: frameCounter % 30 === 0 });
        frame.close();
    }, function () {
        // Audio handling.
        // The clip length is only known after all video frames have been
        // drawn, so the audio is trimmed and encoded here. (If the video
        // duration were known up front, the audio could be encoded earlier.)

        // Audio trimming technique:
        // https://www.zhangxinxu.com/wordpress/2020/07/js-audio-clip-copy-upload/
        const audioContext = new AudioContext();
        audioContext.decodeAudioData(audioBuffer).then(async audioData => {
            const numChannels = audioData.numberOfChannels;
            // Video length in seconds (30 fps) → number of audio sample
            // frames to keep. Use `audioData.length` to keep the full track.
            const seconds = frameCounter / 30;
            const audioFrameCount = Math.round(seconds * audioData.sampleRate);

            // Empty AudioBuffer with matching sample rate / channel count and
            // the trimmed length. Reuse the existing AudioContext — the
            // original created a second one here, which is wasteful (browsers
            // cap the number of live AudioContexts).
            var newAudioBuffer = audioContext.createBuffer(numChannels, audioFrameCount, audioData.sampleRate);
            // Scratch array for moving channel data between the two buffers
            var anotherArray = new Float32Array(audioFrameCount);
            // Copy each channel; copyFromChannel implicitly truncates to
            // anotherArray's length, which performs the trim.
            var offset = 0;
            for (var channel = 0; channel < numChannels; channel++) {
                audioData.copyFromChannel(anotherArray, channel, 0);
                newAudioBuffer.copyToChannel(anotherArray, channel, offset);
            }

            // Flatten to planar layout: [ch0 samples..., ch1 samples...]
            const planarData = new Float32Array(numChannels * audioFrameCount);
            for (let channel = 0; channel < numChannels; channel++) {
                const channelData = newAudioBuffer.getChannelData(channel);
                planarData.set(channelData, channel * audioFrameCount);
            }

            // Wrap the raw samples in a WebCodecs AudioData object
            const audioData2 = new AudioData({
                format: 'f32-planar',
                sampleRate: audioData.sampleRate,
                numberOfFrames: audioFrameCount,
                numberOfChannels: audioData.numberOfChannels,
                timestamp: 0,
                data: planarData
            });

            // Configure the AAC encoder now that the source rate is known
            audioEncoder.configure({
                codec: 'mp4a.40.2',
                numberOfChannels: audioData.numberOfChannels,
                sampleRate: audioData.sampleRate,
                bitrate: 128000
            });

            // Encode the trimmed audio
            audioEncoder.encode(audioData2);

            // Timing stats: real drawing time vs. video duration (both ms)
            const timeUsed = document.timeline.currentTime - startTime;
            const timerActualEnd = frameCounter * 1000 / 30;

            // Await the flush/finalize so the UI only reports success once
            // the MP4 is actually ready (the original fired this promise
            // and updated the UI immediately).
            await endEncoding();

            // Update the button label
            generate.innerHTML = '生成完成';

            // Report times, converted ms → s with two decimal places
            output1.innerHTML = Math.round(timeUsed / 10) / 100;
            output2.innerHTML = Math.round(timerActualEnd / 10) / 100;
        }).catch(e => console.error(e));
    });

    // One-shot button: disable it and show progress
    this.disabled = true;
    this.textContent = '生成中...';
};