JS code:
// The video-decoding logic below uses mp4box.js to read the MP4 structure
// and the WebCodecs API to do the actual decoding
const mp4url = './rains-s.mp4';
const mp4box = MP4Box.createFile();
// Helper method; the details inside are not important here
const getExtradata = () => {
    // Build the `description` info that VideoDecoder.configure() needs
    const entry = mp4box.moov.traks[0].mdia.minf.stbl.stsd.entries[0];
    const box = entry.avcC ?? entry.hvcC ?? entry.vpcC;
    if (box != null) {
        const stream = new DataStream(
            undefined,
            0,
            DataStream.BIG_ENDIAN
        );
        box.write(stream);
        // slice(8) strips the 8-byte box header (size + type),
        // leaving only the raw codec description
        return new Uint8Array(stream.buffer.slice(8));
    }
};
// Video track, used for decoding
let videoTrack = null;
let videoDecoder = null;
// The final sequence of decoded video frames
const videoFrames = [];
let nbSampleTotal = 0;
let countSample = 0;
mp4box.onReady = function (info) {
    // Remember the video track info; onSamples needs it for matching
    videoTrack = info.videoTracks[0];
    if (videoTrack == null) {
        return;
    }
    mp4box.setExtractionOptions(videoTrack.id, 'video', {
        nbSamples: 100
    });
    // Video width and height
    const videoW = videoTrack.track_width;
    const videoH = videoTrack.track_height;
    // Set up the video decoder
    videoDecoder = new VideoDecoder({
        output: (videoFrame) => {
            createImageBitmap(videoFrame).then((img) => {
                videoFrames.push({
                    img,
                    duration: videoFrame.duration,
                    timestamp: videoFrame.timestamp
                });
                videoFrame.close();
            });
        },
        error: (err) => {
            console.error('videoDecoder error:', err);
        }
    });
    nbSampleTotal = videoTrack.nb_samples;
    videoDecoder.configure({
        codec: videoTrack.codec,
        codedWidth: videoW,
        codedHeight: videoH,
        description: getExtradata()
    });
    mp4box.start();
};
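// Optional robustness check (a sketch, not in the original): the static
// VideoDecoder.isConfigSupported() can probe the codec string before
// videoDecoder.configure() is called inside onReady above:
// VideoDecoder.isConfigSupported({
//     codec: videoTrack.codec,
//     codedWidth: videoTrack.track_width,
//     codedHeight: videoTrack.track_height
// }).then(({ supported }) => {
//     if (!supported) {
//         console.error('Codec configuration not supported:', videoTrack.codec);
//     }
// });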
mp4box.onSamples = function (trackId, ref, samples) {
    // `samples` is the extracted sample data
    if (videoTrack.id === trackId) {
        mp4box.stop();
        countSample += samples.length;
        for (const sample of samples) {
            const type = sample.is_sync ? 'key' : 'delta';
            const chunk = new EncodedVideoChunk({
                type,
                timestamp: sample.cts,
                duration: sample.duration,
                data: sample.data
            });
            videoDecoder.decode(chunk);
        }
        if (countSample === nbSampleTotal) {
            videoDecoder.flush();
        }
    }
};
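// Note (an assumption, not from the original): WebCodecs timestamps are in
// microseconds, while mp4box.js samples carry cts/duration in the track's
// timescale units. Passing sample.cts directly, as above, preserves frame
// order but not real time; a timescale-aware version would look like:
// const chunkFromSample = (sample) => new EncodedVideoChunk({
//     type: sample.is_sync ? 'key' : 'delta',
//     timestamp: (1e6 * sample.cts) / sample.timescale,
//     duration: (1e6 * sample.duration) / sample.timescale,
//     data: sample.data
// });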
// Fetch the video's ArrayBuffer data
fetch(mp4url).then(res => res.arrayBuffer()).then(buffer => {
    // The file is small, so it is written in one go.
    // A larger file should instead be read in chunks via res.body.getReader(),
    // consuming one piece at a time with reader.read().then(({ done, value }) => ...)
    // — see the streaming sketch below
    buffer.fileStart = 0;
    mp4box.appendBuffer(buffer);
    mp4box.flush();
});
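// Streaming sketch for larger files (illustrative, not part of the original;
// `streamFile` is a hypothetical helper): each chunk read from the network is
// handed to mp4box.js tagged with its byte offset in the file.
// const streamFile = (url) => fetch(url).then((res) => {
//     const reader = res.body.getReader();
//     let offset = 0;
//     const pump = ({ done, value }) => {
//         if (done) {
//             mp4box.flush();
//             return;
//         }
//         const buf = value.buffer;
//         // mp4box.js needs to know where this chunk sits in the file
//         buf.fileStart = offset;
//         offset += buf.byteLength;
//         mp4box.appendBuffer(buf);
//         return reader.read().then(pump);
//     };
//     return reader.read().then(pump);
// });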
// ------- Below: Pixi draws the background image and the decoded video frames ----------
const canvas = document.getElementById('canvas');
const view = canvas.transferControlToOffscreen();
// App initialization
const viewWidth = 540;
const viewHeight = 960;
const app = new PIXI.Application({
    view,
    width: viewWidth,
    height: viewHeight,
    resolution: 1
});
// Draw the background
const background = PIXI.Sprite.from('./school_overcast.jpg');
background.width = viewWidth;
background.height = viewHeight;
app.stage.addChild(background);
// Create a dedicated imgContainer used for the constantly-updated drawing
const imgContainer = new PIXI.Container();
// Its blend mode could be set to screen here (left commented out;
// the mode is set per sprite below instead)
// imgContainer.blendMode = PIXI.BLEND_MODES.SCREEN;
// Add it to the stage
app.stage.addChild(imgContainer);
// Decoding ends above; clicking the button below applies the decoded
// videoFrames to the canvas as the blend-mode effect layer
const button = document.querySelector('button');
button.addEventListener('click', () => {
    if (!videoFrames.length) {
        console.error('Video decoding has not finished yet, please wait');
        return;
    }
    // Convert each img into a sprite up front, so drawing does not
    // recreate them over and over
    const spriteFrames = videoFrames.map(obj => {
        obj.sprite = PIXI.Sprite.from(obj.img);
        obj.sprite.x = 0;
        obj.sprite.y = 0;
        obj.sprite.width = viewWidth;
        obj.sprite.height = viewHeight;
        // Screen blend mode
        obj.sprite.blendMode = PIXI.BLEND_MODES.SCREEN;
        return obj;
    });
    // Pull frames out of spriteFrames and apply them to the canvas
    let index = 0;
    // Draw method
    const draw = () => {
        const { sprite, duration } = spriteFrames[index];
        // Remove the previous frame
        imgContainer.removeChildren();
        // Add the new one
        imgContainer.addChild(sprite);
        // Schedule the next frame
        index++;
        if (index === spriteFrames.length) {
            // Start over
            index = 0;
        }
        // 100 is the nbSamples sampling value used above
        setTimeout(draw, duration / 100);
    };
    draw();
    // Disable the button
    button.disabled = true;
});
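// The setTimeout pacing above is approximate. A requestAnimationFrame variant
// (a sketch replacing draw() inside the click handler, assuming frame
// timestamps were converted to microseconds when the chunks were created)
// would advance frames based on elapsed time instead:
// let startTime = null;
// const drawRaf = (now) => {
//     if (startTime === null) startTime = now;
//     const { sprite, timestamp } = spriteFrames[index];
//     // elapsed ms -> microseconds, compared against the frame timestamp
//     if ((now - startTime) * 1000 >= timestamp) {
//         imgContainer.removeChildren();
//         imgContainer.addChild(sprite);
//         index = (index + 1) % spriteFrames.length;
//         if (index === 0) startTime = now;
//     }
//     requestAnimationFrame(drawRaf);
// };
// requestAnimationFrame(drawRaf);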