使用自定义帧映射进行解码#
在本例中,我们将介绍 VideoDecoder 类的 custom_frame_mappings 参数。该参数允许你提供预先计算好的帧映射信息,以加快 VideoDecoder 的实例化,同时保持 seek_mode="exact" 的寻帧精度。
它非常适合以下工作流程:
帧的准确性至关重要,因此不能使用近似模式
视频可以先预处理一次,然后多次解码
首先是一些准备工作(boilerplate):我们会从网上下载一个短视频,然后用 ffmpeg 将其重复多次,制作出一个更长的版本。我们最终会有两个视频:一个大约 14 秒的短视频和一个大约 12 分钟的长视频。你可以忽略这部分,直接跳到下面的“使用 ffprobe 创建自定义帧映射”。
import tempfile
from pathlib import Path
import subprocess
import requests

# Video source: https://www.pexels.com/video/dog-eating-854132/
# License: CC0. Author: Coverr.
url = "https://videos.pexels.com/video-files/854132/854132-sd_640_360_25fps.mp4"
response = requests.get(url, headers={"User-Agent": ""})
if response.status_code != 200:
    raise RuntimeError(f"Failed to download video. {response.status_code = }.")

temp_dir = tempfile.mkdtemp()
short_video_path = Path(temp_dir) / "short_video.mp4"

with open(short_video_path, 'wb') as f:
    # Stream the body in 64 KiB chunks. iter_content() without a chunk_size
    # yields one byte at a time, which makes the download extremely slow.
    for chunk in response.iter_content(chunk_size=65536):
        f.write(chunk)

long_video_path = Path(temp_dir) / "long_video.mp4"

# Build a ~12 min video by looping the short clip; "-c copy" stream-copies
# (no re-encode), so this is fast.
ffmpeg_command = [
    "ffmpeg",
    "-stream_loop", "50",  # repeat video 50 times to get a ~12 min video
    "-i", f"{short_video_path}",
    "-c", "copy",
    f"{long_video_path}"
]
subprocess.run(ffmpeg_command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
from torchcodec.decoders import VideoDecoder

# Sanity-check the two files: the downloaded clip is ~14 seconds long and
# the 50x-looped copy is ~12 minutes long.
print(f"Short video duration: {VideoDecoder(short_video_path).metadata.duration_seconds} seconds")
print(f"Long video duration: {VideoDecoder(long_video_path).metadata.duration_seconds / 60} minutes")
使用 ffprobe 创建自定义帧映射#
为了生成包含所需视频元数据的 JSON 文件,建议使用 ffprobe。需要以下帧元数据字段(旧版 FFmpeg 需要 pkt_ 前缀):
pts/pkt_pts:每帧的显示时间戳(presentation timestamp);duration/pkt_duration:每帧的时长;key_frame:布尔标志,表示哪些帧是关键帧。
from pathlib import Path
import subprocess
import tempfile
from time import perf_counter_ns
import json
# Let's define a simple function that runs ffprobe on one stream of a video, then writes the results to output_json_path.
def generate_frame_mappings(video_path, output_json_path, stream_index):
    """Run ffprobe on one stream of ``video_path`` and save its JSON output.

    The file written to ``output_json_path`` contains, for every frame of
    the selected stream, the pts, duration and key_frame fields.
    """
    ffprobe_cmd = [
        "ffprobe",
        "-i", f"{video_path}",
        "-select_streams", f"{stream_index}",
        "-show_frames",
        "-show_entries", "frame=pts,duration,key_frame",
        "-of", "json",
    ]
    print(f"Running ffprobe:\n{' '.join(ffprobe_cmd)}\n")
    completed = subprocess.run(ffprobe_cmd, check=True, capture_output=True, text=True)
    Path(output_json_path).write_text(completed.stdout)
stream_index = 0
long_json_path = Path(temp_dir) / "long_custom_frame_mappings.json"
short_json_path = Path(temp_dir) / "short_custom_frame_mappings.json"

# Generate one frame-mapping JSON file per video.
generate_frame_mappings(long_video_path, long_json_path, stream_index)
generate_frame_mappings(short_video_path, short_json_path, stream_index)

# Peek at the first few frame entries to show the fields decoders rely on.
with open(short_json_path) as f:
    sample_data = json.load(f)  # json.load reads the file object directly

print("Sample of fields in custom frame mappings:")
for frame in sample_data["frames"][:3]:
    print(f"{frame['key_frame'] = }, {frame['pts'] = }, {frame['duration'] = }")
性能:视频解码器的创建#
自定义帧映射会加快 VideoDecoder 对象的创建。随着视频长度或分辨率的增加,相较于精确模式的性能提升也会更加明显。
import torch
# Here, we define a benchmarking function, with the option to seek to the start of a file_like.
def bench(f, file_like=False, average_over=50, warmup=2, **f_kwargs):
    """Benchmark callable ``f`` and print the median time in milliseconds.

    Parameters:
        f: callable to time; invoked as ``f(**f_kwargs)``.
        file_like: when True, ``f_kwargs["custom_frame_mappings"]`` is an
            open file object that must be rewound after every call so the
            next call reads it from the start.
        average_over: number of timed runs (median/std reported over these).
        warmup: number of untimed runs executed first.
        **f_kwargs: keyword arguments forwarded to ``f``.
    """
    def rewind():
        # Reset the mapping file between calls; otherwise every call after
        # the first would read from EOF and see an empty mapping.
        if file_like:
            f_kwargs["custom_frame_mappings"].seek(0)

    for _ in range(warmup):
        f(**f_kwargs)
        rewind()

    times = []
    for _ in range(average_over):
        start = perf_counter_ns()
        f(**f_kwargs)
        end = perf_counter_ns()
        times.append(end - start)
        rewind()

    times = torch.tensor(times) * 1e-6  # ns to ms
    std = times.std().item()
    med = times.median().item()
    print(f"{med = :.2f}ms +- {std:.2f}")
# Benchmark VideoDecoder creation on both videos: custom_frame_mappings
# (passed as an open file object) vs a full file scan with seek_mode="exact".
for video_path, json_path in ((short_video_path, short_json_path), (long_video_path, long_json_path)):
    print(f"\nRunning benchmarks on {Path(video_path).name}")
    print("Creating a VideoDecoder object with custom_frame_mappings:")
    with open(json_path, "r") as f:
        # file_like=True so bench() rewinds the mapping file between runs.
        bench(VideoDecoder, file_like=True, source=video_path, stream_index=stream_index, custom_frame_mappings=f)
    # Compare against exact seek_mode
    print("Creating a VideoDecoder object with seek_mode='exact':")
    bench(VideoDecoder, source=video_path, stream_index=stream_index, seek_mode="exact")
性能:使用自定义帧映射进行帧解码#
虽然 custom_frame_mappings 只影响 VideoDecoder 的初始化速度,但任何需要创建 VideoDecoder 实例的解码工作流程,都会因此获得性能提升。
def decode_frames(video_path, seek_mode="exact", custom_frame_mappings=None):
    """Build a VideoDecoder for ``video_path`` and decode its first 10 frames.

    The decoded frames are discarded; this function exists purely so the
    benchmark can time decoder creation plus a small decode together.
    """
    decoder = VideoDecoder(
        source=video_path,
        seek_mode=seek_mode,
        custom_frame_mappings=custom_frame_mappings,
    )
    decoder.get_frames_in_range(start=0, stop=10)
# Benchmark the full decode workflow (decoder creation + decoding 10 frames)
# with custom_frame_mappings vs seek_mode="exact" on both videos.
for video_path, json_path in ((short_video_path, short_json_path), (long_video_path, long_json_path)):
    print(f"\nRunning benchmarks on {Path(video_path).name}")
    print("Decoding frames with custom_frame_mappings:")
    with open(json_path, "r") as f:
        # file_like=True so bench() rewinds the mapping file between runs.
        bench(decode_frames, file_like=True, video_path=video_path, custom_frame_mappings=f)
    print("Decoding frames with seek_mode='exact':")
    bench(decode_frames, video_path=video_path, seek_mode="exact")
准确性:元数据和帧检索#
除了相较于 seek_mode="exact" 时实现实例化速度更快外,使用自定义帧映射还保留了精确元数据和帧寻址的优势。
# Metadata should be identical whether it comes from the custom mappings or
# from an exact scan of the file.
print("Metadata of short video with custom_frame_mappings:")
with open(short_json_path, "r") as f:
    print(VideoDecoder(short_video_path, custom_frame_mappings=f).metadata)
print("Metadata of short video with seek_mode='exact':")
print(VideoDecoder(short_video_path, seek_mode="exact").metadata)

# Decode every frame with both decoders and verify they match bit-for-bit
# (atol=0, rtol=0 allows no tolerance at all).
with open(short_json_path, "r") as f:
    custom_frame_mappings_decoder = VideoDecoder(short_video_path, custom_frame_mappings=f)
exact_decoder = VideoDecoder(short_video_path, seek_mode="exact")
for i in range(len(exact_decoder)):
    torch.testing.assert_close(
        exact_decoder.get_frame_at(i).data,
        custom_frame_mappings_decoder.get_frame_at(i).data,
        atol=0, rtol=0,
    )
print("Frame seeking is the same for this video!")
custom_frame_mappings 怎么帮忙?#
自定义帧映射包含的帧索引信息,与精确模式通常通过文件扫描计算得到的信息相同。通过以 JSON 的形式把这些信息提供给 VideoDecoder,就省去了昂贵的扫描步骤,同时保留了精确寻帧的优势。
应该用哪个模式?#
当速度比精确寻址精度更重要时,建议以“近似”模式实现最快解码。
对于精确寻帧,自定义帧映射有助于反复解码相同视频的工作流程,并且可以进行一些预处理。
如果要在没有预处理的情况下进行精确寻帧,可以使用“精确”模式。