代码如下:
导入框架:#import <AVFoundation/AVFoundation.h>
atTime 表示截取帧在视频中的位置,取总时长的比例值,取值范围 0-1(0 为开头,1 为结尾)
/// Extracts a single frame from the video at `fileUrl`.
/// @param fileUrl The media file URL. The method returns immediately when nil.
/// @param atTime  Normalized position within the video, in the range 0-1
///                (0 = first frame, 1 = last frame). Values outside the range
///                are clamped.
/// @param block   Invoked on the main queue with the extracted frame.
/// @param fail    Invoked on the main queue with an error description when
///                extraction fails. May be nil.
+ (void)getVideoFrameWith:(NSURL *)fileUrl
                   atTime:(CGFloat)atTime
                    block:(void(^)(UIImage *image))block
                     fail:(void(^)(NSString *error))fail {
    if (!fileUrl) return;
    AVAsset *asset = [AVAsset assetWithURL:fileUrl];
    AVAssetImageGenerator *imageGenerator = [AVAssetImageGenerator assetImageGeneratorWithAsset:asset];
    // CMTimeGetSeconds avoids the integer truncation of value / timescale
    // (CMTimeValue and CMTimeScale are integer types).
    Float64 totalTime = CMTimeGetSeconds(asset.duration);
    // A zero-length asset cannot yield a frame either, so reject <= 0.
    if (totalTime <= 0) {
        if (fail) {
            dispatch_async(dispatch_get_main_queue(), ^{
                fail(@"视频总时长小于0");
            });
        }
        return;
    }
    // Zero tolerance so the generated frame matches the requested time
    // exactly instead of snapping to a nearby keyframe.
    imageGenerator.requestedTimeToleranceBefore = kCMTimeZero;
    imageGenerator.requestedTimeToleranceAfter = kCMTimeZero;
    imageGenerator.appliesPreferredTrackTransform = YES; // honor track orientation
    Float64 seconds = totalTime * atTime;
    // Clamp in SECONDS. The original compared against asset.duration.value,
    // which is expressed in timescale units — a different scale entirely.
    seconds = MAX(seconds, 0);
    seconds = MIN(seconds, totalTime);
    CMTime requestTime = CMTimeMakeWithSeconds(seconds, asset.duration.timescale);
    NSError *error = nil;
    CGImageRef imageRef = [imageGenerator copyCGImageAtTime:requestTime actualTime:NULL error:&error];
    // Check the result, not the error pointer: the error may be unset on
    // success, and the image is the authoritative success signal.
    if (!imageRef) {
        if (fail) {
            NSString *message = error.localizedDescription ?: @"未知错误";
            dispatch_async(dispatch_get_main_queue(), ^{
                fail(message);
            });
        }
        return;
    }
    UIImage *img = [UIImage imageWithCGImage:imageRef];
    // copyCGImageAtTime: follows the Create/Copy rule and returns +1;
    // the original leaked this CGImage on every call.
    CGImageRelease(imageRef);
    dispatch_async(dispatch_get_main_queue(), ^{
        if (block) {
            block(img);
        }
    });
}
网友评论