本文分享webrtc的ios端上自定义采集视频
本质问题:从摄像机里面取出数据给webrtc模块使用。
建立 RTCCameraVideoCapturer (视频生产者)--->RTCVideoSource(消费者) 之间的通道
头文件 *.h
// Capture pipeline state: camera (producer) -> video source (consumer) -> local stream.
@property(nonatomic, strong) RTCMediaStream* _Nullable streamLocal; // local stream the video/audio tracks are added to
@property(nonatomic, strong) RTCCameraVideoCapturer* cameraCapturer; // camera frame producer
@property(nonatomic, strong) RTCVideoSource* videoSource; // sink that feeds captured frames into WebRTC
// methods
-(void) setupLocal;   // builds the local stream, source, capturer and tracks
-(void)startCapture;  // starts the camera with the selected device/format/fps
实现文件 *.m
// Builds the local capture pipeline: creates the local RTCMediaStream, wires
// RTCCameraVideoCapturer (producer) into RTCVideoSource (consumer), and adds
// one video track and one audio track to the stream.
-(void) setupLocal
{
    RTCPeerConnectionFactory* factory = [RTCEngine sharedInstance].connectionFactory;
    _streamLocal = [factory mediaStreamWithStreamId:_streamId]; // local RTCMediaStream
    // fix: was `[_factory videoSource]` — no `_factory` ivar exists; the factory is the local above
    _videoSource = [factory videoSource];
    // Feed camera frames straight into the video source. Substitute a custom
    // RTCVideoCapturerDelegate here when frames need pre-processing (beauty
    // filter, etc.) before reaching WebRTC — covered in the next article.
    _cameraCapturer = [[RTCCameraVideoCapturer alloc] initWithDelegate:_videoSource];
    RTCVideoTrack* videoTrack = [factory videoTrackWithSource:_videoSource trackId:[[NSUUID UUID] UUIDString]];
    [_streamLocal addVideoTrack:videoTrack];
    // Audio. fix: was `RTCVideoTrack* videoTrack = ...` — wrong type, a duplicate
    // variable name, and the `audioTrack` used below was never declared.
    RTCAudioSource* audioSource = [factory audioSourceWithConstraints:nil];
    RTCAudioTrack* audioTrack = [factory audioTrackWithSource:audioSource trackId:[[NSUUID UUID] UUIDString]];
    [_streamLocal addAudioTrack:audioTrack];
}
//begin
// Starts camera capture on the best-matching device and format.
// NOTE(review): `usingFrontCamera` and `frameRate` are not declared in this
// excerpt — presumably ivars of the enclosing class; confirm against the full file.
-(void)startCapture
{
    AVCaptureDevicePosition position =
        usingFrontCamera ? AVCaptureDevicePositionFront : AVCaptureDevicePositionBack;
    AVCaptureDevice *device = [self findDeviceForPosition:position];
    AVCaptureDeviceFormat *format = [self selectFormatForDevice:device];
    // fix: bare `cameraCapturer` does not resolve — the auto-synthesized ivar
    // for the declared property is `_cameraCapturer`
    [_cameraCapturer startCaptureWithDevice:device format:format fps:frameRate];
}
// inner methods
// Returns the first capture device at the requested position (front/back),
// falling back to the first available device, or nil when none exist.
- (AVCaptureDevice *)findDeviceForPosition:(AVCaptureDevicePosition)position {
    NSArray<AVCaptureDevice *> *captureDevices = [RTCCameraVideoCapturer captureDevices];
    for (AVCaptureDevice *device in captureDevices) {
        if (device.position == position) {
            return device;
        }
    }
    // fix: `captureDevices[0]` throws NSRangeException when the array is empty
    // (e.g. Simulator, or camera permission denied); firstObject is nil-safe.
    return captureDevices.firstObject;
}
// 查找最接近采集方案的采集格式
// Picks the supported capture format whose dimensions are closest to the
// target (minWidth x minHeight), records the chosen dimensions in
// cameraWidth/cameraHeight, and returns the format (nil if the device
// reports no supported formats).
// NOTE(review): `minWidth`, `minHeight`, `cameraWidth`, `cameraHeight` are not
// declared in this excerpt — presumably ivars; confirm against the full file.
- (AVCaptureDeviceFormat *)selectFormatForDevice:(AVCaptureDevice *)device {
    NSArray<AVCaptureDeviceFormat *> *formats = [RTCCameraVideoCapturer supportedFormatsForDevice:device];
    int targetWidth = (int)minWidth;
    int targetHeight = (int)minHeight;
    AVCaptureDeviceFormat *selectedFormat = nil;
    int currentDiff = INT_MAX;
    for (AVCaptureDeviceFormat *format in formats) {
        CMVideoDimensions dimension = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
        // Manhattan distance between candidate and target resolution.
        int diff = abs(targetWidth - dimension.width) + abs(targetHeight - dimension.height);
        if (diff < currentDiff) {
            selectedFormat = format;
            currentDiff = diff;
        }
    }
    NSAssert(selectedFormat != nil, @"No suitable capture format found.");
    // fix: NSAssert is compiled out in release builds, so guard explicitly —
    // otherwise CMVideoFormatDescriptionGetDimensions would be called with a
    // NULL format description.
    if (selectedFormat == nil) {
        return nil;
    }
    CMVideoDimensions dimension = CMVideoFormatDescriptionGetDimensions(selectedFormat.formatDescription);
    cameraWidth = dimension.width;
    cameraHeight = dimension.height;
    return selectedFormat;
}
以上是webrtc采集代码,另外本地需要预览的这个怎么做呢?
//找到对应的
// Attach self as a renderer to the stream's first video track so that
// renderFrame: (below) receives every captured frame for local preview.
if (stream && stream.videoTracks.count > 0) {
[stream.videoTracks[0] addRenderer:self]; // self must conform to RTCVideoRenderer
}
// NOTE(review): renderFrame: is declared by the RTCVideoRenderer protocol, not
// RTCVideoViewDelegate — verify the conformance list against the WebRTC headers.
@interface RTCView() <RTCVideoViewDelegate>
// Called by WebRTC with each decoded/captured frame; forward it to the actual
// rendering view (RTCMTLVideoView or RTCEAGLVideoView).
-(void)renderFrame:(RTCVideoFrame *)frame
{
// TODO: hack — drop frames until the render subview exists; remove once
// subview creation is guaranteed before rendering starts.
if(subview==nil){
return;
}
[subview renderFrame:frame]; // hand off to RTCMTLVideoView or RTCEAGLVideoView
}
随后我会分享完整代码。
网友评论