Part 2: FFmpeg-Based Multimedia Player Development on Android - Playback Control

Author: Erich_Godsen | Published 2020-11-26 16:15

Picking up from the previous article, let's look at how to use FFmpeg to decode audio and video.

Code example
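The function below references a few helpers that were set up in the previous article and are not repeated here (`javaCallback`, `audioTrack`, `MAX_AUDIO_FRME_SIZE`). A minimal sketch of what those declarations might look like; the field and constant names match what the code uses, but the exact definitions are assumptions:

#define MAX_AUDIO_FRME_SIZE (44100 * 2 * 2) // assumed: one second of 44.1 kHz 16-bit stereo PCM

// Cached references into the Java layer (assumed to be filled in during JNI setup).
struct JavaCallback {
    jobject thiz;                 // the Java player object
    jmethodID createAudioTrackId; // AudioTrack createAudioTrack(int sampleRate, int channels)
                                  // JNI signature: "(II)Landroid/media/AudioTrack;"
};

// Wrapper around the Java android.media.AudioTrack used for PCM output.
struct AudioTrack {
    jobject audiotrack;        // global reference to the AudioTrack instance
    jclass audio_track_class;
    jmethodID play_id;         // void play()
    jmethodID stop_id;         // void stop()
    jmethodID write_id;        // int write(byte[], int, int)
};

static JavaCallback *javaCallback;
static AudioTrack *audioTrack;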

static jint
MediaPlayer_play(JNIEnv *env, jobject thiz, jstring url, jobject surface) {
    int ret = -1;
    const char *input = env->GetStringUTFChars(url, NULL);
    if (input == NULL) {
        LOGE("input is invalid");
        return ret;
    }

    av_register_all();
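    // Note: av_register_all() is deprecated since FFmpeg 4.0, where registration
    // happens automatically; it is kept here for compatibility with older builds.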


    AVFormatContext *formatContext = avformat_alloc_context();
    int openState = avformat_open_input(&formatContext, input, NULL, NULL);
    if (openState < 0) {
        char errbuf[128];
        if (av_strerror(openState, errbuf, sizeof(errbuf)) == 0) {
            LOGE("open file error, the rease is %s", errbuf);
        }
        return ret;
    }

    if (avformat_find_stream_info(formatContext, NULL) < 0) {
        LOGE("read stream info error");
        return ret;
    }

    // Dump after avformat_find_stream_info() so the per-stream details are complete.
    av_dump_format(formatContext, 0, input, 0);

    int video_stream_index = -1;
    LOGD("int this video file, the streams num:%d", formatContext->nb_streams);
    for (int i = 0; i < formatContext->nb_streams; i++) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }

    //audio decode
    int audio_stream_index = -1;
    for (int i = 0; i < formatContext->nb_streams; i++) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_index = i;
            break;
        }
    }
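    // Note: av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)
    // is a one-call alternative to these manual lookup loops.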

    LOGE("audio_stream_index=%d", audio_stream_index);
    if (audio_stream_index == -1) {
        LOGE("no audio stream");
        return ret;
    }



    if (video_stream_index == -1) {
        LOGE("vide stream is error");
        return ret;
    }

    AVCodecParameters *codecParameters = formatContext->streams[video_stream_index]->codecpar;
    AVCodec *videoDecoder = avcodec_find_decoder(codecParameters->codec_id);

    if (videoDecoder == NULL) {
        LOGD("can not find codec for this video");
        return ret;
    }

    AVCodecParameters *audioCodecParameters = formatContext->streams[audio_stream_index]->codecpar;
    AVCodec *audioDecoder = avcodec_find_decoder(audioCodecParameters->codec_id);

    if (audioDecoder == NULL) {
        LOGE("cannot find a decoder for this audio stream");
        return ret;
    }

    AVCodecContext *codecContext = avcodec_alloc_context3(videoDecoder);

    if (codecContext == NULL) {
        LOGE("alloc codec context failed!");
        return ret;
    }

    AVCodecContext *audioContext = avcodec_alloc_context3(audioDecoder);

    if (audioContext == NULL) {
        LOGE("alloc audio context failed!");
        return ret;
    }

    if (avcodec_parameters_to_context(codecContext, codecParameters) < 0) {
        LOGE("inflate codec context failed!");
        return ret;
    }

    if (avcodec_parameters_to_context(audioContext, audioCodecParameters) < 0) {
        LOGE("inflate audio codec context failed!");
        return ret;
    }

    if (avcodec_open2(codecContext, videoDecoder, NULL) < 0) {
        LOGE("init context error");
        return ret;
    }

    if (avcodec_open2(audioContext, audioDecoder, NULL) < 0) {
        LOGE("init audio context error");
        return ret;
    }


    AVPixelFormat  dstFormat = AV_PIX_FMT_RGBA;

    AVPacket *packet = av_packet_alloc();
    // Frame that receives each decoded picture from the decoder.
    AVFrame *frame = av_frame_alloc();
    // Frame that holds the converted RGBA picture that is finally displayed.
    AVFrame *outFrame = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *) av_malloc(
            av_image_get_buffer_size(dstFormat, codecContext->width, codecContext->height, 1));

    av_image_fill_arrays(outFrame->data, outFrame->linesize,
            out_buffer, dstFormat, codecContext->width, codecContext->height, 1);
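    // out_buffer now backs outFrame: av_image_fill_arrays() pointed outFrame->data
    // at the correct offsets inside the buffer for the 1-byte-aligned RGBA layout.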

    SwsContext *swsContext = sws_getContext(codecContext->width,
                codecContext->height,
                codecContext->pix_fmt,
                codecContext->width,
                codecContext->height,
                dstFormat,
                SWS_BICUBIC,
                NULL,
                NULL,
                NULL);

    if (swsContext == NULL) {
        LOGE("swsContext is NULL");
        return ret;
    }

    ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
    // Buffer description that ANativeWindow_lock() will fill in.
    ANativeWindow_Buffer outBuffer;
    SwrContext *swrCtx = swr_alloc();
    enum AVSampleFormat in_sample_fmt = audioContext->sample_fmt;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

    int in_sample_rate = audioContext->sample_rate;
    int out_sample_rate = 44100;

    LOGE("run_audio_decodec start");

    uint64_t  in_ch_layout = audioContext->channel_layout;
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    swr_alloc_set_opts(swrCtx, out_ch_layout, out_sample_fmt, out_sample_rate,
                       in_ch_layout, in_sample_fmt, in_sample_rate, 0, NULL);
    if (swr_init(swrCtx) < 0) {
        LOGE("swr_init failed");
        return ret;
    }

    int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
    uint8_t  *out_buffer_audio = static_cast<uint8_t *>(av_malloc(MAX_AUDIO_FRME_SIZE));

    audioTrack = static_cast<AudioTrack *>(malloc(sizeof(AudioTrack)));
    jobject at = env->CallObjectMethod(javaCallback->thiz, javaCallback->createAudioTrackId, out_sample_rate, out_channel_nb);
    audioTrack->audiotrack = env->NewGlobalRef(at);

    audioTrack->audio_track_class = env->GetObjectClass(audioTrack->audiotrack);
    audioTrack->play_id = env->GetMethodID(audioTrack->audio_track_class, "play", "()V");
    audioTrack->stop_id = env->GetMethodID(audioTrack->audio_track_class, "stop", "()V");
    audioTrack->write_id = env->GetMethodID(audioTrack->audio_track_class, "write", "([BII)I");

    env->CallVoidMethod(audioTrack->audiotrack, audioTrack->play_id);

    // Setting the width and height here constrains the number of pixels in the buffer,
    // not the physical display size of the screen. If the buffer does not match the
    // physical display size, the image will appear stretched or squeezed.
    ANativeWindow_setBuffersGeometry(nativeWindow, codecContext->width, codecContext->height,
                                     WINDOW_FORMAT_RGBA_8888);

    while (av_read_frame(formatContext, packet) == 0) {
        if (packet->stream_index == video_stream_index) {
            int sendPacketState = avcodec_send_packet(codecContext, packet);
            if (sendPacketState == 0) {
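                // Note: one packet can yield more than one frame; a production player
                // would loop on avcodec_receive_frame() until it returns AVERROR(EAGAIN).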
                int receiveFrameState = avcodec_receive_frame(codecContext, frame);
                if (receiveFrameState == 0) {
                    // Lock the window and obtain its drawing buffer.
                    ANativeWindow_lock(nativeWindow, &outBuffer, NULL);
                    sws_scale(swsContext,
                              ((const uint8_t *const*) frame->data),
                              frame->linesize,
                              0,
                              frame->height,
                              outFrame->data,
                              outFrame->linesize);

                    uint8_t *dst = static_cast<uint8_t *>(outBuffer.bits);
                    // Source: first address of the converted pixel data.
                    // With RGBA, all image data lives in data[0]; a YUV frame would be
                    // split across data[0], data[1] and data[2].
                    uint8_t *src = outFrame->data[0];
                    int oneLineByte = outBuffer.stride * 4; // stride is in pixels; RGBA_8888 is 4 bytes per pixel
                    int srcStride = outFrame->linesize[0];
                    for (int i = 0; i < codecContext->height; i++) {
                        memcpy(dst + i * oneLineByte, src + i * srcStride, srcStride);
                    }
                    ANativeWindow_unlockAndPost(nativeWindow);
                    // Sleep briefly to pace playback: sleep too long and every frame feels
                    // delayed, too short and playback looks sped up. At 60 fps a frame lasts
                    // about 16 ms; a real player would derive the delay from each frame's pts.
                    usleep(1000 * 20);


                } else if (receiveFrameState == AVERROR(EAGAIN)) {
                    // Not a real error: the decoder needs more input before it can emit a frame.
                    LOGD("decoder needs more input: AVERROR(EAGAIN)");
                } else if (receiveFrameState == AVERROR_EOF) {
                    LOGE("decode error AVERROR_EOF");
                } else if (receiveFrameState == AVERROR(EINVAL)) {
                    LOGE("decode error AVERROR(EINVAL)");
                } else if (receiveFrameState ==  AVERROR(ENOMEM)) {
                    LOGE("decode error AVERROR(ENOMEM)");
                } else {
                    LOGE("decode error unknown");
                }

            } else if(sendPacketState == AVERROR(EAGAIN)) {
                LOGD("send decode failed:AVERROR(EAGAIN)");
            } else if (sendPacketState == AVERROR_EOF) {
                LOGD("send decode failed:AVERROR_EOF");
            } else if (sendPacketState == AVERROR(EINVAL)) {
                LOGD("send decode failed:AVERROR(EINVAL)");
            } else if (sendPacketState == AVERROR(ENOMEM)) {
                LOGD("send decode failed:AVERROR(ENOMEM)");
            } else {
                LOGD("send decode failed:unknow");
            }

        } else if (packet->stream_index == audio_stream_index) {
            int sendPacketState = avcodec_send_packet(audioContext, packet);
            if (sendPacketState == 0) {
                while (avcodec_receive_frame(audioContext, frame) == 0) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    if (frame->pts != AV_NOPTS_VALUE) {
                        frame->pts = av_rescale_q(frame->pts, audioContext->time_base, tb);
                    } else if (frame->pkt_pts != AV_NOPTS_VALUE) {
                        // Fall back to the packet pts, rescaled from the packet time base.
                        frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(audioContext), tb);
                    } else {
                        frame->pts = packet->dts;
                    }


                    // swr_convert() takes the output capacity in samples per channel, not bytes.
                    int max_out_samples = MAX_AUDIO_FRME_SIZE /
                            (out_channel_nb * av_get_bytes_per_sample(out_sample_fmt));
                    int len2 = swr_convert(swrCtx, &out_buffer_audio, max_out_samples,
                                (const uint8_t **) (frame->extended_data), frame->nb_samples);
                    if (len2 < 0) {
                        LOGE("swr_convert failed");
                        continue;
                    }
                    // Size the Java buffer from the actual converted sample count (len2),
                    // which can differ from frame->nb_samples after resampling.
                    int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb, len2, out_sample_fmt, 1);
                    jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
                    jbyte *sample_bytep = env->GetByteArrayElements(audio_sample_array, NULL);
                    memcpy(sample_bytep, out_buffer_audio, out_buffer_size);
                    env->ReleaseByteArrayElements(audio_sample_array, sample_bytep, 0);
                    env->CallIntMethod(audioTrack->audiotrack, audioTrack->write_id, audio_sample_array, 0, out_buffer_size);

                    env->DeleteLocalRef(audio_sample_array);

                }
            } else {
                LOGE("send package error");
            }

        }

        av_packet_unref(packet);
    }
    LOGE("play complete");

    ANativeWindow_release(nativeWindow);
    av_frame_free(&outFrame);
    av_frame_free(&frame);

    av_packet_free(&packet);
    avcodec_free_context(&codecContext);
    avformat_close_input(&formatContext);
    avformat_free_context(formatContext);

    env->ReleaseStringUTFChars(url, input);

    return ret;
}
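The article does not show how this native function is bound to the Java side. For completeness, a minimal registration sketch; the class name com/example/player/MediaPlayer and the Java method name play are assumptions, so adjust them to your own project:

static const JNINativeMethod gMethods[] = {
        // Assumed Java declaration: private native int play(String url, Surface surface);
        {"play", "(Ljava/lang/String;Landroid/view/Surface;)I", (void *) MediaPlayer_play},
};

jint JNI_OnLoad(JavaVM *vm, void *reserved) {
    JNIEnv *env = NULL;
    if (vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6) != JNI_OK) {
        return JNI_ERR;
    }
    jclass clazz = env->FindClass("com/example/player/MediaPlayer"); // hypothetical class name
    if (clazz == NULL ||
        env->RegisterNatives(clazz, gMethods, sizeof(gMethods) / sizeof(gMethods[0])) != 0) {
        return JNI_ERR;
    }
    return JNI_VERSION_1_6;
}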
