Original: https://blog.csdn.net/liupenglove/article/details/103774105
Reference: https://blog.csdn.net/nonmarking/article/details/48140653

*****

This post goes a step further than the previous one, [FFmpeg filter study 1: watermarks and picture-in-picture with the movie and overlay filters](https://blog.csdn.net/liupenglove/article/details/103739302): this time the implementation can control the time window in which the sub-picture appears, how long it plays, and so on. It draws mainly on the article "Implementing Picture-in-Picture with FFmpeg (Part 1)" from the Dashixiong Wukong WeChat account. Let's walk through the implementation.

First, five questions:

1. Where in the main picture should the sub-picture be displayed?
2. At which point in the main picture's timeline should the sub-picture start playing?
3. From which point in the sub-picture's own timeline should it start playing?
4. Should the sub-picture be shown only during a given interval, or the whole time?
5. What happens when the sub-picture and the main picture have different durations?

The overlay filter answers most of these; its options can be inspected with:

```
ffmpeg -h filter=overlay
```

The x and y options settle question 1, the display position. The shortest option settles question 5, unequal durations. The enable option settles questions 2 and 4. Question 3 needs a second filter, setpts, which rewrites the sub-picture's timestamps relative to the main picture:

```
ffmpeg -h filter=setpts
```

The filter graph used here (note that the main input label is [in], the default name the program below gives its buffer source; a command-line equivalent and a variant that answers question 3 are sketched after the program):

```
const char *filter_descr = "movie=out1.mp4[in2];[in2]setpts=PTS[out2];[in][out2]overlay=x=20:y=120:enable='between(t,2,15)':shortest=1";
```

Now the code:

```
/*
 * Add a watermark to an existing video. The watermark can be an image or
 * another video; with a video the result is picture-in-picture.
 */
#include "myffmpeg/util.h"

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>

int open_input_file(AVFormatContext *fmt, AVCodecContext **codecctx, AVCodec *codec,
                    const char *filename, int index)
{
    int ret = 0;
    char msg[500];

    *codecctx = avcodec_alloc_context3(codec);
    ret = avcodec_parameters_to_context(*codecctx, fmt->streams[index]->codecpar);
    if (ret < 0) {
        sprintf(msg, "avcodec_parameters_to_context error,ret:%d\n", ret);
        lp_log(msg);
        return -1;
    }

    // Open the decoder.
    ret = avcodec_open2(*codecctx, codec, NULL);
    if (ret < 0) {
        sprintf(msg, "avcodec_open2 error,ret:%d\n", ret);
        lp_log(msg);
        return -2;
    }

    printf("pix:%d\n", (*codecctx)->pix_fmt);
    return ret;
}

int init_filter(AVFilterContext **buffersrc_ctx, AVFilterContext **buffersink_ctx,
                AVFilterGraph **filter_graph, AVStream *stream,
                AVCodecContext *codecctx, const char *filter_desc)
{
    int ret = -1;
    char args[512];
    char msg[500];
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *input = avfilter_inout_alloc();
    AVFilterInOut *output = avfilter_inout_alloc();
    enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};

    if (!output || !input || !filter_graph) {
        ret = -1;
        sprintf(msg, "avfilter_graph_alloc/avfilter_inout_alloc error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    // Describe the decoded input stream to the buffer source.
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             codecctx->width, codecctx->height, codecctx->pix_fmt,
             stream->time_base.num, stream->time_base.den,
             codecctx->sample_aspect_ratio.num, codecctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(buffersrc_ctx, buffersrc, "in", args, NULL, *filter_graph);
    if (ret < 0) {
        sprintf(msg, "avfilter_graph_create_filter buffersrc error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    ret = avfilter_graph_create_filter(buffersink_ctx, buffersink, "out", NULL, NULL, *filter_graph);
    if (ret < 0) {
        sprintf(msg, "avfilter_graph_create_filter buffersink error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    ret = av_opt_set_int_list(*buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        sprintf(msg, "av_opt_set_int_list error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    /*
     * The buffer source output must be connected to the input pad of the
     * first filter described by filter_desc; since the first filter input
     * label is not specified, it is set to "in" by default.
     */
    output->name = av_strdup("in");
    output->filter_ctx = *buffersrc_ctx;
    output->pad_idx = 0;
    output->next = NULL;

    /*
     * The buffer sink input must be connected to the output pad of the
     * last filter described by filter_desc; since the last filter output
     * label is not specified, it is set to "out" by default.
     */
    input->name = av_strdup("out");
    input->filter_ctx = *buffersink_ctx;
    input->pad_idx = 0;
    input->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(*filter_graph, filter_desc, &input, &output, NULL)) < 0) {
        sprintf(msg, "avfilter_graph_parse_ptr error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    if ((ret = avfilter_graph_config(*filter_graph, NULL)) < 0) {
        sprintf(msg, "avfilter_graph_config error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

end:
    avfilter_inout_free(&input);
    avfilter_inout_free(&output);
    return ret;
}

int my_filter(const char *name)
{
    int ret;
    char msg[500];
    // const char *filter_descr = "movie=my_logo.png[wm];[in][wm]overlay=10:10[out]";
    // const char *filter_descr = "scale=640:360,transpose=cclock";
    const char *filter_descr = "movie=out1.mp4[in2];[in2]setpts=PTS[out2];"
                               "[in][out2]overlay=x=20:y=120:enable='between(t,2,15)':shortest=1";
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx = NULL;
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
    AVCodec *codec;
    int video_stream_index = -1;
    AVPacket packet;
    AVFrame *pFrame = NULL;
    AVFrame *pFrame_out = NULL;

    filter_graph = avfilter_graph_alloc();
    FILE *fp_yuv = fopen("test.yuv", "wb+");

    ret = avformat_open_input(&pFormatCtx, name, NULL, NULL);
    if (ret < 0) {
        sprintf(msg, "avformat_open_input error,ret:%d\n", ret);
        lp_log(msg);
        ret = -1;
        goto end;
    }

    ret = avformat_find_stream_info(pFormatCtx, NULL);
    if (ret < 0) {
        sprintf(msg, "avformat_find_stream_info error,ret:%d\n", ret);
        lp_log(msg);
        ret = -2;
        goto end;
    }

    ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
    if (ret < 0) {
        sprintf(msg, "av_find_best_stream error,ret:%d\n", ret);
        lp_log(msg);
        ret = -3;
        goto end;
    }
    // av_find_best_stream returns the index of the video stream.
    video_stream_index = ret;

    av_dump_format(pFormatCtx, 0, name, 0);

    if ((ret = open_input_file(pFormatCtx, &pCodecCtx, codec, name, video_stream_index)) < 0) {
        ret = -4;
        sprintf(msg, "open_input_file error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    if ((ret = init_filter(&buffersrc_ctx, &buffersink_ctx, &filter_graph,
                           pFormatCtx->streams[video_stream_index], pCodecCtx, filter_descr)) < 0) {
        ret = -5;
        sprintf(msg, "init_filter error,ret:%d\n", ret);
        lp_log(msg);
        goto end;
    }

    pFrame = av_frame_alloc();
    pFrame_out = av_frame_alloc();

    while (1) {
        if ((ret = av_read_frame(pFormatCtx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            ret = avcodec_send_packet(pCodecCtx, &packet);
            if (ret < 0) {
                sprintf(msg, "avcodec_send_packet error,ret:%d\n", ret);
                lp_log(msg);
                break;
            }

            while (ret >= 0) {
                ret = avcodec_receive_frame(pCodecCtx, pFrame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    sprintf(msg, "avcodec_receive_frame error,ret:%d\n", ret);
                    lp_log(msg);
                    goto end;
                }

                pFrame->pts = pFrame->best_effort_timestamp;

                /* Push the decoded frame into the filtergraph. */
                ret = av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF);
                if (ret < 0) {
                    sprintf(msg, "av_buffersrc_add_frame_flags error,ret:%d\n", ret);
                    lp_log(msg);
                    break;
                }

                /* Pull filtered frames from the filtergraph. */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, pFrame_out);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0) {
                        ret = -6;
                        goto end;
                    }

                    if (pFrame_out->format == AV_PIX_FMT_YUV420P) {
                        // Write the Y, U and V planes row by row, skipping the
                        // linesize padding at the end of each row.
                        for (int i = 0; i < pFrame_out->height; i++)
                            fwrite(pFrame_out->data[0] + pFrame_out->linesize[0] * i, 1, pFrame_out->width, fp_yuv);
                        for (int i = 0; i < pFrame_out->height / 2; i++)
                            fwrite(pFrame_out->data[1] + pFrame_out->linesize[1] * i, 1, pFrame_out->width / 2, fp_yuv);
                        for (int i = 0; i < pFrame_out->height / 2; i++)
                            fwrite(pFrame_out->data[2] + pFrame_out->linesize[2] * i, 1, pFrame_out->width / 2, fp_yuv);
                    }
                    av_frame_unref(pFrame_out);
                }
                av_frame_unref(pFrame);
            }
        }
        av_packet_unref(&packet);
    }

end:
    av_frame_free(&pFrame);
    av_frame_free(&pFrame_out);
    avfilter_graph_free(&filter_graph);
    avcodec_free_context(&pCodecCtx);
    avformat_close_input(&pFormatCtx);
    if (fp_yuv)
        fclose(fp_yuv);
    return ret;
}
} // extern "C"
```
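One gap worth noting: the loop above never flushes the decoder or the buffer source at EOF, so the last few buffered frames never reach test.yuv. A sketch of the flush, not in the original post, using the same drain pattern as the main loop (error handling elided):

```
// Sketch: drain the decoder, then signal EOF to the filtergraph and drain it.
avcodec_send_packet(pCodecCtx, NULL);                  // enter draining mode
while (avcodec_receive_frame(pCodecCtx, pFrame) >= 0) {
    pFrame->pts = pFrame->best_effort_timestamp;
    av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF);
    av_frame_unref(pFrame);
}
av_buffersrc_add_frame_flags(buffersrc_ctx, NULL, 0);  // EOF into the graph
while (av_buffersink_get_frame(buffersink_ctx, pFrame_out) >= 0) {
    /* write pFrame_out exactly as in the main loop */
    av_frame_unref(pFrame_out);
}
```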
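The post stops at my_filter and shows no driver. A minimal sketch of one, assuming it sits in the same translation unit; "input.mp4" is a placeholder name:

```
int main(int argc, char *argv[])
{
    // Hypothetical entry point, not from the original post.
    const char *input = (argc > 1) ? argv[1] : "input.mp4";  // placeholder
    int ret = my_filter(input);
    // my_filter leaves its read loop on AVERROR_EOF, so a negative value
    // here is not necessarily a failure; test.yuv is written either way.
    printf("my_filter returned %d\n", ret);
    return 0;
}
```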
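test.yuv is raw planar YUV with no header, so a player must be told the geometry. Assuming the input video is 1280x720 (adjust to the real dimensions), the result can be checked with:

```
ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 test.yuv
```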
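The same graph can also be tried without compiling anything. A command-line sketch using -filter_complex, with main.mp4 and pip.mp4 as placeholder file names; note that on the command line the main input is referenced as [0:v] rather than the [in] label the program registers for its buffer source:

```
ffmpeg -i main.mp4 -filter_complex "movie=out1.mp4,setpts=PTS[out2];[0:v][out2]overlay=x=20:y=120:enable='between(t,2,15)':shortest=1" pip.mp4
```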
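Finally, the graph above uses setpts=PTS, a zero offset, so question 3 is not actually exercised. One way to handle it, sketched here with arbitrary numbers: the movie filter's seek_point option starts the sub video from inside its own timeline, and adding a constant in setpts shifts when its frames appear in the main picture:

```
// Sketch, not from the original post: play out1.mp4 from its own 3-second
// mark (seek_point=3) and delay its frames by 2 seconds (PTS+2/TB) so the
// sub-picture enters 2 seconds into the main video.
const char *filter_descr_q3 =
    "movie=out1.mp4:seek_point=3[in2];"
    "[in2]setpts=PTS+2/TB[out2];"
    "[in][out2]overlay=x=20:y=120:enable='between(t,2,15)':shortest=1";
```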