Update vnc2mpg.c

This update makes the example work on versions of ffmpeg newer than "ancient," fixes a number of bugs along the way, and adds better documentation of the pitfalls.
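For reference, with the defaults in the updated code (output.mp4 as the output file, a 1 Mbit/s bitrate, 5 frames per second), a typical invocation of the built example looks something like "vnc2mpg -o capture.mp4 -t 3600 somehost:1", where -t caps the capture length in seconds; the exact binary name and location depend on how the client examples are built.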
tmcqueen-materials 8 years ago committed by GitHub
parent 32301cb73f
commit 709c8ea862

@@ -3,6 +3,7 @@
* Simple movie writer for vnc; based on Libavformat API example from FFMPEG
*
* Copyright (c) 2003 Fabrice Bellard, 2004 Johannes E. Schindelin
* Updates copyright (c) 2017 Tyrel M. McQueen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -25,412 +26,451 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <sys/time.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <rfb/rfbclient.h>
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define VNC_PIX_FMT AV_PIX_FMT_RGB565 /* pixel format generated by VNC client */
#define OUTPUT_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
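/* Note: VNC_PIX_FMT matches the pixel format requested from the VNC server in main() below
 * (rfbGetClient(5,3,2) with redShift=11/redMax=31, greenShift=5/greenMax=63, blueShift=0/blueMax=31,
 * i.e. 16-bpp RGB565), so the client framebuffer can serve directly as the data plane of an
 * RGB565 AVFrame. */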
/**************************************************************/
/* video output */
static int write_packet(AVFormatContext *oc, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
return av_interleaved_write_frame(oc, pkt);
}
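/* Worked example of the rescale above: the encoder time_base is 1/framerate (1/5 by default),
 * while the muxer picks the stream time_base itself. If the muxer happened to choose 1/90000
 * (an illustrative value only), a frame with pts 12 in encoder units would be rewritten to
 * 12 * 90000 / 5 = 216000 in stream units before being interleaved into the file. */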
/*************************************************/
/* video functions */
/* a wrapper around a single output video stream */
typedef struct {
AVStream *st;
AVCodec *codec;
AVCodecContext *enc;
int64_t pts;
AVFrame *frame;
AVFrame *tmp_frame;
struct SwsContext *sws;
} VideoOutputStream;
/* Add an output video stream. */
int add_video_stream(VideoOutputStream *ost, AVFormatContext *oc,
enum AVCodecID codec_id, int64_t br, int sr, int w, int h)
{
/* find the encoder */
ost->codec = avcodec_find_encoder(codec_id);
if (!(ost->codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
return -1;
} // no extra memory allocation from this call
if (ost->codec->type != AVMEDIA_TYPE_VIDEO) {
fprintf(stderr, "Encoder for '%s' does not seem to be for video.\n",
avcodec_get_name(codec_id));
return -2;
}
ost->enc = avcodec_alloc_context3(ost->codec);
if (!(ost->enc)) {
fprintf(stderr, "Could not alloc an encoding context\n");
return -3;
} // from now on need to call avcodec_free_context(&(ost->enc)) on error
/* Set codec parameters */
ost->enc->codec_id = codec_id;
ost->enc->bit_rate = br;
/* Resolution must be a multiple of two (round up to avoid buffer overflow). */
ost->enc->width = w + (w % 2);
ost->enc->height = h + (h % 2);
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
ost->enc->time_base = (AVRational){ 1, sr };
ost->enc->gop_size = 12; /* emit one intra frame every twelve frames at most */
ost->enc->pix_fmt = OUTPUT_PIX_FMT;
if (ost->enc->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
ost->enc->mb_decision = 2;
}
ost->st = avformat_new_stream(oc, ost->codec);
if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n");
avcodec_free_context(&(ost->enc));
return -4;
} // stream memory cleared up when oc is freed, so no need to do so later in this function on error
ost->st->id = oc->nb_streams-1;
ost->st->time_base = ost->enc->time_base;
ost->pts = 0;
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
ost->enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
// must wait to allocate frame buffers until codec is opened (in case codec changes the PIX_FMT)
return 0;
}
AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
int ret;
picture = av_frame_alloc();
if (!picture)
return NULL;
// from now on need to call av_frame_free(&picture) on error
picture->format = pix_fmt;
picture->width = width;
picture->height = height;
/* allocate the buffers for the frame data */
ret = av_frame_get_buffer(picture, 64);
if (ret < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
av_frame_free(&picture);
return NULL;
}
return picture;
} // use av_frame_free(&picture) to free memory from this call
int open_video(AVFormatContext *oc, VideoOutputStream *ost)
{
int ret;
/* open the codec */
ret = avcodec_open2(ost->enc, ost->codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
return ret;
} // memory from this call freed when oc is freed, no need to do it on error in this call
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters.\n");
return ret;
} // memory from this call is freed when oc (parent of ost->st) is freed, no need to do it on error in this call
/* allocate and init a re-usable frame */
ost->frame = alloc_picture(ost->enc->pix_fmt, ost->enc->width, ost->enc->height);
if (!(ost->frame)) {
fprintf(stderr, "Could not allocate video frame\n");
return -1;
} // from now on need to call av_frame_free(&(ost->frame)) on error
/* If the output format is not the same as the VNC format, then a temporary VNC format
* picture is needed too. It is then converted to the required
* output format. */
ost->tmp_frame = NULL;
ost->sws = NULL;
if (ost->enc->pix_fmt != VNC_PIX_FMT) {
ost->tmp_frame = alloc_picture(VNC_PIX_FMT, ost->enc->width, ost->enc->height);
if (!(ost->tmp_frame)) {
fprintf(stderr, "Could not allocate temporary picture\n");
av_frame_free(&(ost->frame));
return -2;
} // from now on need to call av_frame_free(&(ost->tmp_frame)) on error
ost->sws = sws_getCachedContext(ost->sws, ost->enc->width, ost->enc->height, VNC_PIX_FMT, ost->enc->width, ost->enc->height, ost->enc->pix_fmt, 0, NULL, NULL, NULL);
if (!(ost->sws)) {
fprintf(stderr, "Could not get sws context\n");
av_frame_free(&(ost->frame));
av_frame_free(&(ost->tmp_frame));
return -3;
} // from now on need to call sws_freeContext(ost->sws); ost->sws = NULL; on error
}
return 0;
}
/*
* encode current video frame and send it to the muxer
* return 0 on success, negative on error
*/
int write_video_frame(AVFormatContext *oc, VideoOutputStream *ost, int64_t pts)
{
int ret, ret2;
AVPacket pkt = { 0 };
if (pts <= ost->pts) return 0; // nothing to do
/* convert format if needed */
if (ost->tmp_frame) {
sws_scale(ost->sws, (const uint8_t * const *)ost->tmp_frame->data,
ost->tmp_frame->linesize, 0, ost->enc->height, ost->frame->data, ost->frame->linesize);
}
/* send the image to the encoder */
ost->pts = pts;
ost->frame->pts = ost->pts;
ret = avcodec_send_frame(ost->enc, ost->frame);
if (ret < 0) {
fprintf(stderr, "Error sending video frame to encoder: %s\n", av_err2str(ret));
return ret;
}
/* read all available packets */
ret2 = 0;
for (ret = avcodec_receive_packet(ost->enc, &pkt); ret == 0; ret = avcodec_receive_packet(ost->enc, &pkt)) {
ret2 = write_packet(oc, &(ost->enc->time_base), ost->st, &pkt);
if (ret2 < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret2));
/* continue on this error to not gum up encoder */
}
}
if (ret2 < 0) return ret2;
if (!(ret == AVERROR(EAGAIN))) return ret; // if AVERROR(EAGAIN), means all available packets output, need more frames (i.e. success)
return 0;
}
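/* Note on the send/receive loop above: avcodec_send_frame() may buffer input, and a single
 * frame can yield zero, one, or several packets from avcodec_receive_packet(); AVERROR(EAGAIN)
 * just means the encoder wants more input before it can emit another packet. Since the VNC
 * client keeps writing into the same frame buffer, this code assumes the encoder has consumed
 * the data it needs before the next update; a stricter (hypothetical) variant would call
 * av_frame_make_writable() or encode from a private copy of the frame. */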
/*
* Write final video frame (i.e. drain codec).
*/
int write_final_video_frame(AVFormatContext *oc, VideoOutputStream *ost)
{
int ret, ret2;
AVPacket pkt = { 0 };
/* send NULL image to encoder */
ret = avcodec_send_frame(ost->enc, NULL);
if (ret < 0) {
fprintf(stderr, "Error sending final video frame to encoder: %s\n", av_err2str(ret));
return ret;
}
/* read all available packets */
ret2 = 0;
for (ret = avcodec_receive_packet(ost->enc, &pkt); ret == 0; ret = avcodec_receive_packet(ost->enc, &pkt)) {
ret2 = write_packet(oc, &(ost->enc->time_base), ost->st, &pkt);
if (ret2 < 0) {
fprintf(stderr, "Error while writing final video frame: %s\n", av_err2str(ret2));
/* continue on this error to not gum up encoder */
}
}
if (ret2 < 0) return ret2;
if (ret != AVERROR_EOF) return ret; // AVERROR_EOF means the encoder is fully drained (i.e. success)
return 0;
}
void close_video_stream(VideoOutputStream *ost)
{
avcodec_free_context(&(ost->enc));
av_frame_free(&(ost->frame));
av_frame_free(&(ost->tmp_frame));
sws_freeContext(ost->sws); ost->sws = NULL;
ost->codec = NULL; /* codec not an allocated item */
ost->st = NULL; /* freeing parent oc will free this memory */
}
/**************************************************************/
/* Output movie handling */
AVFormatContext *movie_open(char *filename, VideoOutputStream *video_st, int br, int fr, int w, int h) {
int ret;
AVFormatContext *oc;
/* allocate the output media context. */
ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (ret < 0) {
fprintf(stderr, "Warning: Could not deduce output format from file extension: using MP4.\n");
ret = avformat_alloc_output_context2(&oc, NULL, "mp4", filename);
}
if (ret < 0) {
fprintf(stderr, "Error: Could not allocate media context: %s.\n", av_err2str(ret));
return NULL;
} // from now on, need to call avformat_free_context(oc); oc=NULL; to free memory on error
/* Add the video stream using the default format codec and initialize the codec. */
if (oc->oformat->video_codec != AV_CODEC_ID_NONE) {
ret = add_video_stream(video_st, oc, oc->oformat->video_codec, br, fr, w, h);
} else {
ret = -1;
}
if (ret < 0) {
fprintf(stderr, "Error: chosen output format does not have a video codec, or error %i\n", ret);
avformat_free_context(oc); oc = NULL;
return NULL;
} // from now on, need to call close_video_stream(video_st) to free memory on error
/* Now that all the parameters are set, we can open the codecs and allocate the necessary encode buffers. */
ret = open_video(oc, video_st);
if (ret < 0) {
fprintf(stderr, "Error: error opening video codec, error %i\n", ret);
close_video_stream(video_st);
avformat_free_context(oc); oc = NULL;
return NULL;
} // no additional calls required to free memory, as close_video_stream(video_st) will do it
/* open the output file, if needed */
if (!(oc->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
close_video_stream(video_st);
avformat_free_context(oc); oc = NULL;
return NULL;
}
} // will need to call avio_closep(&oc->pb) to free file handle on error
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when writing to output file: %s\n",
av_err2str(ret));
if (!(oc->oformat->flags & AVFMT_NOFILE))
avio_closep(&oc->pb);
close_video_stream(video_st);
avformat_free_context(oc); oc = NULL;
} // no additional items to free
return oc;
}
void movie_close(AVFormatContext **ocp, VideoOutputStream *video_st) {
AVFormatContext *oc = *ocp;
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
if (oc) {
if (video_st)
write_final_video_frame(oc, video_st);
av_write_trailer(oc);
/* Close the video codec. */
close_video_stream(video_st);
if (!(oc->oformat->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_closep(&oc->pb);
/* free the stream */
avformat_free_context(oc);
*ocp = NULL;
}
}
/**************************************************************/
/* VNC globals */
VideoOutputStream video_st = { 0 };
rfbClient *client = NULL;
rfbBool quit = FALSE;
char *filename = NULL;
AVFormatContext *oc = NULL;
int bitrate = 1000000;
int framerate = 5;
long max_time = 0;
struct timespec start_time, cur_time;
/* Signal handling */
void signal_handler(int signal) {
quit=TRUE;
}
/* returns time since start in pts units */
int64_t time_to_pts(int framerate, struct timespec *start_time, struct timespec *cur_time) {
time_t ds = cur_time->tv_sec - start_time->tv_sec;
long dns = cur_time->tv_nsec - start_time->tv_nsec;
/* use usecs */
int64_t dt = (int64_t)ds*(int64_t)1000000+(int64_t)dns/(int64_t)1000;
/* compute rv in units of frame number (rounding to nearest, not truncating) */
int64_t rv = (((int64_t)framerate)*dt + (int64_t)500000) / (int64_t)(1000000);
return rv;
}
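/* Worked example: with framerate = 5 and 2.3 s elapsed, dt = 2,300,000 usec and
 * rv = (5 * 2,300,000 + 500,000) / 1,000,000 = 12, i.e. 11.5 frame periods rounded
 * to the nearest whole frame number. */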
/**************************************************************/
/* VNC callback functions */
rfbBool vnc_malloc_fb(rfbClient* client) {
movie_close(&oc, &video_st);
oc = movie_open(filename, &video_st, bitrate, framerate, client->width, client->height);
if (!oc)
return FALSE;
signal(SIGINT,signal_handler);
signal(SIGTERM,signal_handler);
signal(SIGQUIT,signal_handler);
signal(SIGABRT,signal_handler);
/* These assignments assume the AVFrame buffer is contiguous. This is true in current ffmpeg versions for
* most non-HW accelerated bits, but may not be true globally. */
if(video_st.tmp_frame)
client->frameBuffer=video_st.tmp_frame->data[0];
else
client->frameBuffer=video_st.frame->data[0];
return TRUE;
}
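/* The direct-framebuffer trick above relies on the RGB565 frame's data[0] being one contiguous
 * width*height*2 block. If that ever stops holding (e.g. padded linesize), a hypothetical safer
 * variant would give the VNC client its own buffer and copy it into the frame row by row,
 * honouring linesize, roughly:
 *
 *   AVFrame *f = video_st.tmp_frame ? video_st.tmp_frame : video_st.frame;
 *   uint8_t *src = client->frameBuffer;  // separately malloc'ed width*height*2 buffer
 *   for (int y = 0; y < video_st.enc->height; y++)
 *       memcpy(f->data[0] + y * f->linesize[0], src + y * video_st.enc->width * 2,
 *              video_st.enc->width * 2);
 *
 * (2 bytes per pixel for RGB565; this is only a sketch, not code used by this file.) */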
void vnc_update(rfbClient* client,int x,int y,int w,int h) {
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
int i,j;
/* Initialize vnc client structure (don't connect yet). */
client = rfbGetClient(5,3,2);
client->format.redShift=11; client->format.redMax=31;
client->format.greenShift=5; client->format.greenMax=63;
client->format.blueShift=0; client->format.blueMax=31;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
if(!strncmp(argv[argc-1],":",1) ||
!strncmp(argv[argc-1],"127.0.0.1",9) ||
!strncmp(argv[argc-1],"localhost",9))
client->appData.encodingsString="raw";
/* Parse command line. */
for(i=1;i<argc;i++) {
j=i;
if(argc>i+1 && !strcmp("-o",argv[i])) {
filename=argv[i+1];
j+=2;
} else if(argc>i+1 && !strcmp("-t",argv[i])) {
max_time=atol(argv[i+1]);
if (max_time < 10 || max_time > 100000000) {
fprintf(stderr, "Warning: Nonsensical time-per-file %li, resetting to default.\n", max_time);
max_time = 0;
}
j+=2;
}
/* This is so that argc/argv are ready for passing to rfbInitClient */
if(j>i) {
argc-=j-i;
memmove(argv+i,argv+j,(argc-i)*sizeof(char*));
i--;
}
}
/* default filename. */
if (!filename) {
fprintf(stderr, "Warning: No filename specified. Using output.mp4\n");
filename = "output.mp4";
}
/* open VNC connection. */
client->MallocFrameBuffer=vnc_malloc_fb;
client->GotFrameBufferUpdate=vnc_update;
if(!rfbInitClient(client,&argc,argv)) {
printf("usage: %s [-o output_file] [-t seconds] server:port\n"
"Shoot a movie from a VNC server.\n", argv[0]);
exit(1);
printf("usage: %s [-o output_file] [-t seconds-per-file] server:port\n", argv[0]);
return 1;
}
if(client->serverPort==-1)
client->vncRec->doNotSleep = TRUE; /* vncrec playback */
/* main loop */
clock_gettime(CLOCK_MONOTONIC, &start_time);
while(!quit) {
int i=WaitForMessage(client,10000/framerate); /* wait at most 10000/framerate usec (1% of the frame period) so frames can be written on time */
if (i>0) {
if(!HandleRFBServerMessage(client))
quit=TRUE;
} else if (i<0) {
quit=TRUE;
}
if (!quit) {
clock_gettime(CLOCK_MONOTONIC, &cur_time);
write_video_frame(oc, &video_st, time_to_pts(framerate, &start_time, &cur_time));
if ((cur_time.tv_sec - start_time.tv_sec) > max_time && max_time > 0) {
quit = TRUE;
}
}
}
movie_close(&oc,&video_st);
return 0;
}
