This article collects typical usage examples of the put_buffer function in C/C++ code. If you have been wondering what put_buffer does in practice, how to call it, or what real-world uses of put_buffer look like, the hand-picked examples below should help.
The following shows 15 code examples of the put_buffer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
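Most of the examples on this page come from the legacy FFmpeg libavformat avio layer, where put_buffer(ByteIOContext *s, const unsigned char *buf, int size) writes size raw bytes into a byte I/O context (in later FFmpeg releases this call was renamed to avio_write). As a quick orientation before the examples, here is a minimal sketch of the typical write pattern; it assumes that legacy API, and the helper name write_blob and the 0x42 record tag are invented purely for illustration.

/* Minimal sketch, assuming the legacy libavformat avio API used in the
 * examples below (ByteIOContext, put_byte, put_be32, put_buffer,
 * put_flush_packet). write_blob() and the 0x42 tag are illustrative,
 * not taken from any of the examples. */
#include "avformat.h"            /* old in-tree header exposing the avio calls */

static void write_blob(ByteIOContext *pb, const uint8_t *data, int size)
{
    put_byte(pb, 0x42);          /* hypothetical one-byte record tag   */
    put_be32(pb, size);          /* payload length, big-endian         */
    put_buffer(pb, data, size);  /* copy the raw payload into pb       */
    put_flush_packet(pb);        /* flush buffered bytes to the output */
}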
Example 1: gxf_write_mpeg_auxiliary
static int gxf_write_mpeg_auxiliary(ByteIOContext *pb, GXFStreamContext *ctx)
{
    char buffer[1024];
    int size, starting_line;

    if (ctx->iframes) {
        ctx->p_per_gop = ctx->pframes / ctx->iframes;
        if (ctx->pframes % ctx->iframes)
            ctx->p_per_gop++;
        if (ctx->pframes) {
            ctx->b_per_i_or_p = ctx->bframes / ctx->pframes;
            if (ctx->bframes % ctx->pframes)
                ctx->b_per_i_or_p++;
        }
        if (ctx->p_per_gop > 9)
            ctx->p_per_gop = 9;    /* ensure value won't take more than one char */
        if (ctx->b_per_i_or_p > 9)
            ctx->b_per_i_or_p = 9; /* ensure value won't take more than one char */
    }
    if (ctx->codec->height == 512 || ctx->codec->height == 608)
        starting_line = 7; // VBI
    else if (ctx->codec->height == 480)
        starting_line = 20;
    else
        starting_line = 23; // default PAL

    size = snprintf(buffer, 1024, "Ver 1\nBr %.6f\nIpg 1\nPpi %d\nBpiop %d\n"
                    "Pix 0\nCf %d\nCg %d\nSl %d\nnl16 %d\nVi 1\nf1 1\n",
                    (float)ctx->codec->bit_rate, ctx->p_per_gop, ctx->b_per_i_or_p,
                    ctx->codec->pix_fmt == PIX_FMT_YUV422P ? 2 : 1, ctx->first_gop_closed == 1,
                    starting_line, ctx->codec->height / 16);
    put_byte(pb, TRACK_MPG_AUX);
    put_byte(pb, size + 1);
    put_buffer(pb, (uint8_t *)buffer, size + 1);
    return size + 3;
}
Example 2: rm_write_audio
static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    uint8_t *buf1;
    RMContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    StreamInfo *stream = rm->audio_stream;
    int i;

    /* XXX: suppress this malloc */
    buf1 = (uint8_t *) av_malloc(size * sizeof(uint8_t));
    if (!buf1)
        return AVERROR(ENOMEM);

    write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));

    /* for AC-3, the words seem to be reversed */
    for (i = 0; i < size; i += 2) {
        buf1[i]     = buf[i + 1];
        buf1[i + 1] = buf[i];
    }

    put_buffer(pb, buf1, size);
    put_flush_packet(pb);

    stream->nb_frames++;
    av_free(buf1);
    return 0;
}
Example 3: flv_write_header
//......... part of the code is omitted here .........
/* write meta_tag */
put_byte(pb, 18); // tag type META
metadata_size_pos= url_ftell(pb);
put_be24(pb, 0); // size of data part (sum of all parts below)
put_be24(pb, 0); // time stamp
put_be32(pb, 0); // reserved
/* now data of data_size size */
/* first event name as a string */
put_byte(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "onMetaData"); // 12 bytes
/* mixed array (hash) with size and string/type/data tuples */
put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
put_be32(pb, 5*!!video_enc + 5*!!audio_enc + 2); // +2 for duration and file size
put_amf_string(pb, "duration");
flv->duration_offset= url_ftell(pb);
put_amf_double(pb, s->duration / AV_TIME_BASE); // fill in the guessed duration, it'll be corrected later if incorrect
if(video_enc) {
put_amf_string(pb, "width");
put_amf_double(pb, video_enc->width);
put_amf_string(pb, "height");
put_amf_double(pb, video_enc->height);
put_amf_string(pb, "videodatarate");
put_amf_double(pb, video_enc->bit_rate / 1024.0);
put_amf_string(pb, "framerate");
put_amf_double(pb, framerate);
put_amf_string(pb, "videocodecid");
put_amf_double(pb, video_enc->codec_tag);
}
if(audio_enc) {
put_amf_string(pb, "audiodatarate");
put_amf_double(pb, audio_enc->bit_rate / 1024.0);
put_amf_string(pb, "audiosamplerate");
put_amf_double(pb, audio_enc->sample_rate);
put_amf_string(pb, "audiosamplesize");
put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_U8 ? 8 : 16);
put_amf_string(pb, "stereo");
put_amf_bool(pb, audio_enc->channels == 2);
put_amf_string(pb, "audiocodecid");
put_amf_double(pb, audio_enc->codec_tag);
}
put_amf_string(pb, "filesize");
flv->filesize_offset= url_ftell(pb);
put_amf_double(pb, 0); // delayed write
put_amf_string(pb, "");
put_byte(pb, AMF_END_OF_OBJECT);
/* write total size of tag */
data_size= url_ftell(pb) - metadata_size_pos - 10;
url_fseek(pb, metadata_size_pos, SEEK_SET);
put_be24(pb, data_size);
url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
put_be32(pb, data_size + 11);
for (i = 0; i < s->nb_streams; i++) {
AVCodecContext *enc = s->streams[i]->codec;
if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264) {
int64_t pos;
put_byte(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
put_be24(pb, 0); // size patched later
put_be24(pb, 0); // ts
put_byte(pb, 0); // ts ext
put_be24(pb, 0); // streamid
pos = url_ftell(pb);
if (enc->codec_id == CODEC_ID_AAC) {
put_byte(pb, get_audio_flags(enc));
put_byte(pb, 0); // AAC sequence header
put_buffer(pb, enc->extradata, enc->extradata_size);
} else {
put_byte(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
put_byte(pb, 0); // AVC sequence header
put_be24(pb, 0); // composition time
ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
}
data_size = url_ftell(pb) - pos;
url_fseek(pb, -data_size - 10, SEEK_CUR);
put_be24(pb, data_size);
url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
put_be32(pb, data_size + 11); // previous tag size
}
}
return 0;
}
Example 4: au_write_packet
static int au_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    put_buffer(pb, pkt->data, pkt->size);
    return 0;
}
Example 5: img_write_packet
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext *pb[3];
    char filename[1024];
    AVCodecContext *codec = s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 && img->img_number > 1) {
            av_log(s, AV_LOG_ERROR, "Could not get frame filename from pattern\n");
            return AVERROR(EIO);
        }
        for (i = 0; i < 3; i++) {
            if (url_fopen(&pb[i], filename, URL_WRONLY) < 0) {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n", filename);
                return AVERROR(EIO);
            }
            if (codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ] = 'U' + i;
        }
    } else {
        pb[0] = s->pb;
    }

    if (codec->codec_id == CODEC_ID_RAWVIDEO) {
        int ysize = codec->width * codec->height;
        put_buffer(pb[0], pkt->data        , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize + (pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    } else {
        if (av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000) {
            AVStream *st = s->streams[0];
            if (st->codec->extradata_size > 8 &&
                AV_RL32(st->codec->extradata + 4) == MKTAG('j','p','2','h')) {
                if (pkt->size < 8 || AV_RL32(pkt->data + 4) != MKTAG('j','p','2','c'))
                    goto error;
                put_be32(pb[0], 12);
                put_tag (pb[0], "jP  ");
                put_be32(pb[0], 0x0D0A870A); // signature
                put_be32(pb[0], 20);
                put_tag (pb[0], "ftyp");
                put_tag (pb[0], "jp2 ");
                put_be32(pb[0], 0);
                put_tag (pb[0], "jp2 ");
                put_buffer(pb[0], st->codec->extradata, st->codec->extradata_size);
            } else if (pkt->size < 8 ||
                       (!st->codec->extradata_size &&
                        AV_RL32(pkt->data + 4) != MKTAG('j','P',' ',' '))) { // signature
            error:
                av_log(s, AV_LOG_ERROR, "malformated jpeg2000 codestream\n");
                return -1;
            }
        }
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }
    img->img_number++;
    return 0;
}
Example 6: swf_write_header
//......... part of the code is omitted here .........
put_tag(pb, "FWS");
if (!strcmp("avm2", s->oformat->name))
version = 9;
else if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_VP6F)
version = 8; /* version 8 and above support VP6 codec */
else if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_FLV1)
version = 6; /* version 6 and above support FLV1 codec */
else
version = 4; /* version 4 for mpeg audio support */
put_byte(pb, version);
put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
(will be patched if not streamed) */
put_swf_rect(pb, 0, width * 20, 0, height * 20);
put_le16(pb, (rate * 256) / rate_base); /* frame rate */
swf->duration_pos = url_ftell(pb);
put_le16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */
/* avm2/swf v9 (also v8?) files require a file attribute tag */
if (version == 9) {
put_swf_tag(s, TAG_FILEATTRIBUTES);
put_le32(pb, 1<<3); /* set ActionScript v3/AVM2 flag */
put_swf_end_tag(s);
}
/* define a shape with the jpeg inside */
if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_MJPEG) {
put_swf_tag(s, TAG_DEFINESHAPE);
put_le16(pb, SHAPE_ID); /* ID of shape */
/* bounding rectangle */
put_swf_rect(pb, 0, width, 0, height);
/* style info */
put_byte(pb, 1); /* one fill style */
put_byte(pb, 0x41); /* clipped bitmap fill */
put_le16(pb, BITMAP_ID); /* bitmap ID */
/* position of the bitmap */
put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
put_byte(pb, 0); /* no line style */
/* shape drawing */
init_put_bits(&p, buf1, sizeof(buf1));
put_bits(&p, 4, 1); /* one fill bit */
put_bits(&p, 4, 0); /* zero line bit */
put_bits(&p, 1, 0); /* not an edge */
put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
put_bits(&p, 5, 1); /* nbits */
put_bits(&p, 1, 0); /* X */
put_bits(&p, 1, 0); /* Y */
put_bits(&p, 1, 1); /* set fill style 1 */
/* draw the rectangle ! */
put_swf_line_edge(&p, width, 0);
put_swf_line_edge(&p, 0, height);
put_swf_line_edge(&p, -width, 0);
put_swf_line_edge(&p, 0, -height);
/* end of shape */
put_bits(&p, 1, 0); /* not an edge */
put_bits(&p, 5, 0);
flush_put_bits(&p);
put_buffer(pb, buf1, pbBufPtr(&p) - p.buf);
put_swf_end_tag(s);
}
if (swf->audio_enc && swf->audio_enc->codec_id == CODEC_ID_MP3) {
int v = 0;
/* start sound */
put_swf_tag(s, TAG_STREAMHEAD2);
switch(swf->audio_enc->sample_rate) {
case 11025: v |= 1 << 2; break;
case 22050: v |= 2 << 2; break;
case 44100: v |= 3 << 2; break;
default:
/* not supported */
av_log(s, AV_LOG_ERROR, "swf does not support that sample rate, choose from (44100, 22050, 11025).\n");
return -1;
}
v |= 0x02; /* 16 bit playback */
if (swf->audio_enc->channels == 2)
v |= 0x01; /* stereo playback */
put_byte(s->pb, v);
v |= 0x20; /* mp3 compressed */
put_byte(s->pb, v);
put_le16(s->pb, swf->samples_per_frame); /* avg samples per frame */
put_le16(s->pb, 0);
put_swf_end_tag(s);
}
put_flush_packet(s->pb);
return 0;
}
Example 7: flac_write_packet
static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    put_flush_packet(s->pb);
    return 0;
}
Example 8: rtp_check_and_send_back_rr
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
{
ByteIOContext *pb;
uint8_t *buf;
int len;
int rtcp_bytes;
RTPStatistics *stats= &s->statistics;
uint32_t lost;
uint32_t extended_max;
uint32_t expected_interval;
uint32_t received_interval;
uint32_t lost_interval;
uint32_t expected;
uint32_t fraction;
uint64_t ntp_time= s->last_rtcp_ntp_time; // TODO: Get local ntp time?
if (!s->rtp_ctx || (count < 1))
return -1;
/* TODO: I think this is way too often; RFC 1889 has algorithm for this */
/* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
s->octet_count += count;
rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
RTCP_TX_RATIO_DEN;
rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
if (rtcp_bytes < 28)
return -1;
s->last_octet_count = s->octet_count;
if (url_open_dyn_buf(&pb) < 0)
return -1;
// Receiver Report
put_byte(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
put_byte(pb, 201);
put_be16(pb, 7); /* length in words - 1 */
put_be32(pb, s->ssrc); // our own SSRC
put_be32(pb, s->ssrc); // XXX: should be the server's here!
// some placeholders we should really fill...
// RFC 1889/p64
extended_max= stats->cycles + stats->max_seq;
expected= extended_max - stats->base_seq + 1;
lost= expected - stats->received;
lost= FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
expected_interval= expected - stats->expected_prior;
stats->expected_prior= expected;
received_interval= stats->received - stats->received_prior;
stats->received_prior= stats->received;
lost_interval= expected_interval - received_interval;
if (expected_interval==0 || lost_interval<=0) fraction= 0;
else fraction = (lost_interval<<8)/expected_interval;
fraction= (fraction<<24) | lost;
put_be32(pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
put_be32(pb, extended_max); /* max sequence received */
put_be32(pb, stats->jitter>>4); /* jitter */
if(s->last_rtcp_ntp_time==AV_NOPTS_VALUE)
{
put_be32(pb, 0); /* last SR timestamp */
put_be32(pb, 0); /* delay since last SR */
} else {
uint32_t middle_32_bits= s->last_rtcp_ntp_time>>16; // this is valid, right? do we need to handle 64 bit values special?
uint32_t delay_since_last= ntp_time - s->last_rtcp_ntp_time;
put_be32(pb, middle_32_bits); /* last SR timestamp */
put_be32(pb, delay_since_last); /* delay since last SR */
}
// CNAME
put_byte(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
put_byte(pb, 202);
len = strlen(s->hostname);
put_be16(pb, (6 + len + 3) / 4); /* length in words - 1 */
put_be32(pb, s->ssrc);
put_byte(pb, 0x01);
put_byte(pb, len);
put_buffer(pb, s->hostname, len);
// padding
for (len = (6 + len) % 4; len % 4; len++) {
put_byte(pb, 0);
}
put_flush_packet(pb);
len = url_close_dyn_buf(pb, &buf);
if ((len > 0) && buf) {
int result;
dprintf(s->ic, "sending %d bytes of RR\n", len);
result= url_write(s->rtp_ctx, buf, len);
dprintf(s->ic, "result from url_write: %d\n", result);
av_free(buf);
}
return 0;
}
Example 9: put_frame
static void put_frame(
AVFormatContext *s,
ASFStream *stream,
AVStream *avst,
int timestamp,
const uint8_t *buf,
int m_obj_size,
int flags
)
{
ASFContext *asf = s->priv_data;
int m_obj_offset, payload_len, frag_len1;
m_obj_offset = 0;
while (m_obj_offset < m_obj_size) {
payload_len = m_obj_size - m_obj_offset;
if (asf->packet_timestamp_start == -1) {
asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);
asf->packet_size_left = PACKET_SIZE;
if (asf->multi_payloads_present){
frag_len1 = MULTI_PAYLOAD_CONSTANT - 1;
}
else {
frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH;
}
asf->packet_timestamp_start = timestamp;
}
else {
// multi payloads
frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS - PACKET_HEADER_MIN_SIZE - 1;
if(frag_len1 < payload_len && avst->codec->codec_type == CODEC_TYPE_AUDIO){
flush_packet(s);
continue;
}
}
if (frag_len1 > 0) {
if (payload_len > frag_len1)
payload_len = frag_len1;
else if (payload_len == (frag_len1 - 1))
payload_len = frag_len1 - 2; //additional byte need to put padding length
put_payload_header(s, stream, timestamp+PREROLL_TIME, m_obj_size, m_obj_offset, payload_len, flags);
put_buffer(&asf->pb, buf, payload_len);
if (asf->multi_payloads_present)
asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS);
else
asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD);
asf->packet_timestamp_end = timestamp;
asf->packet_nb_payloads++;
} else {
payload_len = 0;
}
m_obj_offset += payload_len;
buf += payload_len;
if (!asf->multi_payloads_present)
flush_packet(s);
else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + PACKET_HEADER_MIN_SIZE + 1))
flush_packet(s);
}
stream->seq++;
}
Example 10: put_guid
static void put_guid(ByteIOContext *s, const GUID *g)
{
    assert(sizeof(*g) == 16);
    put_buffer(s, *g, sizeof(*g));
}
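Note that this helper relies on GUID being defined as a 16-byte array type, which is why *g can be passed directly as the buffer pointer (the assert documents the expected size). Example 12 (asf_write_header1) below uses this helper to emit the GUID that identifies each ASF header object.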
Example 11: transmit_data
//......... part of the code is omitted here .........
error = errno;
(void) unlink(tempfile);
(void) rmdir(tempdir);
errno = error;
efatal("lstat", error);
}
exists = 0;
} else {
if ((st.st_mode&S_IFMT) != S_IFREG) {
(void) unlink(tempfile);
(void) rmdir(tempdir);
fatal("tempfile no longer a regular file");
}
exists = 1;
}
if (debug)
diag("exists %d", exists);
if ( ! put_exists ( &exists ) ) {
(void) unlink(tempfile);
(void) rmdir(tempdir);
efatal("write failed");
}
if (!exists) {
(void) unlink(tempfile);
return;
}
perm = st.st_mode&0777;
if (debug)
diag("perm %#o", perm);
if ( ! put_perm ( &perm ) ) {
(void) unlink(tempfile);
(void) rmdir(tempdir);
efatal("write failed");
}
count = st.st_size;
if (debug)
diag("count %d", count);
if ( ! put_count ( &count ) ) {
(void) unlink(tempfile);
(void) rmdir(tempdir);
efatal("write failed");
}
count = st.st_size;
if (count == 0) {
(void) unlink(tempfile);
#ifdef notdef
(void) rmdir(tempdir);
#endif
return;
}
if ((fd = open(tempfile, O_RDONLY, 0)) < 0) {
(void) unlink(tempfile);
(void) rmdir(tempdir);
efatal("open failed");
}
if ((bufsize = count) > 10*1024)
buffer = (char *) malloc(bufsize = 10*1024);
else
buffer = (char *) malloc(bufsize);
if (buffer == NULL) {
(void) unlink(tempfile);
(void) rmdir(tempdir);
fatal("malloc failed");
}
if (debug)
diag("bufsize %d", bufsize);
while (count != 0) {
if (debug)
diag("loop count %d", count);
if ((len = read(fd, buffer, bufsize)) <= 0) {
error = errno;
(void) close(fd);
(void) unlink(tempfile);
(void) rmdir(tempdir);
if (len == 0)
fatal("1 premature EOF");
errno = error;
efatal("server read", error);
}
if (debug)
diag("read %d bytes", len);
put_buffer ( buffer, len, &len2, fd );
if (debug)
diag("wrote %d bytes", len2);
count -= len;
if (count < bufsize)
bufsize = count;
}
if (close(fd) < 0) {
error = errno;
(void) unlink(tempfile);
(void) rmdir(tempdir);
errno = error;
efatal("close");
}
(void) free(buffer);
(void) unlink(tempfile);
if (debug)
diag("transfer complete");
}
Example 12: asf_write_header1
/* write the header (used two times if non streamed) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
{
ASFContext *asf = s->priv_data;
ByteIOContext *pb = s->pb;
AVMetadataTag *tags[5];
int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
int has_title;
int metadata_count;
AVCodecContext *enc;
int64_t header_offset, cur_pos, hpos;
int bit_rate;
int64_t duration;
ff_metadata_conv(&s->metadata, ff_asf_metadata_conv, NULL);
tags[0] = av_metadata_get(s->metadata, "title" , NULL, 0);
tags[1] = av_metadata_get(s->metadata, "author" , NULL, 0);
tags[2] = av_metadata_get(s->metadata, "copyright", NULL, 0);
tags[3] = av_metadata_get(s->metadata, "comment" , NULL, 0);
tags[4] = av_metadata_get(s->metadata, "rating" , NULL, 0);
duration = asf->duration + PREROLL_TIME * 10000;
has_title = tags[0] || tags[1] || tags[2] || tags[3] || tags[4];
metadata_count = s->metadata ? s->metadata->count : 0;
bit_rate = 0;
for(n=0;n<s->nb_streams;n++) {
enc = s->streams[n]->codec;
av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */
bit_rate += enc->bit_rate;
}
if (asf->is_streamed) {
put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
}
put_guid(pb, &ff_asf_header);
put_le64(pb, -1); /* header length, will be patched after */
put_le32(pb, 3 + has_title + !!metadata_count + s->nb_streams); /* number of chunks in header */
put_byte(pb, 1); /* ??? */
put_byte(pb, 2); /* ??? */
/* file header */
header_offset = url_ftell(pb);
hpos = put_header(pb, &ff_asf_file_header);
put_guid(pb, &ff_asf_my_guid);
put_le64(pb, file_size);
file_time = 0;
put_le64(pb, unix_to_file_time(file_time));
put_le64(pb, asf->nb_packets); /* number of packets */
put_le64(pb, duration); /* end time stamp (in 100ns units) */
put_le64(pb, asf->duration); /* duration (in 100ns units) */
put_le64(pb, PREROLL_TIME); /* start time stamp */
put_le32(pb, (asf->is_streamed || url_is_streamed(pb)) ? 3 : 2); /* ??? */
put_le32(pb, s->packet_size); /* packet size */
put_le32(pb, s->packet_size); /* packet size */
put_le32(pb, bit_rate); /* Nominal data rate in bps */
end_header(pb, hpos);
/* unknown headers */
hpos = put_header(pb, &ff_asf_head1_guid);
put_guid(pb, &ff_asf_head2_guid);
put_le32(pb, 6);
put_le16(pb, 0);
end_header(pb, hpos);
/* title and other infos */
if (has_title) {
int len;
uint8_t *buf;
ByteIOContext *dyn_buf;
if (url_open_dyn_buf(&dyn_buf) < 0)
return AVERROR(ENOMEM);
hpos = put_header(pb, &ff_asf_comment_header);
for (n = 0; n < FF_ARRAY_ELEMS(tags); n++) {
len = tags[n] ? avio_put_str16le(dyn_buf, tags[n]->value) : 0;
put_le16(pb, len);
}
len = url_close_dyn_buf(dyn_buf, &buf);
put_buffer(pb, buf, len);
av_freep(&buf);
end_header(pb, hpos);
}
if (metadata_count) {
AVMetadataTag *tag = NULL;
hpos = put_header(pb, &ff_asf_extended_content_header);
put_le16(pb, metadata_count);
while ((tag = av_metadata_get(s->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
put_str16(pb, tag->key);
put_le16(pb, 0);
put_str16(pb, tag->value);
}
end_header(pb, hpos);
}
//......... part of the code is omitted here .........
Example 13: put_string
void put_string(StubState *state, const char * str)
{
    put_buffer(state, (const uint8_t *) str, strlen(str));
}
Example 14: top_twenty
void top_twenty(integer this_many)
{
/*{ Enters a players name on the top twenty list -JWT- }*/
string list[MAX_HIGH_SCORES+2];
integer players_line = 0;
integer i1,i2,i3,i4;
int n1;
vtype o1,s1;
FILE *f1;
boolean flag;
char ch;
if (py.misc.cheated) {
exit_game();
}
clear_screen();
if (!read_top_scores(&f1, MORIA_TOP, list, MAX_HIGH_SCORES, &n1, s1)) {
prt(s1,2,1);
prt("",3,1);
} else {
i3 = total_points();
flag = false;
if (i3 == 0) {
i1 = n1;
} else {
for (i1=1; (i1 <= n1) && !flag ; ) { /* XXXX check for corruption */
sscanf(&(list[i1][13]),"%ld",&i4);
if (i4 < i3) {
flag = true;
} else {
i1++;
}
}
}
if ((i3 > 0) && ((flag) || (n1 == 0) || (n1 < MAX_HIGH_SCORES))) {
for (i2 = MAX_HIGH_SCORES-1; i2 >= i1 ; i2--) {
strcpy(list[i2+1], list[i2]);
}
user_name(o1);
format_top_score(list[i1], o1, i3, PM.diffic, PM.name,
PM.lev, PM.race, PM.tclass);
if (n1 < MAX_HIGH_SCORES) {
n1++;
}
max_score = n1;
players_line = i1;
flag = false;
write_top_scores(&f1, list, n1);
} else {
/* did not get a high score */
max_score = 20;
}
if (!close_top_scores(&f1)) {
prt("Error unlocking score file.",2,1);
prt("",3,1);
}
put_buffer("Username Points Diff Character name Level Race Class",1,1);
put_buffer("____________ ________ _ ________________________ __ __________ ________________",2,1);
i2 = 3;
if (max_score > n1) {
max_score = n1;
}
if (this_many > 0) {
if (this_many > MAX_HIGH_SCORES) {
max_score = MAX_HIGH_SCORES;
} else {
max_score = this_many;
}
}
for (i1 = 1; i1 <= max_score; i1++) {
/*insert_str(list[i1],chr(7),''); XXXX why? */
if (i1 == players_line) {
put_buffer_attr(list[i1],i2,1, A_REVERSE);
} else {
put_buffer(list[i1],i2,1);
}
if ((i1 != 1) && ((i1 % 20) == 0) && (i1 != max_score)) {
prt("[Press any key to continue, or <Control>-Z to exit]",
24,1);
ch = inkey();
switch (ch) {
case 3: case 25: case 26:
erase_line(24,1);
put_buffer(" ",23,13);
//......... part of the code is omitted here .........
Example 15: ffm_write_header
static int ffm_write_header(AVFormatContext *s)
{
FFMContext *ffm = s->priv_data;
AVStream *st;
ByteIOContext *pb = s->pb;
AVCodecContext *codec;
int bit_rate, i;
ffm->packet_size = FFM_PACKET_SIZE;
/* header */
put_le32(pb, MKTAG('F', 'F', 'M', '1'));
put_be32(pb, ffm->packet_size);
/* XXX: store write position in other file ? */
put_be64(pb, ffm->packet_size); /* current write position */
put_be32(pb, s->nb_streams);
bit_rate = 0;
for(i=0;i<s->nb_streams;i++) {
st = s->streams[i];
bit_rate += st->codec->bit_rate;
}
put_be32(pb, bit_rate);
/* list of streams */
for(i=0;i<s->nb_streams;i++) {
st = s->streams[i];
av_set_pts_info(st, 64, 1, 1000000);
codec = st->codec;
/* generic info */
put_be32(pb, codec->codec_id);
put_byte(pb, codec->codec_type);
put_be32(pb, codec->bit_rate);
put_be32(pb, st->quality);
put_be32(pb, codec->flags);
put_be32(pb, codec->flags2);
put_be32(pb, codec->debug);
/* specific info */
switch(codec->codec_type) {
case CODEC_TYPE_VIDEO:
put_be32(pb, codec->time_base.num);
put_be32(pb, codec->time_base.den);
put_be16(pb, codec->width);
put_be16(pb, codec->height);
put_be16(pb, codec->gop_size);
put_be32(pb, codec->pix_fmt);
put_byte(pb, codec->qmin);
put_byte(pb, codec->qmax);
put_byte(pb, codec->max_qdiff);
put_be16(pb, (int) (codec->qcompress * 10000.0));
put_be16(pb, (int) (codec->qblur * 10000.0));
put_be32(pb, codec->bit_rate_tolerance);
put_strz(pb, codec->rc_eq ? codec->rc_eq : "tex^qComp");
put_be32(pb, codec->rc_max_rate);
put_be32(pb, codec->rc_min_rate);
put_be32(pb, codec->rc_buffer_size);
put_be64(pb, av_dbl2int(codec->i_quant_factor));
put_be64(pb, av_dbl2int(codec->b_quant_factor));
put_be64(pb, av_dbl2int(codec->i_quant_offset));
put_be64(pb, av_dbl2int(codec->b_quant_offset));
put_be32(pb, codec->dct_algo);
put_be32(pb, codec->strict_std_compliance);
put_be32(pb, codec->max_b_frames);
put_be32(pb, codec->luma_elim_threshold);
put_be32(pb, codec->chroma_elim_threshold);
put_be32(pb, codec->mpeg_quant);
put_be32(pb, codec->intra_dc_precision);
put_be32(pb, codec->me_method);
put_be32(pb, codec->mb_decision);
put_be32(pb, codec->nsse_weight);
put_be32(pb, codec->frame_skip_cmp);
put_be64(pb, av_dbl2int(codec->rc_buffer_aggressivity));
put_be32(pb, codec->codec_tag);
put_byte(pb, codec->thread_count);
break;
case CODEC_TYPE_AUDIO:
put_be32(pb, codec->sample_rate);
put_le16(pb, codec->channels);
put_le16(pb, codec->frame_size);
break;
default:
return -1;
}
if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
put_be32(pb, codec->extradata_size);
put_buffer(pb, codec->extradata, codec->extradata_size);
}
}
/* flush until end of block reached */
while ((url_ftell(pb) % ffm->packet_size) != 0)
put_byte(pb, 0);
put_flush_packet(pb);
/* init packet mux */
ffm->packet_ptr = ffm->packet;
ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
assert(ffm->packet_end >= ffm->packet);
//......... part of the code is omitted here .........