本文整理汇总了C++中put_be16函数的典型用法代码示例。如果您正苦于以下问题:C++ put_be16函数的具体用法?C++ put_be16怎么用?C++ put_be16使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了put_be16函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: flush_packet
/* Pad the current FFM packet to its fixed size, write the 16-byte packet
 * header followed by the payload, and reset state for the next packet.
 * NOTE(review): assumes packet_ptr and packet_end lie within ffm->packet
 * and that the output position is packet-aligned — enforced below. */
static void flush_packet(AVFormatContext *s)
{
FFMContext *ffm = s->priv_data;
int fill_size, h;
ByteIOContext *pb = s->pb;
/* zero the unused tail of the packet buffer */
fill_size = ffm->packet_end - ffm->packet_ptr;
memset(ffm->packet_ptr, 0, fill_size);
/* the stream must stay aligned on packet_size boundaries */
if (url_ftell(pb) % ffm->packet_size)
av_abort();
/* put header */
put_be16(pb, PACKET_ID);
put_be16(pb, fill_size); /* number of padding bytes appended above */
put_be64(pb, ffm->dts);
h = ffm->frame_offset;
if (ffm->first_packet)
h |= 0x8000; /* high bit marks the first packet of the stream */
put_be16(pb, h);
put_buffer(pb, ffm->packet, ffm->packet_end - ffm->packet);
put_flush_packet(pb);
/* prepare next packet */
ffm->frame_offset = 0; /* no key frame */
ffm->packet_ptr = ffm->packet;
ffm->first_packet = 0;
}
示例2: write_sgi_header
/* Emit the fixed 512-byte SGI image file header described by `info`.
 * Fields this muxer does not configure are written as constants. */
static void write_sgi_header(ByteIOContext *f, const SGIInfo *info)
{
    int n;

    put_be16(f, SGI_MAGIC);
    put_byte(f, info->rle);
    put_byte(f, info->bytes_per_channel);
    put_be16(f, info->dimension);
    put_be16(f, info->xsize);
    put_be16(f, info->ysize);
    put_be16(f, info->zsize);

    /* The rest are constant in this implementation */
    put_be32(f, 0L);   /* pixmin */
    put_be32(f, 255L); /* pixmax */
    put_be32(f, 0L);   /* dummy */

    /* image name: 80 zero bytes */
    for (n = 80; n > 0; n--)
        put_byte(f, 0);

    put_be32(f, 0L); /* colormap */

    /* zero-pad the remaining 404 bytes of the 512-byte header */
    for (n = 404; n > 0; n--)
        put_byte(f, 0);
}
示例3: gxf_write_media_preamble
/* Write the 16-byte preamble that precedes each GXF media packet.
 * The payload layout after the (type, index, dts) prefix depends on the
 * codec.  Returns the number of preamble bytes written (always 16). */
static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
{
GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
/* rescale dts into the container rate, rounding up */
int64_t dts = av_rescale_rnd(pkt->dts, ctx->sample_rate, sc->codec->time_base.den, AV_ROUND_UP);
put_byte(pb, sc->media_type);
put_byte(pb, sc->index);
put_be32(pb, dts);
if (sc->codec->codec_type == CODEC_TYPE_AUDIO) {
put_be16(pb, 0);
put_be16(pb, size / 2); /* size in 16-bit words — presumably sample count; verify */
} else if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
/* one frame-type byte (0x0d=I, 0x0e=P/other, 0x0f=B), then 24-bit size */
int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
if (frame_type == FF_I_TYPE) {
put_byte(pb, 0x0d);
sc->iframes++;
} else if (frame_type == FF_B_TYPE) {
put_byte(pb, 0x0f);
sc->bframes++;
} else {
put_byte(pb, 0x0e);
sc->pframes++;
}
put_be24(pb, size);
} else if (sc->codec->codec_id == CODEC_ID_DVVIDEO) {
put_byte(pb, size / 4096); /* count of 4096-byte DV blocks */
put_be24(pb, 0);
} else
put_be32(pb, size);
put_be32(pb, dts); /* dts is repeated at the end of the preamble */
put_byte(pb, 1); /* flags */
put_byte(pb, 0); /* reserved */
return 16;
}
示例4: free_eraseblk
/* Attempt to release the given erase block back to the free pool.
 * Returns true if the block was marked empty, false otherwise. */
static bool free_eraseblk(eraseblock& eb)
{
    switch (eb.e_type)
    {
        case eraseblock_type::dentry_inode:
        case eraseblock_type::dentry_clin:
        case eraseblock_type::file_inode:
        case eraseblock_type::file_clin:
            // These erase block types track their valid cluster count;
            // the block can be reused once no valid clusters remain.
            if (get_be16(eb.e_cvalid) == 0)
            {
                eb.e_type = eraseblock_type::empty;
                eb.e_lastwrite = put_be16(0);
                eb.e_writeops = put_be16(0);
                return true;
            }
            break;

        case eraseblock_type::ebin:
            // TODO: implement me!
            log().error("Unable to free ebin erase block {}", eb);
            break;

        default:
            break;
    }

    // the given erase block cannot be freed
    return false;
}
示例5: close_eraseblks
/* Finalize ("close") every still-open erase block: force its write-op
 * counter to the maximum so no further writes land in it, emit the
 * per-type summary where one is required, and record the write time so
 * the garbage collector can order blocks by age.
 * NOTE(review): iteration starts at eb_id 1 — block 0 is presumably
 * reserved (superblock?); verify against the on-disk layout. */
void close_eraseblks(fs_context& fs)
{
/* TODO: Error handling missing! */
for (eb_id_t eb_id = 1; eb_id < fs.neraseblocks; ++eb_id)
{
if (fs.eb_usage[eb_id].e_type == eraseblock_type::ebin)
continue; /* can never be "open" */
if (fs.eb_usage[eb_id].e_type == eraseblock_type::empty)
continue; /* can never be "open" */
eraseblock_type eb_type = fs.eb_usage[eb_id].e_type;
unsigned int writeops = get_be16(fs.eb_usage[eb_id].e_writeops);
unsigned int max_writeops = fs.erasesize / fs.clustersize;
if (writeops == max_writeops)
continue; /* erase block is already finalized/closed */
/* saturate the counter: marks the block as full/closed */
fs.eb_usage[eb_id].e_writeops = put_be16(max_writeops);
if (!summary_required(fs, eb_type))
continue;
summary* eb_summary = summary_get(*fs.summary_cache, eb_type);
summary_write(fs, eb_summary, eb_id);
summary_close(*fs.summary_cache, eb_summary);
/* tell gcinfo an erase block of a specific type was written */
unsigned int write_time = gcinfo_update_writetime(fs, eb_type);
fs.eb_usage[eb_id].e_lastwrite = put_be16(write_time);
}
}
示例6: ff_isom_write_avcc
/*
 * Write an AVCDecoderConfigurationRecord (the payload of an ISO
 * base-media "avcC" box) for the given H.264 extradata.
 *
 * If `data` is in Annex B start-code format it is converted to
 * length-prefixed NAL units and the SPS/PPS are located and embedded;
 * otherwise `data` is assumed to already be an avcC record and is
 * copied verbatim.
 *
 * Returns 0 on success or a negative error code.
 */
int ff_isom_write_avcc(ByteIOContext *pb, const uint8_t *data, int len)
{
    if (len > 6) {
        /* check for h264 start code */
        if (AV_RB32(data) == 0x00000001 ||
            AV_RB24(data) == 0x000001) {
            uint8_t *buf = NULL, *end, *start;
            uint32_t sps_size = 0, pps_size = 0;
            uint8_t *sps = 0, *pps = 0;

            int ret = ff_avc_parse_nal_units_buf(data, &buf, &len);
            if (ret < 0)
                return ret;
            start = buf;
            end = buf + len;

            /* look for sps and pps among the 32-bit length-prefixed NALs */
            while (buf < end) {
                unsigned int size;
                uint8_t nal_type;
                size = AV_RB32(buf);
                nal_type = buf[4] & 0x1f;
                if (nal_type == 7) { /* SPS */
                    sps = buf + 4;
                    sps_size = size;
                } else if (nal_type == 8) { /* PPS */
                    pps = buf + 4;
                    pps_size = size;
                }
                buf += size + 4;
            }

            /* Bug fix: the previous code only logged and then assert()ed.
             * With NDEBUG the asserts compile out and sps[1] below would
             * dereference NULL; the parse buffer also leaked.  Fail
             * cleanly instead.  sps_size must cover sps[1..3] reads. */
            if (!sps || !pps || sps_size < 4) {
                av_log( 0, AV_LOG_ERROR, "thinkingl Can't parse h264 nal!!!\n" );
                av_free(start);
                return -1;
            }

            put_byte(pb, 1); /* version */
            put_byte(pb, sps[1]); /* profile */
            put_byte(pb, sps[2]); /* profile compat */
            put_byte(pb, sps[3]); /* level */
            put_byte(pb, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */
            put_byte(pb, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
            put_be16(pb, sps_size);
            put_buffer(pb, sps, sps_size);
            put_byte(pb, 1); /* number of pps */
            put_be16(pb, pps_size);
            put_buffer(pb, pps, pps_size);
            av_free(start);
        } else {
            put_buffer(pb, data, len);
        }
    }
    return 0;
}
示例7: daud_write_packet
/* Write one D-Cinema audio packet: 16-bit payload size, a constant
 * marker word, then the raw payload, flushed immediately.  Returns 0. */
static int daud_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;

    put_be16(pb, pkt->size);
    put_be16(pb, 0x8010); // unknown
    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);

    return 0;
}
示例8: gxf_write_track_description
/* Write one GXF track description section: media file name, auxiliary
 * information, file-system version, frame rate, lines and fields tags.
 * Returns the section size as patched in by updateSize(). */
static int gxf_write_track_description(AVFormatContext *s, GXFStreamContext *sc, int index)
{
ByteIOContext *pb = s->pb;
int64_t pos;
/* track types 4 and 9 carry MPEG auxiliary data — presumably the MPEG
 * video track types; verify against the GXF track-type table */
int mpeg = sc->track_type == 4 || sc->track_type == 9;
/* track description section */
put_byte(pb, sc->media_type + 0x80);
put_byte(pb, index + 0xC0);
pos = url_ftell(pb);
put_be16(pb, 0); /* size placeholder, patched by updateSize() below */
/* media file name */
put_byte(pb, TRACK_NAME);
put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
put_tag(pb, ES_NAME_PATTERN);
put_be16(pb, sc->media_info);
put_byte(pb, 0);
if (!mpeg) {
/* auxiliary information */
put_byte(pb, TRACK_AUX);
put_byte(pb, 8);
if (sc->track_type == 3) /* presumably the timecode track — verify */
gxf_write_timecode_auxiliary(pb, sc);
else
put_le64(pb, 0);
}
/* file system version */
put_byte(pb, TRACK_VER);
put_byte(pb, 4);
put_be32(pb, 0);
if (mpeg)
gxf_write_mpeg_auxiliary(pb, s->streams[index]);
/* frame rate */
put_byte(pb, TRACK_FPS);
put_byte(pb, 4);
put_be32(pb, sc->frame_rate_index);
/* lines per frame */
put_byte(pb, TRACK_LINES);
put_byte(pb, 4);
put_be32(pb, sc->lines_index);
/* fields per frame */
put_byte(pb, TRACK_FPF);
put_byte(pb, 4);
put_be32(pb, sc->fields);
return updateSize(pb, pos);
}
示例9: gxf_write_track_description
/* Older variant of the GXF track description writer, keyed on codec id
 * instead of track type.  Emits name, auxiliary, version, frame rate,
 * lines and fields tags; returns the size patched by updateSize(). */
static int gxf_write_track_description(ByteIOContext *pb, GXFStreamContext *stream)
{
int64_t pos;
/* track description section */
put_byte(pb, stream->media_type + 0x80);
put_byte(pb, stream->index + 0xC0);
pos = url_ftell(pb);
put_be16(pb, 0); /* size placeholder, patched by updateSize() below */
/* media file name */
put_byte(pb, TRACK_NAME);
put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
put_tag(pb, ES_NAME_PATTERN);
put_be16(pb, stream->media_info);
put_byte(pb, 0);
if (stream->codec->codec_id != CODEC_ID_MPEG2VIDEO) {
/* auxiliary information */
put_byte(pb, TRACK_AUX);
put_byte(pb, 8);
/* CODEC_ID_NONE marks the synthetic timecode track here — presumably;
 * verify against the stream setup code */
if (stream->codec->codec_id == CODEC_ID_NONE)
gxf_write_timecode_auxiliary(pb, stream);
else
put_le64(pb, 0);
}
/* file system version */
put_byte(pb, TRACK_VER);
put_byte(pb, 4);
put_be32(pb, 0);
if (stream->codec->codec_id == CODEC_ID_MPEG2VIDEO)
gxf_write_mpeg_auxiliary(pb, stream);
/* frame rate */
put_byte(pb, TRACK_FPS);
put_byte(pb, 4);
put_be32(pb, stream->frame_rate_index);
/* lines per frame */
put_byte(pb, TRACK_LINES);
put_byte(pb, 4);
put_be32(pb, stream->lines_index);
/* fields per frame */
put_byte(pb, TRACK_FPF);
put_byte(pb, 4);
put_be32(pb, stream->fields);
return updateSize(pb, pos);
}
示例10: gxf_write_umf_media_description
/* Write the UMF media description section: one fixed-layout record per
 * stream plus one extra record (i == nb_streams) for the synthetic
 * timecode track.  Each record's length field is back-patched via
 * seek once the record is complete.  Returns the section byte size. */
static int gxf_write_umf_media_description(AVFormatContext *s)
{
GXFContext *gxf = s->priv_data;
ByteIOContext *pb = s->pb;
int64_t pos;
int i, j;
pos = url_ftell(pb);
gxf->umf_media_offset = pos - gxf->umf_start_offset;
/* <= is intentional: the final iteration emits the timecode track */
for (i = 0; i <= s->nb_streams; ++i) {
GXFStreamContext *sc;
int64_t startpos, curpos;
if (i == s->nb_streams)
sc = &gxf->timecode_track;
else
sc = s->streams[i]->priv_data;
startpos = url_ftell(pb);
put_le16(pb, 0); /* length, back-patched below */
put_le16(pb, sc->media_info);
put_le16(pb, 0); /* reserved */
put_le16(pb, 0); /* reserved */
put_le32(pb, gxf->nb_fields);
put_le32(pb, 0); /* attributes rw, ro */
put_le32(pb, 0); /* mark in */
put_le32(pb, gxf->nb_fields); /* mark out */
/* media name, zero-padded to an 88-byte field */
put_buffer(pb, ES_NAME_PATTERN, strlen(ES_NAME_PATTERN));
put_be16(pb, sc->media_info);
for (j = strlen(ES_NAME_PATTERN)+2; j < 88; j++)
put_byte(pb, 0);
put_le32(pb, sc->track_type);
put_le32(pb, sc->sample_rate);
put_le32(pb, sc->sample_size);
put_le32(pb, 0); /* reserved */
/* codec-specific trailer of the record */
if (sc == &gxf->timecode_track)
gxf_write_umf_media_timecode(pb, sc); /* 80 bytes */
else {
AVStream *st = s->streams[i];
switch (st->codec->codec_id) {
case CODEC_ID_MPEG2VIDEO:
gxf_write_umf_media_mpeg(pb, st);
break;
case CODEC_ID_PCM_S16LE:
gxf_write_umf_media_audio(pb, sc);
break;
case CODEC_ID_DVVIDEO:
gxf_write_umf_media_dv(pb, sc);
break;
}
}
/* back-patch this record's length field, then restore position */
curpos = url_ftell(pb);
url_fseek(pb, startpos, SEEK_SET);
put_le16(pb, curpos - startpos);
url_fseek(pb, curpos, SEEK_SET);
}
return url_ftell(pb) - pos;
}
示例11: rtcp_send_sr
/* Send an RTCP sender report (SR) packet for the first stream,
 * mapping the NTP microsecond clock onto the stream's RTP timebase. */
static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time)
{
RTPMuxContext *s = s1->priv_data;
uint32_t rtp_ts;
#ifdef _MSC_VER
/* MSVC cannot use the compound-literal AVRational below */
AVRational rational = {1, 1000000};
#endif
dprintf(s1, "RTCP: %02x %"PRIx64" %x\n", s->payload_type, ntp_time, s->timestamp);
s->last_rtcp_ntp_time = ntp_time;
#ifdef _MSC_VER
rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, rational,
s1->streams[0]->time_base) + s->base_timestamp;
#else
rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, (AVRational){1, 1000000},
s1->streams[0]->time_base) + s->base_timestamp;
#endif
put_byte(s1->pb, (RTP_VERSION << 6)); /* V=2, no padding, RC=0 */
put_byte(s1->pb, RTCP_SR);
put_be16(s1->pb, 6); /* length in words - 1 */
put_be32(s1->pb, s->ssrc);
put_be32(s1->pb, ntp_time / 1000000); /* NTP timestamp, seconds part */
put_be32(s1->pb, ((ntp_time % 1000000) << 32) / 1000000); /* microseconds scaled to a 2^32 fraction */
put_be32(s1->pb, rtp_ts);
put_be32(s1->pb, s->packet_count);
put_be32(s1->pb, s->octet_count);
put_flush_packet(s1->pb);
}
示例12: rso_write_trailer
/* RSO trailer: back-patch the 16-bit data-size field at offset 2 of the
 * header now that the total file size is known.  Sizes that do not fit
 * in 16 bits are clamped to 0xffff with a warning.  Returns 0 on
 * success or a negative error from url_ftell(). */
static int rso_write_trailer(AVFormatContext *s)
{
ByteIOContext *pb = s->pb;
int64_t file_size;
uint16_t coded_file_size;
file_size = url_ftell(pb);
if (file_size < 0)
return file_size; /* propagate the I/O error code */
if (file_size > 0xffff + RSO_HEADER_SIZE) {
av_log(s, AV_LOG_WARNING,
"Output file is too big (%"PRId64" bytes >= 64kB)\n", file_size);
coded_file_size = 0xffff;
} else {
coded_file_size = file_size - RSO_HEADER_SIZE;
}
/* update file size */
url_fseek(pb, 2, SEEK_SET);
put_be16(pb, coded_file_size);
url_fseek(pb, file_size, SEEK_SET); /* restore position to end of file */
put_flush_packet(pb);
return 0;
}
示例13: put_str
/* Write a length-prefixed string: 16-bit big-endian length followed by
 * the bytes of `tag` (no terminating NUL is written). */
static void put_str(ByteIOContext *s, const char *tag)
{
    size_t i, len = strlen(tag);

    put_be16(s, len);
    for (i = 0; i < len; i++)
        put_byte(s, tag[i]);
}
示例14: put_string
/* Build and send the RTMP AMF0 "connect" invoke on the control channel,
 * advertising the app name, tcUrl and codec capability flags. */
void
RTMPSession::sendConnectPacket()
{
RTMPChunk_0 metadata = {{0}};
metadata.msg_stream_id = kControlChannelStreamId;
metadata.msg_type_id = RTMP_PT_INVOKE;
std::vector<uint8_t> buff;
std::stringstream url ;
/* rebuild the tcUrl; include the port only when one was supplied */
if(m_uri.port > 0) {
url << m_uri.protocol << "://" << m_uri.host << ":" << m_uri.port << "/" << m_app;
} else {
url << m_uri.protocol << "://" << m_uri.host << "/" << m_app;
}
put_string(buff, "connect");
/* transaction id; tracked so the server's _result can be matched */
put_double(buff, ++m_numberOfInvokes);
m_trackedCommands[m_numberOfInvokes] = "connect";
put_byte(buff, kAMFObject);
put_named_string(buff, "app", m_app.c_str());
put_named_string(buff,"type", "nonprivate");
put_named_string(buff, "tcUrl", url.str().c_str());
put_named_bool(buff, "fpad", false);
put_named_double(buff, "capabilities", 15.);
put_named_double(buff, "audioCodecs", 10. );
put_named_double(buff, "videoCodecs", 7.);
put_named_double(buff, "videoFunction", 1.);
/* empty property name (be16 0) + end marker closes the AMF object */
put_be16(buff, 0);
put_byte(buff, kAMFObjectEnd);
metadata.msg_length.data = static_cast<int>( buff.size() );
sendPacket(&buff[0], buff.size(), metadata);
}
示例15: sap_connect_rsp
/*
 * Send a SAP CONNECT_RESP message carrying the given status to the
 * client and advance the connection state machine accordingly.
 * On SAP_STATUS_MAX_MSG_SIZE_NOT_SUPPORTED a MaxMsgSize parameter with
 * the size we do support is appended before the rejection is sent.
 *
 * Returns the result of send_message(), or a negative errno-style code
 * (-EINVAL without a connection, -EPERM in the wrong state).
 */
int sap_connect_rsp(void *sap_device, uint8_t status)
{
    struct sap_server *server = sap_device;
    struct sap_connection *conn = server->conn;
    char buf[SAP_BUF_SIZE];
    struct sap_message *msg = (struct sap_message *) buf;
    struct sap_parameter *param = (struct sap_parameter *) msg->param;
    size_t size = sizeof(struct sap_message);

    if (!conn)
        return -EINVAL;

    DBG("state %d pr 0x%02x status 0x%02x", conn->state,
        conn->processing_req, status);

    if (conn->state != SAP_STATE_CONNECT_IN_PROGRESS)
        return -EPERM;

    memset(buf, 0, sizeof(buf));

    msg->id = SAP_CONNECT_RESP;
    msg->nparam = 0x01;

    /* Add connection status */
    param->id = SAP_PARAM_ID_CONN_STATUS;
    param->len = htons(SAP_PARAM_ID_CONN_STATUS_LEN);
    *param->val = status;
    size += PARAMETER_SIZE(SAP_PARAM_ID_CONN_STATUS_LEN);

    switch (status) {
    case SAP_STATUS_OK:
        sap_set_connected(server);
        break;
    case SAP_STATUS_OK_ONGOING_CALL:
        DBG("ongoing call. Wait for reset indication!");
        conn->state = SAP_STATE_CONNECT_MODEM_BUSY;
        break;
    case SAP_STATUS_MAX_MSG_SIZE_NOT_SUPPORTED: /* Add MaxMsgSize */
        msg->nparam++;
        param = (struct sap_parameter *) &buf[size];
        param->id = SAP_PARAM_ID_MAX_MSG_SIZE;
        param->len = htons(SAP_PARAM_ID_MAX_MSG_SIZE_LEN);
        /* Fix: "&param->val" had been mangled to "¶m->val" (HTML
         * entity corruption of "&para"), which does not compile. */
        put_be16(SAP_BUF_SIZE, &param->val);
        size += PARAMETER_SIZE(SAP_PARAM_ID_MAX_MSG_SIZE_LEN);
        /* fall */
    default:
        conn->state = SAP_STATE_DISCONNECTED;
        /* Timer will shutdown channel if client doesn't send
         * CONNECT_REQ or doesn't shutdown channel itself.*/
        start_guard_timer(server, SAP_TIMER_NO_ACTIVITY);
        break;
    }

    conn->processing_req = SAP_NO_REQ;

    return send_message(conn, buf, size);
}