本文整理汇总了C++中NV_ENCODE_API_FUNCTION_LIST类的典型用法代码示例。如果您正苦于以下问题:C++ NV_ENCODE_API_FUNCTION_LIST类的具体用法?C++ NV_ENCODE_API_FUNCTION_LIST怎么用?C++ NV_ENCODE_API_FUNCTION_LIST使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了NV_ENCODE_API_FUNCTION_LIST类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: nvenc_check_codec_support
/* Check whether the driver advertises the codec GUID the context was
 * configured with. Returns 0 on success, AVERROR(ENOSYS) if the codec
 * is not supported, AVERROR(ENOMEM) on allocation failure. */
static int nvenc_check_codec_support(AVCodecContext *avctx)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    GUID *guid_list = NULL;
    int err, idx, nb_guids = 0;

    err = nv->nvEncGetEncodeGUIDCount(ctx->nvenc_ctx, &nb_guids);
    if (err != NV_ENC_SUCCESS || !nb_guids)
        return AVERROR(ENOSYS);

    guid_list = av_malloc(nb_guids * sizeof(*guid_list));
    if (!guid_list)
        return AVERROR(ENOMEM);

    err = nv->nvEncGetEncodeGUIDs(ctx->nvenc_ctx, guid_list, nb_guids, &nb_guids);
    if (err != NV_ENC_SUCCESS) {
        err = AVERROR(ENOSYS);
        goto done;
    }

    /* Assume unsupported until the requested GUID is found in the list. */
    err = AVERROR(ENOSYS);
    for (idx = 0; idx < nb_guids; idx++) {
        if (!memcmp(&guid_list[idx], &ctx->params.encodeGUID, sizeof(*guid_list))) {
            err = 0;
            break;
        }
    }

done:
    av_free(guid_list);
    return err;
}
示例2: ff_nvenc_encode_close
/* Release every NVENC/CUDA resource owned by the context. Safe to call
 * on a partially initialized context; always returns 0. */
av_cold int ff_nvenc_encode_close(AVCodecContext *avctx)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;

    if (ctx->in) {
        int surf;
        /* Input and output surfaces are allocated pairwise, so one loop
         * destroys both arrays. */
        for (surf = 0; surf < ctx->nb_surfaces; surf++) {
            nv->nvEncDestroyInputBuffer(ctx->nvenc_ctx, ctx->in[surf].in);
            nv->nvEncDestroyBitstreamBuffer(ctx->nvenc_ctx, ctx->out[surf].out);
        }
    }
    av_freep(&ctx->in);
    av_freep(&ctx->out);

    if (ctx->nvenc_ctx)
        nv->nvEncDestroyEncoder(ctx->nvenc_ctx);

    if (ctx->cu_context)
        ctx->nvel.cu_ctx_destroy(ctx->cu_context);

    /* Unload the dynamically loaded NVENC and CUDA libraries last. */
    if (ctx->nvel.nvenc)
        dlclose(ctx->nvel.nvenc);
    if (ctx->nvel.cuda)
        dlclose(ctx->nvel.cuda);

    return 0;
}
示例3: nvenc_get_frame
/* Dequeue the oldest pending output surface, lock its bitstream buffer,
 * copy the encoded data into pkt and fill in timestamps and picture-type
 * metadata. Returns 0 on success or a negative AVERROR code.
 *
 * Fix: the source text was corrupted by HTML-entity mangling — every
 * "&params" had become "¶ms" (U+00B6 'ms'), which does not compile.
 * Restored the intended address-of expressions. */
static int nvenc_get_frame(AVCodecContext *avctx, AVPacket *pkt)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_LOCK_BITSTREAM params = { 0 };
    NVENCOutputSurface *out = NULL;
    int ret;

    ret = nvenc_dequeue_surface(ctx->pending, &out);
    if (ret)
        return ret;

    params.version = NV_ENC_LOCK_BITSTREAM_VER;
    params.outputBitstream = out->out;

    ret = nv->nvEncLockBitstream(ctx->nvenc_ctx, &params);
    if (ret < 0)
        return AVERROR_UNKNOWN;

    ret = ff_alloc_packet(pkt, params.bitstreamSizeInBytes);
    if (ret < 0)
        return ret;

    memcpy(pkt->data, params.bitstreamBufferPtr, pkt->size);

    ret = nv->nvEncUnlockBitstream(ctx->nvenc_ctx, out->out);
    if (ret < 0)
        return AVERROR_UNKNOWN;

    /* Both surfaces of the pair are free for reuse again. */
    out->busy = out->in->locked = 0;

    ret = nvenc_set_timestamp(ctx, &params, pkt);
    if (ret < 0)
        return ret;

    switch (params.pictureType) {
    case NV_ENC_PIC_TYPE_IDR:
        pkt->flags |= AV_PKT_FLAG_KEY;
        /* fall through: an IDR frame is also an intra frame */
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    case NV_ENC_PIC_TYPE_INTRA_REFRESH:
    case NV_ENC_PIC_TYPE_I:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case NV_ENC_PIC_TYPE_P:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case NV_ENC_PIC_TYPE_B:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
        break;
    case NV_ENC_PIC_TYPE_BI:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_BI;
        break;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    }

    return 0;
}
示例4: nvenc_alloc_surface
/* Allocate the input buffer and the matching bitstream output buffer
 * for surface slot idx. Returns 0 on success, AVERROR_BUG for an
 * unexpected pixel format, AVERROR_UNKNOWN on NVENC failure. */
static int nvenc_alloc_surface(AVCodecContext *avctx, int idx)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_CREATE_INPUT_BUFFER in_params = { 0 };
    NV_ENC_CREATE_BITSTREAM_BUFFER out_params = { 0 };
    int ret;

    in_params.version  = NV_ENC_CREATE_INPUT_BUFFER_VER;
    out_params.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;

    in_params.width      = avctx->width;
    in_params.height     = avctx->height;
    in_params.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED;

    /* Map the codec pixel format onto the NVENC buffer format. */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        in_params.bufferFmt = NV_ENC_BUFFER_FORMAT_YV12_PL;
        break;
    case AV_PIX_FMT_NV12:
        in_params.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12_PL;
        break;
    case AV_PIX_FMT_YUV444P:
        in_params.bufferFmt = NV_ENC_BUFFER_FORMAT_YUV444_PL;
        break;
    default:
        /* Formats are validated earlier; anything else is a bug. */
        return AVERROR_BUG;
    }

    ret = nv->nvEncCreateInputBuffer(ctx->nvenc_ctx, &in_params);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CreateInputBuffer failed\n");
        return AVERROR_UNKNOWN;
    }
    ctx->in[idx].in     = in_params.inputBuffer;
    ctx->in[idx].format = in_params.bufferFmt;

    /* 1MB is large enough to hold most output frames.
     * NVENC increases this automaticaly if it's not enough. */
    out_params.size       = BITSTREAM_BUFFER_SIZE;
    out_params.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED;

    ret = nv->nvEncCreateBitstreamBuffer(ctx->nvenc_ctx, &out_params);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CreateBitstreamBuffer failed\n");
        return AVERROR_UNKNOWN;
    }
    ctx->out[idx].out  = out_params.bitstreamBuffer;
    ctx->out[idx].busy = 0;

    return 0;
}
示例5: nvenc_check_cap
/* Query a single NVENC capability value for the configured codec GUID.
 * Returns the capability value, or 0 if the query fails.
 *
 * Fix: restored "&params" which had been mangled into "¶ms" by
 * HTML-entity corruption in the source text. */
static int nvenc_check_cap(AVCodecContext *avctx, NV_ENC_CAPS cap)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_CAPS_PARAM params = { 0 };
    int ret, val = 0;

    params.version     = NV_ENC_CAPS_PARAM_VER;
    params.capsToQuery = cap;

    ret = nv->nvEncGetEncodeCaps(ctx->nvenc_ctx, ctx->params.encodeGUID, &params, &val);
    if (ret == NV_ENC_SUCCESS)
        return val;
    return 0;
}
示例6: nvenc_enqueue_frame
/* Grab a free input surface, lock it, copy the frame's pixel data into
 * it and return it via in_surf. Returns 0 on success or a negative
 * AVERROR code; on any failure the buffer is unlocked again.
 *
 * Fix: restored "&params" which had been mangled into "¶ms" by
 * HTML-entity corruption in the source text. */
static int nvenc_enqueue_frame(AVCodecContext *avctx, const AVFrame *frame,
                               NVENCInputSurface **in_surf)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_LOCK_INPUT_BUFFER params = { 0 };
    NVENCInputSurface *in = get_input_surface(ctx);
    int ret;

    if (!in)
        return AVERROR_BUG;

    params.version     = NV_ENC_LOCK_INPUT_BUFFER_VER;
    params.inputBuffer = in->in;

    ret = nv->nvEncLockInputBuffer(ctx->nvenc_ctx, &params);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Cannot lock the buffer %p.\n",
               in);
        return AVERROR_UNKNOWN;
    }

    ret = nvenc_copy_frame(&params, frame);
    if (ret < 0)
        goto fail;

    ret = nv->nvEncUnlockInputBuffer(ctx->nvenc_ctx, in->in);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Cannot unlock the buffer %p.\n",
               in);
        return AVERROR_UNKNOWN;
    }

    *in_surf = in;
    return 0;

fail:
    /* Best-effort unlock; the copy error is what we report. */
    nv->nvEncUnlockInputBuffer(ctx->nvenc_ctx, in->in);
    return ret;
}
示例7: nvenc_open_session
/* Open an NVENC encode session on the CUDA context stored in the
 * encoder context. Returns 0 on success, AVERROR_UNKNOWN otherwise
 * (nvenc_ctx is reset to NULL so close paths stay safe).
 *
 * Fix: restored "&params" which had been mangled into "¶ms" by
 * HTML-entity corruption in the source text. */
static int nvenc_open_session(AVCodecContext *avctx)
{
    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = { 0 };
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    int ret;

    params.version    = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
    params.apiVersion = NVENCAPI_VERSION;
    params.device     = ctx->cu_context;
    params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;

    ret = nv->nvEncOpenEncodeSessionEx(&params, &ctx->nvenc_ctx);
    if (ret != NV_ENC_SUCCESS) {
        ctx->nvenc_ctx = NULL;
        av_log(avctx, AV_LOG_ERROR,
               "Cannot open the NVENC Session\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}
示例8: nvenc_setup_extradata
/* Fetch the SPS/PPS headers from NVENC and store them as the codec
 * context's extradata. Returns 0 on success, AVERROR(ENOMEM) or
 * AVERROR_UNKNOWN on failure. */
static int nvenc_setup_extradata(AVCodecContext *avctx)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
    int ret;

    avctx->extradata = av_mallocz(EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);

    payload.version              = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
    payload.spsppsBuffer         = avctx->extradata;
    payload.inBufferSize         = EXTRADATA_SIZE;
    /* NVENC writes the actual header size straight into extradata_size. */
    payload.outSPSPPSPayloadSize = &avctx->extradata_size;

    ret = nv->nvEncGetSequenceParams(ctx->nvenc_ctx, &payload);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Cannot get the extradata\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}
示例9: nvenc_setup_encoder
/* Fill in NV_ENC_INITIALIZE_PARAMS and the encode config from the codec
 * context (resolution, aspect ratio, frame rate, GOP structure,
 * interlacing) and initialize the NVENC encoder instance.
 * Returns 0 on success or a negative AVERROR code.
 *
 * Fix: the failure message for nvEncInitializeEncoder said "decoder"
 * instead of "encoder" and was missing its trailing newline. */
static int nvenc_setup_encoder(AVCodecContext *avctx)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_PRESET_CONFIG preset_cfg = { 0 };
    int ret;

    ctx->params.version      = NV_ENC_INITIALIZE_PARAMS_VER;
    ctx->params.encodeHeight = avctx->height;
    ctx->params.encodeWidth  = avctx->width;

    /* Display aspect ratio: apply the sample aspect ratio when it is
     * meaningful, otherwise fall back to the pixel dimensions. */
    if (avctx->sample_aspect_ratio.num &&
        avctx->sample_aspect_ratio.den &&
        (avctx->sample_aspect_ratio.num != 1 ||
         avctx->sample_aspect_ratio.den != 1)) {
        av_reduce(&ctx->params.darWidth,
                  &ctx->params.darHeight,
                  avctx->width  * avctx->sample_aspect_ratio.num,
                  avctx->height * avctx->sample_aspect_ratio.den,
                  INT_MAX / 8);
    } else {
        ctx->params.darHeight = avctx->height;
        ctx->params.darWidth  = avctx->width;
    }

    ctx->params.frameRateNum = avctx->time_base.den;
    ctx->params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;

    ctx->params.enableEncodeAsync = 0;
    ctx->params.enablePTD         = 1;

    ctx->params.encodeConfig = &ctx->config;

    nvec_map_preset(ctx);

    preset_cfg.version           = NV_ENC_PRESET_CONFIG_VER;
    preset_cfg.presetCfg.version = NV_ENC_CONFIG_VER;

    ret = nv->nvEncGetEncodePresetConfig(ctx->nvenc_ctx,
                                         ctx->params.encodeGUID,
                                         ctx->params.presetGUID,
                                         &preset_cfg);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "Cannot get the preset configuration\n");
        return AVERROR_UNKNOWN;
    }

    memcpy(&ctx->config, &preset_cfg.presetCfg, sizeof(ctx->config));

    ctx->config.version = NV_ENC_CONFIG_VER;

    if (avctx->gop_size > 0) {
        if (avctx->max_b_frames > 0) {
            ctx->last_dts = -2;
            /* 0 is intra-only,
             * 1 is I/P only,
             * 2 is one B Frame,
             * 3 two B frames, and so on. */
            ctx->config.frameIntervalP = avctx->max_b_frames + 1;
        } else if (avctx->max_b_frames == 0) {
            ctx->config.frameIntervalP = 1;
        }
        ctx->config.gopLength = avctx->gop_size;
    } else if (avctx->gop_size == 0) {
        ctx->config.frameIntervalP = 0;
        ctx->config.gopLength      = 1;
    }

    /* The preset may have raised frameIntervalP; reflect it back. */
    if (ctx->config.frameIntervalP > 1)
        avctx->max_b_frames = ctx->config.frameIntervalP - 1;

    nvenc_setup_rate_control(avctx);

    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
        ctx->config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD;
    } else {
        ctx->config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME;
    }

    if ((ret = nvenc_setup_codec_config(avctx)) < 0)
        return ret;

    ret = nv->nvEncInitializeEncoder(ctx->nvenc_ctx, &ctx->params);
    if (ret != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Cannot initialize the encoder\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}
示例10: ff_nvenc_encode_frame
/* Submit a frame (or an EOS flush when frame is NULL) to NVENC and,
 * when output is available, retrieve one encoded packet into pkt,
 * setting *got_packet accordingly. Returns 0 on success or a negative
 * AVERROR code.
 *
 * Fix: restored "&params" which had been mangled into "¶ms" by
 * HTML-entity corruption in the source text. */
int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *frame, int *got_packet)
{
    NVENCContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_PIC_PARAMS params = { 0 };
    NVENCInputSurface *in = NULL;
    NVENCOutputSurface *out = NULL;
    int ret;

    params.version = NV_ENC_PIC_PARAMS_VER;

    if (frame) {
        ret = nvenc_enqueue_frame(avctx, frame, &in);
        if (ret < 0)
            return ret;
        out = get_output_surface(ctx);
        if (!out)
            return AVERROR_BUG;

        out->in = in;

        params.inputBuffer     = in->in;
        params.bufferFmt       = in->format;
        params.inputWidth      = frame->width;
        params.inputHeight     = frame->height;
        params.outputBitstream = out->out;
        params.inputTimeStamp  = frame->pts;

        if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
            if (frame->top_field_first)
                params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
            else
                params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP;
        } else {
            params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
        }

        nvenc_codec_specific_pic_params(avctx, &params);

        ret = nvenc_enqueue_timestamp(ctx->timestamps, frame->pts);
        if (ret < 0)
            return ret;
    } else {
        /* NULL frame means drain: signal end of stream to the encoder. */
        params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
    }

    ret = nv->nvEncEncodePicture(ctx->nvenc_ctx, &params);
    /* NEED_MORE_INPUT is not an error: the encoder is buffering frames. */
    if (ret != NV_ENC_SUCCESS &&
        ret != NV_ENC_ERR_NEED_MORE_INPUT) {
        return AVERROR_UNKNOWN;
    }

    if (out) {
        ret = nvenc_enqueue_surface(ctx->pending, out);
        if (ret < 0)
            return ret;
    }

    if (ret != NV_ENC_ERR_NEED_MORE_INPUT &&
        av_fifo_size(ctx->pending)) {
        ret = nvenc_get_frame(avctx, pkt);
        if (ret < 0)
            return ret;
        *got_packet = 1;
    } else {
        *got_packet = 0;
    }

    return 0;
}
示例11: memcpy
// Encode one frame: convert the input image to NV12, upload it into the
// NVENC input buffer, run the encoder, and append the resulting H.264
// bitstream to dst. Returns false if the encoder is invalid or a buffer
// lock fails, true otherwise.
//
// Fixes: restored "&params" which had been mangled into "¶ms" by
// HTML-entity corruption in the source text, and added status checks on
// the two lock calls — the original dereferenced bufferDataPtr /
// bitstreamBufferPtr without verifying the lock succeeded.
bool fcH264EncoderNVIDIA::encode(fcH264Frame& dst, const void *image, fcPixelFormat fmt, fcTime timestamp, bool force_keyframe)
{
    if (!isValid()) { return false; }

    dst.timestamp = timestamp;

    // convert image to NV12
    AnyToNV12(m_nv12_image, m_rgba_image, image, fmt, m_conf.width, m_conf.height);
    NV12Data data = m_nv12_image.data();

    NVENCSTATUS stat;

    // upload image to input buffer
    {
        NV_ENC_LOCK_INPUT_BUFFER lock_params = { 0 };
        lock_params.version = NV_ENC_LOCK_INPUT_BUFFER_VER;
        lock_params.inputBuffer = m_input.inputBuffer;
        stat = nvenc.nvEncLockInputBuffer(m_encoder, &lock_params);
        if (stat != NV_ENC_SUCCESS || !lock_params.bufferDataPtr) {
            return false; // cannot write into an unlocked buffer
        }
        memcpy(lock_params.bufferDataPtr, data.y, m_nv12_image.size());
        stat = nvenc.nvEncUnlockInputBuffer(m_encoder, m_input.inputBuffer);
    }

    NV_ENC_PIC_PARAMS params = { 0 };
    params.version = NV_ENC_PIC_PARAMS_VER;
    params.inputBuffer = m_input.inputBuffer;
    params.outputBitstream = m_output.bitstreamBuffer;
    params.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12;
    params.inputWidth = m_conf.width;
    params.inputHeight = m_conf.height;
    params.completionEvent = 0;
    params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
    params.encodePicFlags = 0;
    if (force_keyframe) {
        params.encodePicFlags |= NV_ENC_PIC_FLAG_FORCEINTRA;
    }
    params.inputTimeStamp = to_usec(timestamp);
    params.inputDuration = to_usec(1.0 / m_conf.target_framerate);

    // encode!
    stat = nvenc.nvEncEncodePicture(m_encoder, &params);

    // retrieve encoded data
    {
        NV_ENC_LOCK_BITSTREAM lock_params = { 0 };
        lock_params.version = NV_ENC_LOCK_BITSTREAM_VER;
        lock_params.outputBitstream = m_output.bitstreamBuffer;
        stat = nvenc.nvEncLockBitstream(m_encoder, &lock_params);
        if (stat != NV_ENC_SUCCESS || !lock_params.bitstreamBufferPtr) {
            return false; // nothing valid to copy out
        }
        dst.data.append((char*)lock_params.bitstreamBufferPtr, lock_params.bitstreamSizeInBytes);
        dst.gatherNALInformation();
        stat = nvenc.nvEncUnlockBitstream(m_encoder, m_output.bitstreamBuffer);
    }
    return true;
}
示例12:
// Destructor: release the NVENC buffers and the encoder session.
// All teardown is skipped if the encoder was never created.
fcH264EncoderNVIDIA::~fcH264EncoderNVIDIA()
{
    if (!m_encoder) {
        return;
    }
    // Buffers must be destroyed before the encoder session itself.
    if (m_input.inputBuffer) {
        nvenc.nvEncDestroyInputBuffer(m_encoder, m_input.inputBuffer);
    }
    if (m_output.bitstreamBuffer) {
        nvenc.nvEncDestroyBitstreamBuffer(m_encoder, m_output.bitstreamBuffer);
    }
    nvenc.nvEncDestroyEncoder(m_encoder);
}
示例13: memset
// Constructor: load the NVENC module, open an encode session on the
// supplied device (Direct3D or CUDA), enumerate the available codec /
// profile / preset GUIDs, initialize an H.264 high-profile encoder and
// allocate one input buffer plus one bitstream output buffer.
// On any early failure (module load, session open) the object is left
// with m_encoder == nullptr and isValid() will report failure.
//
// Fixes: restored "&params" which had been mangled into "¶ms" by
// HTML-entity corruption in the source text, and corrected the
// misspelled local names "guilds" -> "guids".
fcH264EncoderNVIDIA::fcH264EncoderNVIDIA(const fcH264EncoderConfig& conf, void *device, fcHWEncoderDeviceType type)
    : m_conf(conf)
{
    if (!LoadNVENCModule()) { return; }

    NVENCSTATUS stat;
    {
        NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params;
        memset(&params, 0, sizeof(params));
        params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
        params.apiVersion = NVENCAPI_VERSION;
        params.device = device;
        switch (type) {
        case fcHWEncoderDeviceType::D3D9:
        case fcHWEncoderDeviceType::D3D10:
        case fcHWEncoderDeviceType::D3D11:
        case fcHWEncoderDeviceType::D3D12:
            params.deviceType = NV_ENC_DEVICE_TYPE_DIRECTX;
            break;
        case fcHWEncoderDeviceType::CUDA:
            params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
            break;
        }
        stat = nvenc.nvEncOpenEncodeSessionEx(&params, &m_encoder);
        if (!m_encoder) {
            return;
        }
    }

    // Enumerate supported codec, profile and preset GUIDs.
    std::vector<GUID> encode_guids;
    std::vector<GUID> profile_guids;
    std::vector<GUID> preset_guids;
    {
        uint32_t num_encode_guids = 0;
        uint32_t num_profile_guids = 0;
        uint32_t num_preset_guids = 0;

        nvenc.nvEncGetEncodeGUIDCount(m_encoder, &num_encode_guids);
        encode_guids.resize(num_encode_guids);
        nvenc.nvEncGetEncodeGUIDs(m_encoder, encode_guids.data(), num_encode_guids, &num_encode_guids);

        nvenc.nvEncGetEncodeProfileGUIDCount(m_encoder, NV_ENC_CODEC_H264_GUID, &num_profile_guids);
        profile_guids.resize(num_profile_guids);
        nvenc.nvEncGetEncodeProfileGUIDs(m_encoder, NV_ENC_CODEC_H264_GUID, profile_guids.data(), num_profile_guids, &num_profile_guids);

        nvenc.nvEncGetEncodePresetCount(m_encoder, NV_ENC_CODEC_H264_GUID, &num_preset_guids);
        preset_guids.resize(num_preset_guids);
        nvenc.nvEncGetEncodePresetGUIDs(m_encoder, NV_ENC_CODEC_H264_GUID, preset_guids.data(), num_preset_guids, &num_preset_guids);
    }

    // Initialize the encoder from the default preset, switched to the
    // H.264 high profile.
    {
        NV_ENC_INITIALIZE_PARAMS params = { 0 };
        params.version = NV_ENC_INITIALIZE_PARAMS_VER;
        params.encodeGUID = NV_ENC_CODEC_H264_GUID;
        params.presetGUID = NV_ENC_PRESET_DEFAULT_GUID;
        params.encodeWidth = conf.width;
        params.encodeHeight = conf.height;
        params.darWidth = conf.width;
        params.darHeight = conf.height;
        params.frameRateNum = m_conf.target_framerate;
        params.frameRateDen = 1;
        params.enablePTD = 1;

        NV_ENC_PRESET_CONFIG preset_config = { 0 };
        preset_config.version = NV_ENC_PRESET_CONFIG_VER;
        preset_config.presetCfg.version = NV_ENC_CONFIG_VER;
        stat = nvenc.nvEncGetEncodePresetConfig(m_encoder, params.encodeGUID, params.presetGUID, &preset_config);

        NV_ENC_CONFIG encode_config = { 0 };
        encode_config.version = NV_ENC_CONFIG_VER;
        memcpy(&encode_config, &preset_config.presetCfg, sizeof(NV_ENC_CONFIG));
        encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
        params.encodeConfig = &encode_config;

        stat = nvenc.nvEncInitializeEncoder(m_encoder, &params);
    }

    // One NV12 input buffer at the configured frame size.
    {
        memset(&m_input, 0, sizeof(m_input));
        m_input.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
        m_input.width = m_conf.width;
        m_input.height = m_conf.height;
        m_input.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12;
        stat = nvenc.nvEncCreateInputBuffer(m_encoder, &m_input);
    }

    // One bitstream output buffer sized for a worst-case frame.
    {
        memset(&m_output, 0, sizeof(m_output));
        m_output.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
        m_output.size = roundup<16>(conf.width) * roundup<16>(conf.height) * 2;
        stat = nvenc.nvEncCreateBitstreamBuffer(m_encoder, &m_output);
    }
}