C++ VideoInfo Class Code Examples

This article collects typical usage examples of the C++ VideoInfo class. If you are trying to work out what VideoInfo is for, how to call it, or what real-world code that uses it looks like, the curated examples below should help.


A total of 15 VideoInfo code examples are shown below, ordered by popularity.

Example 1: LoadVideo

void YouTubeWebPageView::LoadVideo(const VideoInfo& info)
{
   m_bLoaded = false;

   ATLTRACE(_T("%08x: LoadVideo: \"%s\" (%s)\n"),
      m_hWnd,
      info.Name(),
      info.Address());

   m_iLastError = 0;

   CString cszHtml = GetHtmlTemplate();

   cszHtml.Replace(_T("{..$address..}"), info.Address());
   cszHtml.Replace(_T("{..$name..}"), info.Name());
   cszHtml.Replace(_T("\r\n"), _T("\n"));

   // generate temp name
   CString cszTempFolder;
   GetTempPath(MAX_PATH, cszTempFolder.GetBuffer(MAX_PATH));
   cszTempFolder.ReleaseBuffer();

   CString cszFilename;
   GetTempFileName(cszTempFolder, _T("YTP"), 0, cszFilename.GetBuffer(MAX_PATH));
   cszFilename.ReleaseBuffer();

   // write to temporary file
   {
      FILE* fd = NULL;
      errno_t err = _tfopen_s(&fd, cszFilename, _T("wt"));
      ATLVERIFY(err == 0 && fd != NULL);

      USES_CONVERSION;
      fprintf(fd, "%s", T2CA(cszHtml)); // "%s" keeps '%' in the HTML from being treated as format specifiers
      fclose(fd);
   }

   // navigate to page
   CComPtr<IWebBrowser2> spWebBrowser2;
   HRESULT hr = GetWebBrowser2(spWebBrowser2);
   if (SUCCEEDED(hr))
   {
      CString cszURL = _T("file:///") + cszFilename;
      cszURL.Replace(_T("\\"), _T("/"));

      CComBSTR bstrURL = cszURL;

      CComVariant varFlags(static_cast<int>(navNoHistory | navNoWriteToCache));

      hr = spWebBrowser2->Navigate(bstrURL, &varFlags, NULL, NULL, NULL);
   }

   if (!m_cszHtmlFilename.IsEmpty())
      DeleteFile(m_cszHtmlFilename);
   m_cszHtmlFilename = cszFilename;
}
Author: vividos, Project: PartyVideoDJ, Lines: 56, Source: YouTubeWebPageView.cpp
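
Note that the VideoInfo in this example is PartyVideoDJ's own small value class, identified only by the Name() and Address() accessors used above; it is unrelated to the Avisynth VideoInfo struct that most of the later examples use. Its definition is not part of the excerpt, so the following is purely an illustrative sketch consistent with those two calls:

// Hypothetical sketch; the real PartyVideoDJ class may look different.
class VideoInfo
{
public:
   VideoInfo(const CString& name, const CString& address)
      : m_cszName(name), m_cszAddress(address)
   {
   }

   CString Name() const { return m_cszName; }       // title shown in the embedded page
   CString Address() const { return m_cszAddress; } // video URL substituted into the HTML template

private:
   CString m_cszName;
   CString m_cszAddress;
};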

Example 2: allocSpace

bool PlanarFrame::allocSpace(VideoInfo &viInfo)
{
	myalignedfree(planar_4);
	myalignedfree(planar_3);
	myalignedfree(planar_2);
	myalignedfree(planar_1);
	alloc_ok=false;

	grey = viInfo.IsY();
	isRGBPfamily = viInfo.IsPlanarRGB() || viInfo.IsPlanarRGBA();
	isAlphaChannel = viInfo.IsYUVA() || viInfo.IsPlanarRGBA();
	pixelsize = (uint8_t)viInfo.ComponentSize(); // AVS16
	bits_per_pixel = (uint8_t)viInfo.BitsPerComponent();
	
	ypitch = uvpitch = 0;
	ywidth = uvwidth = 0;
	yheight = uvheight = 0;

	int height = viInfo.height;
	int width = viInfo.width;
	if ((height==0) || (width==0)) return(false);
	if (viInfo.Is420())
	{
		ypitch = modnpf((int)pixelsize*(width+MIN_PAD),MIN_ALIGNMENT);
		ywidth = width;
		yheight = height;
		width >>= 1;
		height >>= 1;
		uvpitch = modnpf((int)pixelsize*(width+MIN_PAD),MIN_ALIGNMENT);
		uvwidth = width;
		uvheight = height;
	}
Author: (not listed), Project: (not listed), Lines: 32, Source: (not listed)
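
The excerpt stops inside the 4:2:0 branch and does not include modnpf, the pitch-rounding helper both PlanarFrame versions rely on. Assuming it simply rounds a byte count up to the next multiple of the requested alignment, a plausible implementation is:

// Assumed helper: round size up to the next multiple of 'align' (e.g. MIN_ALIGNMENT).
static int modnpf(const int size, const int align)
{
	return ((size + align - 1) / align) * align;
}

Together with MIN_PAD this keeps every plane's pitch a multiple of the alignment, so row pointers stay suitable for aligned SIMD loads and stores.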

Example 3: allocSpace

bool PlanarFrame::allocSpace(VideoInfo &viInfo)
{
	if (y != NULL) { _aligned_free(y); y = NULL; }
	if (u != NULL) { _aligned_free(u); u = NULL; }
	if (v != NULL) { _aligned_free(v); v = NULL; }
	int height = viInfo.height;
	int width = viInfo.width;
	if (viInfo.IsYV12())
	{
		ypitch = modnpf(width+MIN_PAD,MIN_ALIGNMENT);
		ywidth = width;
		yheight = height;
		width >>= 1;
		height >>= 1;
		uvpitch = modnpf(width+MIN_PAD,MIN_ALIGNMENT);
		uvwidth = width;
		uvheight = height;
		y = (unsigned char*)_aligned_malloc(ypitch*yheight,MIN_ALIGNMENT);
		if (y == NULL) return false;
		u = (unsigned char*)_aligned_malloc(uvpitch*uvheight,MIN_ALIGNMENT);
		if (u == NULL) return false;
		v = (unsigned char*)_aligned_malloc(uvpitch*uvheight,MIN_ALIGNMENT);
		if (v == NULL) return false;
		return true;
	}
Author: Rodeo314, Project: nnedi3, Lines: 25, Source: PlanarFrame.cpp

Example 4: Redraw

void VideoSink::Redraw(const VideoInfo& aInfo) {
  AssertOwnerThread();

  // No video track, nothing to draw.
  if (!aInfo.IsValid() || !mContainer) {
    return;
  }

  auto now = TimeStamp::Now();

  RefPtr<VideoData> video = VideoQueue().PeekFront();
  if (video) {
    if (mBlankImage) {
      video->mImage = mBlankImage;
    }
    video->MarkSentToCompositor();
    mContainer->SetCurrentFrame(video->mDisplay, video->mImage, now);
    if (mSecondaryContainer) {
      mSecondaryContainer->SetCurrentFrame(video->mDisplay, video->mImage, now);
    }
    return;
  }

  // When we reach here, it means there are no frames in this video track.
  // Draw a blank frame to ensure there is something in the image container
  // to fire 'loadeddata'.

  RefPtr<Image> blank =
      mContainer->GetImageContainer()->CreatePlanarYCbCrImage();
  mContainer->SetCurrentFrame(aInfo.mDisplay, blank, now);

  if (mSecondaryContainer) {
    mSecondaryContainer->SetCurrentFrame(aInfo.mDisplay, blank, now);
  }
}
Author: jasonLaster, Project: gecko-dev, Lines: 35, Source: VideoSink.cpp

Example 5: GetFrame

/////////////
// Get frame
PVideoFrame __stdcall DrawPRS::GetFrame(int n, IScriptEnvironment* env) {
    // Avisynth frame
    PVideoFrame avsFrame = child->GetFrame(n,env);

    try {
        // Check if there is anything to be drawn
        if (file.HasDataAtFrame(n)) {
            // Create the PRSFrame structure
            env->MakeWritable(&avsFrame);
            PRSVideoFrame frame;
            frame.data[0] = (char*) avsFrame->GetWritePtr();
            frame.w = avsFrame->GetRowSize()/4;
            frame.h = avsFrame->GetHeight();
            frame.pitch = avsFrame->GetPitch();
            frame.flipColors = true;
            frame.flipVertical = true;

            // Set colorspace
            VideoInfo vi = child->GetVideoInfo();
            if (vi.IsYV12()) frame.colorSpace = ColorSpace_YV12;
            else if (vi.IsYUY2()) frame.colorSpace = ColorSpace_YUY2;
            else if (vi.IsRGB32()) frame.colorSpace = ColorSpace_RGB32;
            else if (vi.IsRGB24()) frame.colorSpace = ColorSpace_RGB24;

            // Draw into the frame
            file.DrawFrame(n,&frame);
        }
    }

    // Catch exception
    catch (const std::exception &e) {
        env->ThrowError("%s", e.what()); // pass the message as an argument, not as the format string
    }

    // Return frame
    return avsFrame;
}
Author: BackupTheBerlios, Project: aegisub-svn, Lines: 39, Source: draw_prs.cpp
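
The colorspace dispatch above works because DrawPRS accepts all four formats it checks for. A filter that supports only some formats typically validates the VideoInfo once in its constructor and rejects everything else; the sketch below is illustrative only (the class name and the accepted formats are not taken from the DrawPRS source):

// Illustrative Avisynth filter constructor that restricts the input colorspace.
class ExampleFilter : public GenericVideoFilter
{
public:
    ExampleFilter(PClip _child, IScriptEnvironment* env)
        : GenericVideoFilter(_child)
    {
        // 'vi' is the VideoInfo member inherited from GenericVideoFilter.
        if (!vi.IsYV12() && !vi.IsRGB32())
            env->ThrowError("ExampleFilter: input must be YV12 or RGB32");
    }
};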

Example 6: VPXDecoder

already_AddRefed<MediaDataDecoder>
AgnosticDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                          layers::LayersBackend aLayersBackend,
                                          layers::ImageContainer* aImageContainer,
                                          FlushableTaskQueue* aVideoTaskQueue,
                                          MediaDataDecoderCallback* aCallback)
{
  RefPtr<MediaDataDecoder> m;

  if (VPXDecoder::IsVPX(aConfig.mMimeType)) {
    m = new VPXDecoder(*aConfig.GetAsVideoInfo(),
                       aImageContainer,
                       aVideoTaskQueue,
                       aCallback);
  }

  return m.forget();
}
Author: Danielzac, Project: gecko-dev, Lines: 18, Source: AgnosticDecoderModule.cpp

Example 7: AssertOwnerThread

void
VideoSink::Redraw(const VideoInfo& aInfo)
{
  AssertOwnerThread();

  // No video track, nothing to draw.
  if (!aInfo.IsValid() || !mContainer) {
    return;
  }

  if (VideoQueue().GetSize() > 0) {
    RenderVideoFrames(1);
    return;
  }

  // When we reach here, it means there are no frames in this video track.
  // Draw a blank frame to ensure there is something in the image container
  // to fire 'loadeddata'.
  RefPtr<Image> blank =
    mContainer->GetImageContainer()->CreatePlanarYCbCrImage();
  mContainer->SetCurrentFrame(aInfo.mDisplay, blank, TimeStamp::Now());
}
Author: subsevenx2001, Project: gecko-dev, Lines: 22, Source: VideoSink.cpp

Example 8: strcpy

/// @brief Read from environment
/// @param _clip
///
void AvisynthAudioProvider::LoadFromClip(AVSValue _clip) {
	AVSValue script;

	// Check if it has audio
	VideoInfo vi = _clip.AsClip()->GetVideoInfo();
	if (!vi.HasAudio()) throw agi::AudioDataNotFoundError("No audio found.", 0);

	IScriptEnvironment *env = avs_wrapper.GetEnv();

	// Convert to one channel
	char buffer[1024];
	strcpy(buffer,lagi_wxString(OPT_GET("Audio/Downmixer")->GetString()).mb_str(csConvLocal));
	script = env->Invoke(buffer, _clip);

	// Convert to 16 bits per sample
	script = env->Invoke("ConvertAudioTo16bit", script);
	vi = script.AsClip()->GetVideoInfo();

	// Convert sample rate
	int setsample = OPT_GET("Provider/Audio/AVS/Sample Rate")->GetInt();
	if (vi.SamplesPerSecond() < 32000) setsample = 44100;
	if (setsample != 0) {
		AVSValue args[2] = { script, setsample };
		script = env->Invoke("ResampleAudio", AVSValue(args,2));
	}

	// Set clip
	PClip tempclip = script.AsClip();
	vi = tempclip->GetVideoInfo();

	// Read properties
	channels = vi.AudioChannels();
	num_samples = vi.num_audio_samples;
	sample_rate = vi.SamplesPerSecond();
	bytes_per_sample = vi.BytesPerAudioSample();
	float_samples = false;

	clip = tempclip;
}
Author: sthenc, Project: Aegisub, Lines: 42, Source: audio_provider_avs.cpp
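
All of the audio parameters the provider stores at the end come straight from VideoInfo accessors. A small standalone sketch of the same queries, written here only to illustrate the API (the helper name is made up), might look like:

// Sketch: compute a clip's audio duration from its VideoInfo, mirroring the reads above.
double AudioLengthSeconds(const PClip& clip)
{
	const VideoInfo vi = clip->GetVideoInfo();
	if (!vi.HasAudio())
		return 0.0;

	const int channels      = vi.AudioChannels();        // e.g. 2 for stereo
	const int sample_rate   = vi.SamplesPerSecond();     // Hz
	const int bytes_per_smp = vi.BytesPerAudioSample();  // one sample across all channels
	(void)channels; (void)bytes_per_smp;                 // listed only to mirror the provider

	return (double)vi.num_audio_samples / sample_rate;
}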

Example 9: dimzon_avs_init

int __stdcall dimzon_avs_init(SafeStruct** ppstr, char *func, char *arg, AVSDLLVideoInfo *vi)
{
	SafeStruct* pstr = NULL;

	if(!*ppstr)
	{
		pstr = ((SafeStruct*)malloc(sizeof(SafeStruct)));
		*ppstr = pstr;
		memset(pstr, 0, sizeof(SafeStruct));

		pstr->dll = LoadLibrary("avisynth.dll");
		if(!pstr->dll)
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "Cannot load avisynth.dll", _TRUNCATE);
			return 1;
		}

		IScriptEnvironment* (*CreateScriptEnvironment)(int version) = (IScriptEnvironment*(*)(int)) GetProcAddress(pstr->dll, "CreateScriptEnvironment");
		if(!CreateScriptEnvironment)
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "Cannot load CreateScriptEnvironment", _TRUNCATE);
			return 2;
		}

		pstr->env = CreateScriptEnvironment(AVISYNTH_INTERFACE_VERSION);

		if (pstr->env == NULL)
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "Required Avisynth 2.5", _TRUNCATE);
			return 3;
		}
	}
	else
	{
		pstr = *ppstr;
	}

	pstr->err[0] = 0;

	//Called here only to obtain ppstr
	if (!func || strlen(func) == 0 || !arg)
		return 0;

	try
	{
		AVSValue arg_val(arg); // renamed so the AVSValue is built from the 'arg' parameter rather than self-initialized
		AVSValue res;

		if (vi != NULL && vi->mt_import == MT_DISABLED)
		{
			//If needed, disable MT before the import
			try { pstr->env->Invoke("SetMTMode", 0); }
			catch (IScriptEnvironment::NotFound) { /*AviSynth build without MT*/ }
		}

		res = pstr->env->Invoke(func, AVSValue(&arg_val, 1));
		if(!*ppstr) return 1;

		if (!res.IsClip())
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "The script's return was not a video clip.", _TRUNCATE);
			return 4;
		}

		if (vi != NULL && (vi->mt_import == MT_ADDDISTR || vi->mt_import == MT_ADDM1DISTR))
		{
			try
			{
				//If needed, add Distributor() after the import
				AVSValue mt_test = pstr->env->Invoke("GetMTMode", false);
				const int mt_mode = mt_test.IsInt() ? mt_test.AsInt() : 0;
				if (mt_mode > 0 && mt_mode < 5)
				{
					if (mt_mode != 1 && vi->mt_import == MT_ADDM1DISTR)
						pstr->env->Invoke("SetMTMode", 1);

					res = pstr->env->Invoke("Distributor", res);
				}
			}
			catch (IScriptEnvironment::NotFound) { /*AviSynth build without MT*/ }

			if (!res.IsClip())
			{
				strncpy_s(pstr->err, ERRMSG_LEN, "After adding \"Distributor()\" the script's return was not a video clip.", _TRUNCATE);
				return 4;
			}
		}

		pstr->clp = res.AsClip();
		VideoInfo inf = pstr->clp->GetVideoInfo();

		if (inf.HasVideo())
		{
			string filter = "";
			string err_string = "";

			//Original and requested PixelType
			if (vi != NULL) vi->pixel_type_orig = inf.pixel_type;
			int pixel_type_req = (vi != NULL) ? vi->pixel_type : 0;

//......... part of the code omitted here .........
Author: BrunoReX, Project: xvid4psp, Lines: 101, Source: AviSynthWrapper.cpp

Example 10: main

int main(int argc, TCHAR* argv[]) {
	SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED);

	printf("Usage: filmtester <avs filename> [duplicates_maxlength=2]\n");
	printf("The program plays the AVS file and tests for frame duplicates\n\n");

	int duplicates_maxlength = 2;
	if (argc < 2) {
		printf("No filename specified.\n\n");
		return -1;
	}
	if (argc > 2) {
		duplicates_maxlength = _ttoi(argv[2]);
		printf("INFO: duplicates_maxlength set to %d\n", duplicates_maxlength);
	}

	IScriptEnvironment *env = CreateScriptEnvironment();
	_tprintf(_T("Loading \"%s\" ...\n"), argv[1]);

	LPCSTR arg_names[1] = { nullptr };
	AVSValue arg_vals[1] = { (LPCSTR)argv[1] };
	clip = env->Invoke("import", AVSValue(arg_vals,1), arg_names).AsClip();

	printf("AVS file loaded successfully.\n\n");

	VideoInfo vi = clip->GetVideoInfo();
	printf("VideoInfo:\n");
	printf("-----------\n");
	if (vi.HasVideo()) {
		printf("width x height: %dx%d\n", vi.width, vi.height);
		printf("num_frames: %d\n", vi.num_frames);
		printf("fps: %d/%d\n", vi.fps_numerator, vi.fps_denominator);

		std::string colorspace;
		if (vi.pixel_type & VideoInfo::CS_BGR) colorspace += "BGR, ";
		if (vi.pixel_type & VideoInfo::CS_YUV) colorspace += "YUV, ";
		if (vi.pixel_type & VideoInfo::CS_INTERLEAVED) colorspace += "INTERLEAVED, ";
		if (vi.pixel_type & VideoInfo::CS_PLANAR) colorspace += "PLANAR, ";
		if (colorspace.length() > 0) colorspace.erase(colorspace.length()-2);
		printf("colorspace: %s\n", colorspace.c_str());

		std::string colorformat;
		if (vi.pixel_type & VideoInfo::CS_BGR24) colorformat += "BGR24, ";
		if (vi.pixel_type & VideoInfo::CS_BGR32) colorformat += "BGR32, ";
		if (vi.pixel_type & VideoInfo::CS_YUY2)  colorformat += "YUY2, ";
		if (vi.pixel_type & VideoInfo::CS_YV12)  colorformat += "YV12, ";
		if (vi.pixel_type & VideoInfo::CS_I420)  colorformat += "I420 (IYUV), ";
		if (colorformat.length() > 0)
			colorformat.erase(colorformat.length()-2);
		else
			colorformat = "UNKNOWN";
		printf("colorformat: %s\n", colorformat.c_str());

		std::string imagetype;
		if (vi.image_type & VideoInfo::IT_BFF) imagetype += "BFF, ";
		if (vi.image_type & VideoInfo::IT_TFF) imagetype += "TFF, ";
		if (vi.image_type & VideoInfo::IT_FIELDBASED)  imagetype += "FIELDBASED, ";
		if (imagetype.length() > 0)
			imagetype.erase(imagetype.length()-2);
		else
			imagetype = "UNKNOWN";
		printf("image_type: %s\n", imagetype.c_str());
		printf("bits per pixel: %d\n", vi.BitsPerPixel());
	}
	else
		printf("NO VIDEO\n");

	if (vi.HasAudio()) {
		printf("audio channels: %d\n", vi.nchannels);
		printf("sample_type: %x\n", vi.sample_type);
		printf("samples per second: %d\n", vi.audio_samples_per_second);
		printf("bytes per channel sample: %d\n", vi.BytesPerChannelSample());
		printf("bytes per audio sample: %d\n", vi.BytesPerAudioSample());
		printf("num_audio_samples: %lld\n", vi.num_audio_samples);
	}
	else
		printf("NO AUDIO\n");
	printf("-----------\n\n");

	if (!vi.HasVideo()) {
		printf("Can't start video playback for the sequence without video.\n\n");
		return -1;
	}

	printf("Starting playback ...\n");
	prev_frame = clip->GetFrame(0, env);
	int framesize = prev_frame->GetFrameBuffer()->GetDataSize();
	printf("INFO: framesize = %d bytes.\n\n", framesize);

	InitializeCriticalSection(&cs);
	SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);

	int error_count = 0;
	int dup_start_frame = 0;
	bool flag_dup = false;
	std::vector<std::pair<int, int>> duplicates;
	for(int i=1; i<vi.num_frames; ++i) {
		EnterCriticalSection(&cs);
		dst = clip->GetFrame(i, env);
		const BYTE *src_ptr = prev_frame->GetFrameBuffer()->GetReadPtr();
//......... part of the code omitted here .........
Author: slavanap, Project: ssifSource, Lines: 101, Source: filmtester.cpp
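
The listing is cut off right before the actual duplicate test. The following is only a guess at what the omitted loop body does, based on the variables already declared in the excerpt (dst, prev_frame, src_ptr, framesize, flag_dup, dup_start_frame, duplicates): compare the two frame buffers byte for byte and track runs of identical frames.

		// Hedged sketch of the omitted duplicate check (assumption, not the original code).
		const BYTE *dst_ptr = dst->GetFrameBuffer()->GetReadPtr();
		bool is_duplicate = (memcmp(src_ptr, dst_ptr, framesize) == 0);
		if (is_duplicate && !flag_dup) {
			flag_dup = true;
			dup_start_frame = i - 1;      // the run starts at the previous frame
		} else if (!is_duplicate && flag_dup) {
			flag_dup = false;
			duplicates.push_back(std::make_pair(dup_start_frame, i - 1));
		}
		prev_frame = dst;                 // slide the comparison window forward
		LeaveCriticalSection(&cs);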

Example 11: GetSubSamplingW

static int GetSubSamplingW(const VideoInfo &vi) {
    if (vi.IsYUV() && vi.IsPlanar())
        return vi.GetPlaneWidthSubsampling(PLANAR_U);
    else
        return 0;
}
Author: jeeb, Project: ffms2, Lines: 6, Source: avssources.cpp
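
A matching helper for the vertical direction follows the same pattern, querying the U plane's height subsampling instead (shown here as a sketch, not copied from the ffms2 source):

// Vertical counterpart: log2 of the U plane's height subsampling, 0 for anything non-planar-YUV.
static int GetSubSamplingH(const VideoInfo &vi) {
    if (vi.IsYUV() && vi.IsPlanar())
        return vi.GetPlaneHeightSubsampling(PLANAR_U);
    else
        return 0;
}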

Example 12: dimzon_avs_invoke

int __stdcall dimzon_avs_invoke(SafeStruct* pstr, char *func, char **arg, int len, AVSDLLVideoInfo *vi, float* func_out)
{
	try
	{
		*func_out = -FLT_MAX;
		pstr->err[0] = 0;

		const int N = 10;
		int actual_len = 0;

		AVSValue args[N] = { };
		if (len == 0) args[0] = 0;
		else if (len > N) len = N;

		for(int i = 0; i < len; i++)
		{
			if (strlen(arg[i]) > 0)
			{
				string lower = arg[i];
				bool was_letters = false;
				bool was_digits = false;
				bool was_spaces = false;

				//Overly long values are definitely text
				for (unsigned int n = 0; n < lower.size() && lower.size() <= 10; n++)
				{
					lower[n] = tolower(lower[n]);
					if (!was_letters && isalpha(lower[n])) was_letters = true;
					if (!was_digits && isdigit(lower[n])) was_digits = true;
					if (!was_spaces && isspace(lower[n])) was_spaces = true;
				}

				if (i == 0 && was_letters && !was_digits && !was_spaces && lower.compare("last") == 0)
				{
					//Clip (last)
					if(!pstr->clp) throw AvisynthError("AviSynthWrapper: The \"last\" clip was requested, but it doesn't exist!");
					args[actual_len] = pstr->clp; //pstr->res->AsClip();
					actual_len += 1;

					//pstr->clp; pstr->res->AsClip(); //with the processing from previous Invoke calls applied
					//pstr->env->GetVar("last").AsClip(); //the script's "clean" output
				}
				else if (was_letters && !was_digits && !was_spaces && lower.compare("true") == 0)
				{
					//Bool (true)
					args[actual_len] = true;
					actual_len += 1;
				}
				else if (was_letters && !was_digits && !was_spaces && lower.compare("false") == 0)
				{
					//Bool (false)
					args[actual_len] = false;
					actual_len += 1;
				}
				else if (!was_letters && was_digits && !was_spaces && lower.find(".") != string::npos)
				{
					//Float (double..)
					args[actual_len] = atof(arg[i]);
					actual_len += 1;
				}
				else if (!was_letters && was_digits && !was_spaces)
				{
					//Integer
					args[actual_len] = atoi(arg[i]);
					actual_len += 1;
				}
				else
				{
					//String
					args[actual_len] = arg[i];
					actual_len += 1;
				}
			}
		}

		AVSValue res = pstr->env->Invoke(func, AVSValue(args, actual_len));

		if (!res.IsClip())
		{
			//Output the result
			if (res.IsBool())
			{ 
				if(!res.AsBool()) *func_out = 0;
				else *func_out = FLT_MAX;
			}
			else if (res.IsInt()) *func_out = (float)res.AsInt();
			else if (res.IsFloat()) *func_out = (float)res.AsFloat();
			else if (res.IsString()) { *func_out = FLT_MAX; strncpy_s(pstr->err, ERRMSG_LEN, res.AsString(), _TRUNCATE); }
		}
		else
		{
			pstr->clp = res.AsClip();
			VideoInfo inf = pstr->clp->GetVideoInfo();

			if (vi != NULL)
			{
				vi->width   = inf.width;
				vi->height  = inf.height;
				vi->raten   = inf.fps_numerator;
				vi->rated   = inf.fps_denominator;
//......... part of the code omitted here .........
Author: BrunoReX, Project: xvid4psp, Lines: 101, Source: AviSynthWrapper.cpp

Example 13: OutputAudio

int AvxContext::OutputAudio() {
	FILE *sink;
	void *writeBuffer = NULL;
	sighandler_t old_sigpipe = signal(SIGPIPE, SIG_IGN);

	if (launchMPlayer) {
		char command[1024];
		if (MPlayerCommandAudio(command))
			return -1;
		AVXLOG_INFO("MPlayer command line: %s", command);

		sink = popen(command, "w");
		if (!sink) {
			AVXLOG_ERROR("%s", "Error starting mplayer");
			return -1;
		}
	} else {
		sink = stdout;
	}

	#define AUDIO_SAMPLES 1000
	try {
		writeBuffer = malloc(vi.BytesPerAudioSample() * AUDIO_SAMPLES);
		if (!writeBuffer) {
			AVXLOG_ERROR("%s", "Unable to allocate memory");
			goto fail;
		}
		for (__int64 i = 0; i < vi.num_audio_samples; i += AUDIO_SAMPLES) {
			if (launchMPlayer && (feof(sink) || ferror(sink))) {
				AVXLOG_ERROR("%s", "mplayer process exited");
				break;
			}
			int read_samples;
			if (vi.num_audio_samples - AUDIO_SAMPLES < i)
				read_samples = vi.num_audio_samples - i;
			else
				read_samples = AUDIO_SAMPLES;
			clip->GetAudio(writeBuffer, i, read_samples, avx_library.env);
			fwrite(writeBuffer, vi.BytesPerAudioSample(), read_samples, sink);
		}
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		goto fail;
	}
	#undef AUDIO_SAMPLES

	free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return 0;

fail:
	if (writeBuffer)
		free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return -1;
}
Author: btb, Project: avxsynth, Lines: 60, Source: avxSynthAppInterface.cpp

Example 14: MPlayerCommandAudio

int AvxContext::MPlayerCommandAudio(char *command) { // This doesn't seem to work on my MPlayer
	if (vi.sample_type == SAMPLE_FLOAT) {
		AVXLOG_ERROR("%s", "Cannot pipe float audio to mplayer");
		return -1;
	}

	sprintf(command, MPLAYER " -demuxer rawaudio -rawaudio channels=%d:rate=%d:samplesize=%d:format=0 - 1> /dev/null",
			vi.nchannels, vi.audio_samples_per_second, vi.BytesPerChannelSample());
	return 0;
}
Author: btb, Project: avxsynth, Lines: 10, Source: avxSynthAppInterface.cpp

Example 15: EMU_copyMasterBuffer

void EMU_copyMasterBuffer()
{
	video.srcBuffer = (u8*)GPU_screen;
	
	//convert pixel format to 32bpp for compositing
	//why do we do this over and over? well, we are compositing to
	//filteredbuffer32bpp, and it needs to get refreshed each frame..
	const int size = video.size();
	u16* src = (u16*)video.srcBuffer;
    u32* dest = video.buffer;
    for(int i=0;i<size;++i)
        *dest++ = 0xFF000000 | RGB15TO32_NOALPHA(src[i]);
	
}
Author: (not listed), Project: (not listed), Lines: 14, Source: (not listed)


Note: The VideoInfo class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. When redistributing or using the code, please follow the license of the corresponding project. Do not reproduce this article without permission.