本文整理汇总了C#中Emgu.CV.Capture.SetCaptureProperty方法的典型用法代码示例。如果您正苦于以下问题:C# Capture.SetCaptureProperty方法的具体用法?C# Capture.SetCaptureProperty怎么用?C# Capture.SetCaptureProperty使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Emgu.CV.Capture
的用法示例。
在下文中一共展示了Capture.SetCaptureProperty方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: button1_Click
// Starts playback: grabs frames either from the default webcam (forced to
// 1280x720) or from the bundled demo video, then spins up the frame-update
// timer and the display thread.
private void button1_Click(object sender, RoutedEventArgs e)
{
    m_Playing = true;
    m_Engine = new Engine.Engine();

    bool useWebcam = RadioButtonWebcam.IsChecked.Value;
    if (useWebcam)
    {
        // Camera index 0 = system default device; request 1280x720 frames.
        m_Capture = new Capture(0);
        m_Capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, 1280);
        m_Capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, 720);
    }
    else
    {
        m_Capture = new Capture(System.IO.Path.GetFullPath(".\\..\\Videos\\vidD.mp4"));
    }

    // Timer period 1000/15 ms -> ~15 expected frame updates per second.
    m_Timer = new Timer(ExpectedFrameUpdate, null, 0, 1000 / 15);
    m_DisplayFrames = new Thread(ShowFrames);
    m_DisplayFrames.Start();
}
示例2: InitVideoCapture
// Opens the video file at `path`, requests a 640x360 frame size at 5 fps,
// records the total frame count, and starts asynchronous frame grabbing
// (frames arrive via VideoCaptureInterface_ImageGrabbed).
// path: path to the video file to open.
public void InitVideoCapture(string path)
{
    try
    {
        m_FrameMat = new Mat();
        m_VideoCaptureFilename = path;
        m_VideoCaptureInterface = null;
        m_VideoCaptureInterface = new Capture(m_VideoCaptureFilename);
        // Fix: the original passed 640 to FrameHeight and 360 to FrameWidth,
        // swapping the conventional 640x360 dimensions; corrected here.
        m_VideoCaptureInterface.SetCaptureProperty(CapProp.FrameWidth, 640);
        m_VideoCaptureInterface.SetCaptureProperty(CapProp.FrameHeight, 360);
        m_VideoCaptureInterface.SetCaptureProperty(CapProp.Fps, 5);
        m_VideoCaptureInterface.ImageGrabbed += VideoCaptureInterface_ImageGrabbed;
        m_VideoCaptureFrameCount = (int)m_VideoCaptureInterface.GetCaptureProperty(CapProp.FrameCount);
        m_VideoCaptureInterface.Start();
    }
    catch (Exception e)
    {
        // Fix: was an empty catch that silently discarded every failure; at
        // least record the error so a bad path or missing codec is diagnosable.
        System.Diagnostics.Debug.WriteLine("InitVideoCapture failed: " + e);
    }
}
示例3: CatchImages
//filename: Catch image from video file
//ms: Capture every ms. ms = 0 means capture all frames. (24 frames per second)
// Returns the paths of the gray/thresholded images written to outputPath;
// returns an empty list when the inputs are invalid or capture fails.
public List<string> CatchImages(string fileName, int ms, string outputPath)
{
    _log.Debug("Start to capture");
    if (string.IsNullOrWhiteSpace(fileName) || string.IsNullOrWhiteSpace(outputPath))
    {
        _log.Error("Cannot catch images from path: " + fileName + " and output to: " + outputPath);
        // Fix: the original only logged and then fell through to open the
        // invalid path, throwing outside any try block; bail out instead.
        return new List<string>();
    }
    List<string> imagePath = new List<string>();
    try
    {
        // Fix: the original created a brand-new Capture (re-opening the file)
        // for every single frame; open it once and seek with PosFrames.
        using (Capture capture = new Capture(fileName))
        {
            int fc = (int)capture.GetCaptureProperty(CapProp.FrameCount);
            if (!Directory.Exists(outputPath))
            {
                Directory.CreateDirectory(outputPath);
            }
            //TODO: Modified this to change period of capture image.
            // Original hard-coded 30 frames (fc commented out); keep that limit
            // but clamp to the real frame count so short clips don't over-seek.
            int limit = Math.Min(30, fc);
            for (int index = 1; index <= limit; index++)
            {
                capture.SetCaptureProperty(CapProp.PosFrames, (double)index);
                using (Mat mat = capture.QueryFrame())
                {
                    if (mat == null)
                    {
                        break; // no more decodable frames
                    }
                    // Zero-pad so file names sort in frame order ("01".."30").
                    string indexStr = index < 10 ? "0" + index : index.ToString();
                    string imgPath = outputPath + "\\" + indexStr;
                    //long quality = 60;
                    //saveJpeg(imgPath, mat.Bitmap, quality);
                    string grayImgName = saveGrayAndThreshold(imgPath, mat.Bitmap);
                    if (!string.IsNullOrEmpty(grayImgName))
                    {
                        imagePath.Add(grayImgName);
                    }
                }
            }
        }
    }
    catch (System.Exception ex)
    {
        _log.Error("Exception:", ex);
    }
    return imagePath;
}
示例4: MainForm
// Wires up the form: picks a frame source (live camera or recorded test
// video), loads the matching calibration and homography files, and hooks the
// visual odometer into the application's idle loop.
public MainForm()
{
    InitializeComponent();
    m_UnitsComboBox.SelectedIndex = 0;

    CameraParameters calibration = null;
    HomographyMatrix groundHomography = null;

    // Hard-wired to the recorded-video path; flip to true for a live camera.
    bool liveCamera = false;
    if (liveCamera)
    {
        m_Capture = new Capture();
        m_Capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, 1280);
        m_Capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, 720);
        calibration = CameraParameters.Load(@"C:\svnDev\oss\Google\drh-visual-odometry\CalibrationFiles\MicrosoftCinema\Focus14\1280x720\MicrosoftCinemaFocus14_1280x720.txt");
        groundHomography = HomographyMatrixSupport.Load(@"C:\svnDev\oss\Google\drh-visual-odometry\CalibrationFiles\MicrosoftCinema\Focus14\1280x720\BirdsEyeViewTransformationForCalculation.txt");
        m_GroundProjectionTransformationForUI = HomographyMatrixSupport.Load(@"C:\svnDev\oss\Google\drh-visual-odometry\CalibrationFiles\MicrosoftCinema\Focus14\1280x720\BirdsEyeViewTransformationForUI.txt");
    }
    else
    {
        m_Capture = new Capture(@"C:\svnDev\oss\Google\drh-visual-odometry\TestVideos\2010-07-18 11-10-22.853.wmv");
        m_Timer.Interval = 33;   // ~30 fps playback
        m_Timer.Enabled = true;
        calibration = CameraParameters.Load(@"C:\svnDev\oss\Google\drh-visual-odometry\CalibrationFiles\MicrosoftCinema\Focus12\1280x720\MicrosoftCinemaFocus12_1280x720.txt");
        groundHomography = HomographyMatrixSupport.Load(@"C:\svnDev\oss\Google\drh-visual-odometry\CalibrationFiles\MicrosoftCinema\Focus12\1280x720\BirdsEyeViewTransformationForCalculation.txt");
        m_GroundProjectionTransformationForUI = HomographyMatrixSupport.Load(@"C:\svnDev\oss\Google\drh-visual-odometry\CalibrationFiles\MicrosoftCinema\Focus12\1280x720\BirdsEyeViewTransformationForUI.txt");
    }

    m_VisualOdometer = new VisualOdometer(m_Capture, calibration, groundHomography, new OpticalFlow());
    UpdateFromModel();
    m_VisualOdometer.Changed += new EventHandler(OnVisualOdometerChanged);
    Application.Idle += OnApplicationIdle;
}
示例5: Base
// Constructs the robot base: connects to the first COM port that accepts an
// Arduino, then wires up sensors, webcam (720p), Xbox controller, servos
// (moved to their start angles), and speech recognition.
// Throws Exception when no COM port exists or none hosts an Arduino.
public Base()
{
    var comports = SerialPort.GetPortNames();
    if (comports.Length == 0)
    {
        throw new Exception("Error: No COM ports found");
    }
    // Probe each port in turn until one accepts an Arduino connection.
    for (int i = 0; i < comports.Length; i++)
    {
        try
        {
            Mega2560 = new Arduino(comports[i]);
            break;
        }
        catch (Exception) {}
    }
    // Fix: previously, if every port failed, Mega2560 stayed null and the
    // sensor constructors below crashed with a NullReferenceException.
    if (Mega2560 == null)
    {
        throw new Exception("Error: Could not connect to an Arduino on any COM port");
    }
    ForceSensor = new ResistiveForce(Mega2560, Force_Analog_Pin);
    DistanceSensor = new SharpIR(Mega2560, Distance_Analog_Pin);
    LightSensor = new Sensor(Mega2560, Light_Analog_Pin);
    Webcam = new Capture();
    Dictionary = new SpeechDictionary();
    Webcam.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, 1280);
    Webcam.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, 720); // set cam resolution to 720P
    Webcam.QueryFrame(); // take a test photo
    Webcam.ImageGrabbed += new EventHandler(Webcam_ImageGrabbed);
    XboxController = new Controller(UserIndex.One);
    xAxisServo = new Servo(ref Mega2560, xServoPin);
    yAxisServo1 = new Servo(ref Mega2560, y1ServoPin, y1ServoMax, y1ServoMin);
    yAxisServo2 = new Servo(ref Mega2560, y2ServoPin, y2ServoMax, y2ServoMin);
    gripperServo = new Servo(ref Mega2560, gripServoPin, gripServoMax, gripServoMin);
    // Drive every servo to its known starting pose.
    xAxisServo.ServoAngleChange(xServoStart);
    yAxisServo1.ServoAngleChange(y1ServoStart);
    yAxisServo2.ServoAngleChange(y2ServoStart);
    gripperServo.ServoAngleChange(gripServoStart);
    SpeechEngine = new SpeechRecognition.Base();
}
示例6: faceTrack
// Sets up the face tracker: loads the Haar cascades for face and eye
// detection, opens camera 0 at 320x240 / 30 fps, and mirrors the camera's
// current properties into the UI trackbars.
public faceTrack()
{
    InitializeComponent();
    CvInvoke.UseOpenCL = false;

    _cascadeClassifierFace = new CascadeClassifier(Application.StartupPath + "/haarcascade_frontalface_default.xml");
    _cascadeClassifierEye = new CascadeClassifier(Application.StartupPath + "/haarcascade_eye.xml");

    try
    {
        capturecam = new Capture(0);
        capturecam.SetCaptureProperty(CapProp.Fps, 30);
        capturecam.SetCaptureProperty(CapProp.FrameHeight, 240);
        capturecam.SetCaptureProperty(CapProp.FrameWidth, 320);
        capturecam.SetCaptureProperty(CapProp.AutoExposure, 1);

        // Reflect the camera's actual settings in the trackbars.
        trackBarUpdate(trackBarContrast, (int)capturecam.GetCaptureProperty(CapProp.Contrast));
        trackBarUpdate(trackBarBrightness, (int)capturecam.GetCaptureProperty(CapProp.Brightness));
        //trackBarUpdate(trackBarGain, (int)capturecam.GetCaptureProperty(CapProp.Gain));
        trackBarUpdate(trackBarZoom, (int)capturecam.GetCaptureProperty(CapProp.Zoom));

        textBoxTime.Text = "Time: ";
        textBoxCodec.Text = "Codec: ";
        textBoxFrameRate.Text = "Frame: ";

        capturecam.ImageGrabbed += ProcessFrame;
        //Application.Idle += ProcessFrame;
        //original.Image = capturecam.QueryFrame();
    }
    catch (NullReferenceException ex)
    {
        MessageBox.Show(ex.Message);
    }
}
示例7: Form1
// Opens the prerecorded Kinect RGB clip, seeks to the current frame, shows it
// in the picture box, and records the clip's total frame count; failures are
// reported to the console only.
public Form1()
{
    InitializeComponent();
    markedPoints = new Dictionary<string, Point[]>();

    try
    {
        capture = new Capture("kinect_local_rgb_raw_synced.avi");
        capture.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.PosFrames, currentFrame);
        pictureBox.Image = capture.QueryFrame().Bitmap;
        frameCount = (int)capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameCount);
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
    }
}
示例8: CameraViewModel
// Builds the camera view model: restores persisted settings, opens the chosen
// capture device at 30 fps, and wires up frame and message handling.
public CameraViewModel()
{
    // Designers instantiate view models too — skip hardware setup there.
    if (IsInDesignMode)
        return;

    SelectedCam = Properties.Settings.Default.SelectedCam;
    DetectionEnabled = Properties.Settings.Default.DetectionEnabled;
    Fps = 0;

    CameraHandler = new CameraHandler();
    Capture = CameraHandler.CreateCapture(SelectedCam);
    Capture.SetCaptureProperty(CapProp.Fps, 30);
    Capture.ImageGrabbed += CaptureOnImageGrabbed;

    _fpsStopwatch = Stopwatch.StartNew();
    _delayStopwatch = new Stopwatch();

    InitializeMessageHandler();
    RefreshCameras();
}
示例9: Initialise
// Opens camera `id` at the requested resolution and starts the grab thread.
// No-ops when the same camera/size is already active. On failure, Status and
// IsRunning report the error and no grab thread is started.
public void Initialise(int id, int width, int height)
{
    if (id == FCameraID && width == FRequestedWidth && height == FRequestedHeight)
        return;

    Close();

    lock (FCaptureLock)
    {
        try
        {
            FCapture = new Capture(id);
            FCapture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, width);
            FCapture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, height);
        }
        catch (Exception)   // fix: exception variable was declared but never used (CS0168)
        {
            Status = "Camera open failed";
            IsRunning = false;
            return;
        }
        Status = "OK";
        IsRunning = true;
        FCameraID = id;
        // Record the dimensions the device actually reports, alongside what
        // was requested.
        FWidth = FCapture.Width;
        FHeight = FCapture.Height;
        FRequestedWidth = width;
        FRequestedHeight = height;
    }

    FCaptureRunThread = true;
    FCaptureThread = new Thread(Capture);
    FCaptureThread.Start();
}
示例10: SetupCapture
// (Re)creates the capture for the given camera index, applying the fps from
// the fpstext box and the resolution chosen in the resolution combo box, then
// records the values the device actually accepted.
private void SetupCapture(int Camera_Identifier)
{
    //update the selected device
    CameraDevice = Camera_Identifier;
    //Dispose of Capture if it was created before
    if (_capture != null) _capture.Dispose();
    try
    {
        //Set up capture device
        _capture = new Capture(CameraDevice);
        // Fix: Int16.Parse threw an uncaught FormatException on non-numeric
        // input (only NullReferenceException is handled below); fall back to
        // 30 fps instead of crashing.
        short requestedFps;
        if (!Int16.TryParse(fpstext.Text, out requestedFps))
        {
            requestedFps = 30;
        }
        _capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FPS, requestedFps);
        if (resolution.SelectedIndex == 0)
        {
            _capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, 480);
            _capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, 640);
        }
        else if (resolution.SelectedIndex == 1)   // fix: was a second independent `if`
        {
            _capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, 1000);
            _capture.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, 2000);
        }
        // The driver may adjust the request, so read back the effective values.
        selectedWidth = _capture.Width;
        selectedHeight = _capture.Height;
        selectedfps = Convert.ToInt16(_capture.GetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FPS));
        // _capture.ImageGrabbed += ProcessFrame;
    }
    catch (NullReferenceException excpt)
    { MessageBox.Show(excpt.Message); }
}
示例11: button1_Click
// Toggles capturing: tears down the running capture, or creates a new one
// with fps/size taken from the UI (auto-filled when the boxes are empty),
// then starts grabbing.
private void button1_Click(object sender, EventArgs e)
{
    if (_captureInProgress)
    {
        //stop the capture
        button1.Text = "Start";
        camListComboBox.Enabled = true;
        fpsBox.Enabled = true;
        widthBox.Enabled = true;
        heightBox.Enabled = true;
        _capture.Dispose();
        _frames = 0;
        _fps = 0;
        _dFps = 1;
    }
    else
    {
        try
        {
            _capture = new Capture(_camIndex);
            _capture.ImageGrabbed += ProcessFrame;
        }
        catch (NullReferenceException excpt)
        {
            MessageBox.Show(excpt.Message);
            // Fix: execution previously fell through and kept configuring and
            // starting a capture that failed to open (and toggled
            // _captureInProgress anyway); bail out instead.
            return;
        }
        //start the capture
        button1.Text = "Stop";
        camListComboBox.Enabled = false;
        fpsBox.Enabled = false;
        widthBox.Enabled = false;
        heightBox.Enabled = false;
        if (String.IsNullOrWhiteSpace(fpsBox.Text))
        {
            fpsBox.Text = sfps.ToString();
            printConsole("FPS set automaticaly: " + fpsBox.Text + "\n");
        }
        else
        {
            // NOTE(review): Convert.ToInt32 still throws on non-numeric text;
            // consider int.TryParse if users can type here.
            sfps = Convert.ToInt32(fpsBox.Text);
        }
        if (String.IsNullOrWhiteSpace(widthBox.Text) || String.IsNullOrWhiteSpace(heightBox.Text))
        {
            widthBox.Text = width.ToString();
            heightBox.Text = height.ToString();
            printConsole("Frame size set automaticaly: " + widthBox.Text + " x " + heightBox.Text+ "\n");
        }
        else
        {
            width = Convert.ToInt32(widthBox.Text);
            height = Convert.ToInt32(heightBox.Text);
        }
        _capture.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.Fps, sfps);
        _capture.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameWidth, width);
        _capture.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameHeight, height);
        _capture.Start();
        _currentTime = DateTime.Now.Ticks;
    }
    _captureInProgress = !_captureInProgress;
}
示例12: Form1
// Form setup: initializes the frame decoder, populates the trackable-items
// combo box from ./itemsToTrack/*.jpg, lists serial ports for the Arduino,
// opens the webcam capture, and prepares the frame-display event handler.
public Form1()
{
    #region visual-studio-inserted code (do not modify)
    // This is inserted by VS by default; do not move.
    InitializeComponent();
    #endregion
    #region setup server to wait for glove connection
    //ServerController server = new ServerController();
    ////for every delegate you want to functino
    //server.registerDelegate(CAPIStreamCommon.PacketType.VIDEO_FRAME, new ImageWork(doWorkOnData));
    //server.startServer(CAPIStreamServer.ConnectionType.TCP);
    #endregion
    #region setup decoder
    initFrameConverter(stream_width, stream_height);
    #endregion
    #region combo box 1 (available items to track)
    itemsAvailableForLocation = new List<string>();
    string[] itemNames = Directory.GetFiles("itemsToTrack/", "*.jpg");
    foreach (string s in itemNames)
    {
        // Fix: the old Regex.Replace calls used unescaped patterns
        // ("itemsToTrack/" and ".jpg", where "." matches any character), so a
        // name containing e.g. "ajpg" would be mangled.
        // Path.GetFileNameWithoutExtension strips directory and extension safely.
        string name = Path.GetFileNameWithoutExtension(s);
        comboBox1.Items.Add(name);
        itemsAvailableForLocation.Add(name);
    }
    comboBox1.DropDownStyle = ComboBoxStyle.DropDownList;
    comboBox1.SelectedIndex = 0;
    #endregion
    #region combo box 2 (serial ports for Arduino connection)
    RefreshSerialPortList();
    comboBox2.DropDownStyle = ComboBoxStyle.DropDownList;
    //comboBox2.SelectedIndex = 0;
    #endregion
    #region check box 1 (haptic feedback)
    // Haptic feedback starts disabled
    checkBox1.Enabled = false;
    #endregion
    #region picture box
    pictureBox1.SizeMode = PictureBoxSizeMode.StretchImage;
    #endregion
    #region initialize Capture object
    /**
     * TODO this shouldn't be done here, as the capture can be initialized in a number
     * of ways in the future.
     * There should be menus with options for initializing capture (from webcam, from
     * file, from streaming) and then once the needed information (e.g. camera number,
     * filename, or ip/port) is input and validated, only THEN will the capture be
     * created.
     */
    // TODO this shouldn't be hardcoded
    cap = new Capture(camera);
    float width = 648.0f, height = 1152.0f;
    cap.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, height);
    cap.SetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, width);
    #endregion
    #region create EventHandler
    /**
     * TODO we either need to make this handler work for ALL video input sources,
     * or make different handlers for different situations.
     * Right now, handling from webcam uses this event handler, which gets attached
     * to Application.Idle. However, handling from streaming doesn't use EventHandlers
     * at all; everything happens in the doWorkOnData function. This needs to be changed.
     */
    ShowFromCamHandler = new EventHandler(ShowFromCam);
    #endregion
}
示例13: btnStart_Click
//btnStart_Click() function is the one that handles our "Start!" button' click
//event. it creates a new capture object if its not created already. e.g at first time
//starting. once the capture is created, it checks if the capture is still in progress,
//if so the
//btnStart_Click() toggles frame grabbing. On first use it opens one Capture
//per camera with the configured fps/size; afterwards it attaches or detaches
//ProcessFrame from the application's idle loop and flips captureInProgress.
private void btnStart_Click(object sender, EventArgs e)
{
    #region if capture is not created, create it now
    if (captures.Count == 0)
    {
        try
        {
            for (int i = 0; i < cameras; i++)
            {
                Capture cap = new Capture(i);
                cap.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.Fps, fps);
                cap.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameHeight, frameHeight);
                cap.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameWidth, frameWidth);
                captures.Add(cap);
            }
        }
        catch (NullReferenceException ex)
        {
            MessageBox.Show(ex.Message);
        }
    }
    #endregion

    // Nothing to toggle if no capture could be created.
    if (captures.Count == 0)
        return;

    if (captureInProgress)
    {
        // Grabbing now: pause, and relabel the button for resuming.
        btnStart.Text = "Start!";
        Application.Idle -= ProcessFrame;
    }
    else
    {
        // Paused now: resume grabbing, and relabel the button for stopping.
        btnStart.Text = "Stop";
        Application.Idle += ProcessFrame;
    }
    captureInProgress = !captureInProgress;
}
示例14: SelectCamera
// (Re)initializes the frame grabber at the resolution chosen in the
// cam_capability combo box. On re-initialization the FrameGrabber idle handler
// is detached during teardown and re-attached afterwards (first call leaves it
// detached, matching the original behavior).
// NOTE(review): the camera_index parameter is unused — the default Capture()
// device is always opened; confirm whether it should select a device.
private void SelectCamera(int camera_index)
{
    // Fix: both branches duplicated the grabber-setup code verbatim; factored
    // out so the setup exists once.
    bool reinitializing = grabber != null;
    if (reinitializing)
    {
        // Tear down the previous grabber before opening a new one.
        Application.Idle -= FrameGrabber;
        grabber.Dispose();
        ReleaseData();
    }

    grabber = new Emgu.CV.Capture();
    width = ((DeviceCapabilityInfo)cam_capability.SelectedItem).FrameSize.Width;
    height = ((DeviceCapabilityInfo)cam_capability.SelectedItem).FrameSize.Height;
    grabber.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, height);
    grabber.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, width);
    InitializeImages(width, height);

    if (reinitializing)
    {
        Application.Idle += FrameGrabber;
    }
}
示例15: MachineExecution
/***
Function: private void MachineExecution()
Parameter(s):
Return Value: void
This is where the main execution of the program takes place. Most of the code called from this function
resides in the MachineHeuristics.cs file with the facial detection and recognition processing.
***/
private void MachineExecution()
{
// Guard: an input source must have been selected before running.
if (machineInputSourceKind == MachineInputSourceKind.SourceNone)
PanicAndTerminateProgram();
if (machineInputSourceKind == MachineInputSourceKind.SourceFile)
{
// Video-file playback: pace the dispatcher timer from the file's own frame rate.
capture = InitCapture();
frame_count = capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameCount);
frame_rate = capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.Fps);
// Milliseconds between frames at the file's frame rate.
theoretical_delay = (int)(1000 / frame_rate);
number_of_delayable_frames = (int)(frame_rate / 2);
dispatcherTimer = new DispatcherTimer(
TimeSpan.FromMilliseconds(theoretical_delay),
DispatcherPriority.ApplicationIdle,
ProcessAnyFrame,
Application.Current.Dispatcher
);
dispatcherTimer.Start();
}
else if (machineInputSourceKind == MachineInputSourceKind.SourceWebcam)
{
/***
Some computers have their default, working camera on a different internal number.
Note, zero is usually the correct number, and is actually the default in the InputSelection.xaml.cs file.
***/
capture = new Capture(cameraNumber);
// Locks the camera frame rate to a constant value.
frame_rate = cameraFrameRate;
capture.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.Fps, frame_rate);
// NOTE(review): here theoretical_delay is (int)frame_rate, but the file branch
// above computes (int)(1000 / frame_rate). The timer below uses 1000/frame_rate
// regardless, so this looks inconsistent — confirm how theoretical_delay is
// consumed elsewhere before changing it.
theoretical_delay = (int)frame_rate;
number_of_delayable_frames = (int)(frame_rate / 2);
dispatcherTimer = new DispatcherTimer(
TimeSpan.FromMilliseconds(1000 / frame_rate),
DispatcherPriority.ApplicationIdle,
ProcessAnyFrame,
Application.Current.Dispatcher
);
// Keep running the ProcessAnyFrame function every time a frame is received (theoretically), should work with no hitches.
dispatcherTimer.Start();
}
}