This article collects typical usage examples of the CvSize class in C#. If you are wondering what CvSize is for, how to use it, or want to see real-world CvSize code, the curated class examples below may help.
The CvSize class belongs to the OpenCvSharp namespace. Fifteen CvSize code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code samples.
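Before the examples, here is a minimal, self-contained sketch of what a CvSize is and how it is typically used. The 640x480 dimensions and the class name are illustrative only, not taken from any of the projects below:

using OpenCvSharp;

class CvSizeBasics
{
    static void Main()
    {
        // A CvSize is a plain width/height value pair.
        CvSize size = new CvSize(640, 480);

        // It is most often passed to image allocation, as in the examples below.
        using (IplImage img = new IplImage(size, BitDepth.U8, 3))
        {
            System.Console.WriteLine("{0}x{1}", img.Width, img.Height);
        }
    }
}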
Example 1: Resize
public Resize()
{
    // cvResize
    // Resizes the input image so that it fits the specified output image size.
    // (1) Load the image
    using (IplImage src = new IplImage(Const.ImageSquare5, LoadMode.AnyColor | LoadMode.AnyDepth))
    {
        // (2) Allocate the output images
        CvSize size = new CvSize(src.Width * 2, src.Height * 2);
        using (IplImage dstNN = new IplImage(size, src.Depth, src.NChannels))
        using (IplImage dstCubic = new IplImage(size, src.Depth, src.NChannels))
        using (IplImage dstLinear = new IplImage(size, src.Depth, src.NChannels))
        using (IplImage dstLanczos = new IplImage(size, src.Depth, src.NChannels))
        {
            // (3) Resize the image
            Cv.Resize(src, dstNN, Interpolation.NearestNeighbor);
            Cv.Resize(src, dstCubic, Interpolation.Cubic);
            Cv.Resize(src, dstLinear, Interpolation.Linear);
            Cv.Resize(src, dstLanczos, Interpolation.Lanczos4);
            // (4) Show the results
            using (new CvWindow("src", src))
            using (new CvWindow("dst NearestNeighbor", dstNN))
            using (new CvWindow("dst Cubic", dstCubic))
            using (new CvWindow("dst Linear", dstLinear))
            using (new CvWindow("dst Lanczos4", dstLanczos))
            {
                Cv.WaitKey();
            }
        }
    }
}
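The example above only enlarges (2x). As a complement, a short sketch of the downscaling case, where area-based interpolation usually gives the cleanest result; the input path is hypothetical, and the API is the same one used above:

using (IplImage src = new IplImage("input.png", LoadMode.AnyColor | LoadMode.AnyDepth))
using (IplImage dstHalf = new IplImage(new CvSize(src.Width / 2, src.Height / 2), src.Depth, src.NChannels))
{
    // Interpolation.Area averages source pixels, which avoids moire artifacts when shrinking.
    Cv.Resize(src, dstHalf, Interpolation.Area);
}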
Example 2: FitLine
public FitLine()
{
    CvSize imageSize = new CvSize(500, 500);
    // cvFitLine
    CvPoint2D32f[] points = GetRandomPoints(20, imageSize);
    CvLine2D line = Cv.FitLine2D(points, DistanceType.L2, 0, 0.01, 0.01);
    using (IplImage img = new IplImage(imageSize, BitDepth.U8, 3))
    {
        img.Zero();
        // draw line
        {
            CvPoint pt1, pt2;
            line.FitSize(img.Width, img.Height, out pt1, out pt2);
            img.Line(pt1, pt2, CvColor.Green, 1, LineType.Link8);
        }
        // draw points and distances
        using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.33, 0.33))
        {
            foreach (CvPoint2D32f p in points)
            {
                double d = line.Distance(p);
                img.Circle(p, 2, CvColor.White, -1, LineType.AntiAlias);
                img.PutText(string.Format("{0:F1}", d), new CvPoint((int)(p.X + 3), (int)(p.Y + 3)), font, CvColor.Green);
            }
        }
        CvWindow.ShowImages(img);
    }
}
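The GetRandomPoints helper used above is listed separately as Example 12 below.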
Example 3: InitializeComponent
public 描画画面()
{
    InitializeComponent();
    dis_height = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Height;
    dis_width = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Width;
    pos_max = Tobii.pos_max;
    while (Tobii.眼球位置_L[0] == 0 || Tobii.眼球位置_R[0] == 100) { } // stay here until both eyes are tracked
    diff_in = Tobii.眼球位置_R[0] - Tobii.眼球位置_L[0];
    posY_in = (Tobii.眼球位置_L[1] + Tobii.眼球位置_R[1]) / 2;
    pictureBoxIpl1.Width = dis_width;
    pictureBoxIpl1.Height = dis_height;
    frame = Cv.CreateImage(new CvSize(dis_width, dis_height), BitDepth.U8, 3);
    background = Cv.CreateImage(new CvSize(dis_width, dis_height), BitDepth.U8, 3);
    background = メイン画面.background;
    pictureBoxIpl1.ImageIpl = background;
    window_size = new CvSize(メイン画面.window[0], メイン画面.window[1]);
    point_old = new CvPoint(window_size.Width / 2, window_size.Height / 2);
    許容半径 = メイン画面.radius;
    PC = new System.Diagnostics.PerformanceCounter[3];
    タイマー開始();
}
Example 4: Resize
public Resize()
{
    using (var src = new IplImage(FilePath.Image.Square5, LoadMode.AnyColor | LoadMode.AnyDepth))
    {
        CvSize size = new CvSize(src.Width * 2, src.Height * 2);
        using (IplImage dstNN = new IplImage(size, src.Depth, src.NChannels))
        using (IplImage dstCubic = new IplImage(size, src.Depth, src.NChannels))
        using (IplImage dstLinear = new IplImage(size, src.Depth, src.NChannels))
        using (IplImage dstLanczos = new IplImage(size, src.Depth, src.NChannels))
        {
            Cv.Resize(src, dstNN, Interpolation.NearestNeighbor);
            Cv.Resize(src, dstCubic, Interpolation.Cubic);
            Cv.Resize(src, dstLinear, Interpolation.Linear);
            Cv.Resize(src, dstLanczos, Interpolation.Lanczos4);
            using (new CvWindow("src", src))
            using (new CvWindow("dst NearestNeighbor", dstNN))
            using (new CvWindow("dst Cubic", dstCubic))
            using (new CvWindow("dst Linear", dstLinear))
            using (new CvWindow("dst Lanczos4", dstLanczos))
            {
                Cv.WaitKey();
            }
        }
    }
}
Example 5: CaptureCameraCallback
private void CaptureCameraCallback()
{
    const double ScaleFactor = 2.5;
    const int MinNeighbors = 1;
    CvSize MinSize = new CvSize(30, 30);
    CvCapture cap = CvCapture.FromCamera(1);
    CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("haarcascade_eye.xml");
    while (true)
    {
        // Grab the next frame from the camera.
        IplImage img = cap.QueryFrame();
        //IplImage.FromBitmap()
        //CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(img, cascade, Cv.CreateMemStorage(), ScaleFactor, MinNeighbors, HaarDetectionType.DoCannyPruning, MinSize);
        //foreach (CvAvgComp eye in eyes.AsParallel())
        //{
        //    img.DrawRect(eye.Rect, CvColor.Red);
        //    if (eye.Rect.Left > pctCvWindow.Width / 2)
        //    {
        //        try
        //        {
        //            IplImage rightEyeImg1 = img.Clone();
        //            Cv.SetImageROI(rightEyeImg1, eye.Rect);
        //            IplImage rightEyeImg2 = Cv.CreateImage(eye.Rect.Size, rightEyeImg1.Depth, rightEyeImg1.NChannels);
        //            Cv.Copy(rightEyeImg1, rightEyeImg2, null);
        //            Cv.ResetImageROI(rightEyeImg1);
        //            Bitmap rightEyeBm = BitmapConverter.ToBitmap(rightEyeImg2);
        //            pctRightEye.Image = rightEyeBm;
        //        }
        //        catch { }
        //    }
        //    else
        //    {
        //        try
        //        {
        //            IplImage leftEyeImg1 = img.Clone();
        //            Cv.SetImageROI(leftEyeImg1, eye.Rect);
        //            IplImage leftEyeImg2 = Cv.CreateImage(eye.Rect.Size, leftEyeImg1.Depth, leftEyeImg1.NChannels);
        //            Cv.Copy(leftEyeImg1, leftEyeImg2, null);
        //            Cv.ResetImageROI(leftEyeImg1);
        //            Bitmap leftEyeBm = BitmapConverter.ToBitmap(leftEyeImg2);
        //            pctLeftEye.Image = leftEyeBm;
        //        }
        //        catch { }
        //    }
        //}
        Bitmap bm = BitmapConverter.ToBitmap(img);
        bm.SetResolution(pctCvWindow.Width, pctCvWindow.Height);
        //pctCvWindow.Image = bm;
        pb.Image = bm;
        img = null;
        bm = null;
        Thread.Sleep(100);
    }
}
Example 6: CvModelEstimator2
/// <summary>
/// Constructor
/// </summary>
/// <param name="_modelPoints">Number of points required to estimate a model</param>
/// <param name="_modelSize">Size of the model (solution) matrix</param>
/// <param name="_maxBasicSolutions">Maximum number of basic solutions per sample</param>
public CvModelEstimator2(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions)
{
    modelPoints = _modelPoints;
    modelSize = _modelSize;
    maxBasicSolutions = _maxBasicSolutions;
    checkPartialSubsets = true;
    rng = new CvRNG(-1);
}
Example 7: CvModelEstimator2
/// <summary>
/// Constructor
/// </summary>
/// <param name="modelPoints">Number of points required to estimate a model</param>
/// <param name="modelSize">Size of the model (solution) matrix</param>
/// <param name="maxBasicSolutions">Maximum number of basic solutions per sample</param>
protected CvModelEstimator2(int modelPoints, CvSize modelSize, int maxBasicSolutions)
{
    this.modelPoints = modelPoints;
    this.modelSize = modelSize;
    this.maxBasicSolutions = maxBasicSolutions;
    this.checkPartialSubsets = true;
    this.rng = new CvRNG(-1);
}
Example 8: ContoursFinder
/// <summary>
/// Constructor
/// </summary>
/// <param name="size">Size of the image to be processed</param>
public ContoursFinder(CvSize size)
{
    this.size = size;
    // One 3-channel HSV buffer plus single-channel planes for H, S, V and a scratch image.
    hsvImg = new IplImage(size, BitDepth.U8, 3);
    hImg = new IplImage(size, BitDepth.U8, 1);
    sImg = new IplImage(size, BitDepth.U8, 1);
    vImg = new IplImage(size, BitDepth.U8, 1);
    tmpImg = new IplImage(size, BitDepth.U8, 1);
}
Example 9: WebCam
/// <summary>
/// Constructor
/// </summary>
/// <param name="deviceId">ID of the camera to capture frames from</param>
/// <param name="frameSize">Desired frame resolution</param>
public WebCam(int deviceId, CvSize frameSize)
{
    this.deviceId = deviceId;
    vi = new VideoInput();
    vi.SetupDevice(deviceId, frameSize.Width, frameSize.Height);
    // The device may not honor the requested resolution, so read the actual size back.
    this.frameSize = new CvSize(vi.GetWidth(deviceId), vi.GetHeight(deviceId));
    sum = new IplImage(this.frameSize, BitDepth.F32, 3);
    tmp = new IplImage(this.frameSize, BitDepth.U8, 3);
}
Example 10: ImageProcessingRoutine
/// <summary>
/// Constructor
/// </summary>
/// <param name="deviceId">ID of the camera used to acquire the image</param>
/// <param name="frameSize">Size of the image to be processed</param>
public ImageProcessingRoutine(int deviceId, CvSize frameSize)
{
    Camera = new WebCam(deviceId, frameSize);
    Calibrator = new CameraCalibrator(Camera.FrameSize);
    Finder = new ContoursFinder(Camera.FrameSize);
    Transformer = new CoordinatesTransformer();
    routineThread = new Thread(routine);
    routineThread.IsBackground = true;
    routineThread.Start();
}
Example 11: CvSize
private void OnClick_csv出力(object sender, EventArgs e)
{
    if (合成画像 != null)
    {
        string 結果 = "";
        int x, y;
        int roi_w = 9;
        int roi_h = 9;
        CvSize roiSize = new CvSize(roi_w, roi_h);
        CvPoint roiPoint;
        for (x = 0; x < 合成画像.Width - roi_w; x++)
        {
            System.Diagnostics.Debug.WriteLine(x + "\n" + 結果);
            for (y = 0; y < 合成画像.Height - roi_h; y++)
            {
                string buff = "";
                string type = 検査領域か判断(x, y, roi_w, roi_h);
                if (type != "") // minor speed-up
                {
                    roiPoint = new CvPoint(x, y);
                    Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
                    if (type == "1") buff = csvフォーマットを取得(検査対象, roiSize, "1");
                    else if (type == "0") buff = csvフォーマットを取得(検査対象, roiSize, "0");
                }
                //if (checkBox_all.Checked)
                //{
                //    roiPoint = new CvPoint(x, y);
                //    Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
                //    Cv.SetImageROI(マスク画像, new CvRect(roiPoint, roiSize));
                //    if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
                //    else if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
                //}
                //else if (checkBox_black.Checked)
                //{
                //    if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
                //}
                //else
                //{
                //    if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
                //}
                if (buff != "") 結果 += buff + "\n";
                Cv.ResetImageROI(マスク画像);
                Cv.ResetImageROI(検査対象);
            }
        }
        stringをcsv出力(結果, DateTime.Now.ToString("yy-MM-dd_") + this.Text);
    }
}
Example 12: GetRandomPoints
private CvPoint2D32f[] GetRandomPoints(int count, CvSize imageSize)
{
    Random rand = new Random();
    CvPoint2D32f[] points = new CvPoint2D32f[count];
    double a = rand.NextDouble() + 0.5;
    for (int i = 0; i < points.Length; i++)
    {
        double x = rand.Next(imageSize.Width);
        double y = (x * a) + (rand.Next(100) - 50);
        points[i] = new CvPoint2D32f(x, y);
    }
    return points;
}
Example 13: GetOverlapLocation
private Point GetOverlapLocation(Bitmap screan)
{
    IplImage ipltemplate = BitmapConverter.ToIplImage(template);
    IplImage iplScrean = BitmapConverter.ToIplImage(screan);
    CvSize resSize = new CvSize(iplScrean.Width - ipltemplate.Width + 1,
                                iplScrean.Height - ipltemplate.Height + 1);
    IplImage resImg = Cv.CreateImage(resSize, BitDepth.F32, 1);
    Cv.MatchTemplate(iplScrean, ipltemplate, resImg, MatchTemplateMethod.CCorrNormed);
    double minVal;
    double maxVal;
    CvPoint minLoc;
    CvPoint maxLoc;
    Cv.MinMaxLoc(resImg, out minVal, out maxVal, out minLoc, out maxLoc);
    return maxVal >= 0.99 ? new Point(maxLoc.X, maxLoc.Y) : new Point(0, 0);
}
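Note that MatchTemplateMethod.CCorrNormed produces scores of at most 1, so the 0.99 cutoff accepts only near-exact matches; when no such match exists, this method returns (0, 0).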
Example 14: Start
/* ------------------------- */
// Use this for initialization
void Start()
{
    // Variable declarations
    int x_window = GlobalVar.CAMERA_WIDTH;
    int y_window = GlobalVar.CAMERA_HEIGHT;
    // Select and configure the camera devices
    cam1.setDevice(0); // horizontal
    cam2.setDevice(1); // vertical
    // Initialize the HSV images
    CvSize WINDOW_SIZE = new CvSize(x_window, y_window);
    h_img1 = Cv.CreateImage(WINDOW_SIZE, BitDepth.U8, 3);
    h_img2 = Cv.CreateImage(WINDOW_SIZE, BitDepth.U8, 3);
    // Initialize the data storage arrays
    int x = x_window / GlobalVar.POINT_INTERVAL;
    int y = y_window / GlobalVar.POINT_INTERVAL;
    int z = y_window / GlobalVar.POINT_INTERVAL;
    hps_arr = new int[y, x];
    vps_arr = new int[z, x];
    ps_arr3D = new int[x, y, z];
    pl_arrXZ = new double[GlobalVar.VERTICE_NUM / 2, 2];
    pl_arrY = new double[2];
    io_flag = new int[x, y, z]; // outside (far): 0, outside (near): 1, inside: 2
    // Set up the 3D polygon
    refObj = GameObject.Find("Object");
    polygon = refObj.GetComponent<CreatePolygonMesh>();
    polygon.Init();
    // Initialize the observation point data
    init3DArr(ps_arr3D);
    initMFlag(io_flag);
    // Classify each observation point as inside or outside the shape
    polygon.getIODMonitoringPoint(io_flag);
    /* Debug (FPS) */
    frameCount = 0;
    prevTime = 0.0f;
    /* ------------------------- */
}
Example 15: Start
/* ------------------ */
// Use this for initialization
void Start()
{
    // Select and configure the camera device
    cam.setDevice(index);
    // Initialize the HSV image
    CvSize WINDOW_SIZE = new CvSize(GlobalVar.CAMERA_WIDTH, GlobalVar.CAMERA_HEIGHT);
    h_img = Cv.CreateImage(WINDOW_SIZE, BitDepth.U8, 3);
    // Initialize the data storage array
    ps_arr = new int[GlobalVar.CAMERA_HEIGHT / GlobalVar.POINT_INTERVAL, GlobalVar.CAMERA_WIDTH / GlobalVar.POINT_INTERVAL];
    /* Debug */
    CvSize D_WINDOW_SIZE = new CvSize(GlobalVar.CAMERA_WIDTH, GlobalVar.CAMERA_HEIGHT);
    d_img = Cv.CreateImage(D_WINDOW_SIZE, BitDepth.U8, 3);
    texture = new Texture2D(GlobalVar.CAMERA_WIDTH, GlobalVar.CAMERA_HEIGHT, TextureFormat.RGB24, false);
    GetComponent<Renderer>().material.mainTexture = texture;
    /* ------------------ */
}