This page collects typical usage examples of the C# Image.Resize method. If you are wondering what Image.Resize does in C#, how to call it, or how it is commonly used, the curated code examples below may help. You can also explore further usage examples of the Image class that declares this method.
Fifteen code examples of Image.Resize are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code samples.
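Note that System.Drawing.Image itself does not define a Resize method; snippets such as Example 1 below rely on an extension method (or on a library type of the same name, e.g. Emgu CV's Image&lt;TColor, TDepth&gt; or an image-processing library's Image class). The following is only a minimal sketch of such an extension, assuming GDI+ (System.Drawing) and ignoring aspect-ratio handling; the class name and implementation are hypothetical, not taken from the examples on this page:

using System.Drawing;
using System.Drawing.Drawing2D;

public static class ImageResizeExtensions
{
    // Hypothetical helper: draws the source image into a new bitmap of the requested size.
    public static Image Resize(this Image source, Size newSize)
    {
        var result = new Bitmap(newSize.Width, newSize.Height);
        using (Graphics g = Graphics.FromImage(result))
        {
            g.InterpolationMode = InterpolationMode.HighQualityBicubic; // smoother scaling
            g.DrawImage(source, new Rectangle(Point.Empty, newSize));
        }
        return result;
    }
}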
Example 1: FromImage
public static GalleryImageSource FromImage(Image image)
{
    var imageSource = new GalleryImageSource();
    if (image != null)
    {
        // build every size variant of the gallery image from the original
        imageSource.OriginalImage = new Bitmap(image);
        imageSource.BigImage = image.Resize(new Size((Int32)BigWidth, (Int32)BigHeight));
        imageSource.SmallImage = image.Resize(new Size((Int32)SmallWidth, (Int32)SmallHeight));
        imageSource.TinyImage = image.Resize(new Size((Int32)TinyWidth, (Int32)TinyHeight));
        imageSource.XtraTinyImage = image.Resize(new Size((Int32)XtraTinyWidth, (Int32)XtraTinyHeight));
    }
    return imageSource;
}
Example 2: ViewDidLoad
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    ButtonText = "Detect Pedestrian";
    OnButtonClick += delegate
    {
        long processingTime;
        using (Image<Bgr, byte> image = new Image<Bgr, byte>("pedestrian.png"))
        {
            Rectangle[] pedestrians = FindPedestrian.Find(
                image.Mat, false,
                out processingTime
            );
            foreach (Rectangle rect in pedestrians)
            {
                image.Draw(rect, new Bgr(Color.Red), 1);
            }
            Size frameSize = FrameSize;
            using (Image<Bgr, Byte> resized = image.Resize(frameSize.Width, frameSize.Height, Emgu.CV.CvEnum.Inter.Nearest, true))
            {
                MessageText = String.Format(
                    "Detection Time: {0} milliseconds.",
                    processingTime
                );
                SetImage(resized);
            }
        }
    };
}
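Most of the remaining examples use Emgu CV, whose Image&lt;TColor, TDepth&gt;.Resize overloads take either a scale factor or an explicit width and height, plus an interpolation mode and optionally a flag that preserves the aspect ratio (older 2.x code uses the INTER enum, newer code uses Inter). The exact overload set can vary between Emgu CV versions, so the snippet below is only an assumed usage sketch with a hypothetical input file frame.jpg:

using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// Assumed usage sketch, not taken from the examples on this page.
using (Image<Bgr, byte> frame = new Image<Bgr, byte>("frame.jpg"))
using (Image<Bgr, byte> half = frame.Resize(0.5, Inter.Linear))            // scale by a factor
using (Image<Bgr, byte> thumb = frame.Resize(160, 120, Inter.Cubic, true)) // fit 160x120, keep proportions
{
    half.Save("half.jpg");
    thumb.Save("thumb.jpg");
}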
Example 3: ResizeImage
public async Task<IActionResult> ResizeImage(ResizeImageUpload resizeImageUpload)
{
    if (!ModelState.IsValid || resizeImageUpload.FormFile.Length == 0)
        return View(resizeImageUpload);
    var uploadDir = Path.Combine(_hostingEnvironment.WebRootPath, "uploads");
    var fileName = ContentDispositionHeaderValue.Parse(resizeImageUpload.FormFile.ContentDisposition).FileName.Trim('"');
    fileName = Path.GetFileNameWithoutExtension(fileName).Slugify() + Path.GetExtension(fileName);
    var filePath = Path.Combine(uploadDir, fileName);
    await resizeImageUpload.FormFile.SaveAsAsync(filePath);
    using (var inputStream = System.IO.File.OpenRead(filePath))
    {
        var sw = new Stopwatch();
        sw.Start();
        var image = new Image(inputStream);
        using (var outputStream = System.IO.File.OpenWrite(filePath))
        {
            // write the resized result back over the uploaded file
            image.Resize(resizeImageUpload.Width, resizeImageUpload.Height).Save(outputStream);
        }
        sw.Stop();
        ViewBag.ProcessingTime = sw.Elapsed.TotalSeconds.ToString();
    }
    ViewBag.UploadedFilePath = "/uploads/" + fileName;
    return View(resizeImageUpload);
}
Example 4: GetBackdrop
private Image GetBackdrop(Image originalImage)
{
    return originalImage.Resize(this.Settings.Width)
        .Crop(this.Settings.Width, this.Settings.Height, this.Settings.VerticalCenterOfGravity)
        .Decontrast(this.Settings.ContrastFactor, this.Settings.BrightnessFactor)
        .AddOverlay(this.Settings.OverlayColor);
}
Example 5: ViewDidLoad
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    ButtonText = "Detect Stop Sign";
    OnButtonClick +=
        delegate
        {
            using (Image<Bgr, byte> stopSignModel = new Image<Bgr, byte>("stop-sign-model.png"))
            using (Image<Bgr, Byte> image = new Image<Bgr, Byte>("stop-sign.jpg"))
            {
                Stopwatch watch = Stopwatch.StartNew(); // time the detection process
                List<Image<Gray, Byte>> stopSignList = new List<Image<Gray, byte>>();
                List<Rectangle> stopSignBoxList = new List<Rectangle>();
                StopSignDetector detector = new StopSignDetector(stopSignModel);
                detector.DetectStopSign(image, stopSignList, stopSignBoxList);
                watch.Stop(); // stop the timer
                foreach (Rectangle rect in stopSignBoxList)
                {
                    image.Draw(rect, new Bgr(Color.Red), 2);
                }
                Size frameSize = FrameSize;
                using (Image<Bgr, byte> resized = image.Resize(frameSize.Width, frameSize.Height, Emgu.CV.CvEnum.Inter.Cubic, true))
                {
                    MessageText = String.Format("Detection time: {0} milli-seconds", watch.Elapsed.TotalMilliseconds);
                    SetImage(resized);
                }
            }
        };
}
Example 6: RecognizeUser
public int RecognizeUser(Image<Gray, byte> userImage)
{
    faceRecognizer.Load(recognizerFilePath);
    // normalize the input face to 100x100 pixels before running prediction
    var result = faceRecognizer.Predict(userImage.Resize(100, 100, Inter.Cubic));
    return result.Label;
}
Example 7: PerformOCR
private void PerformOCR(string fileName)
{
    // Read the image from file
    Image<Gray, Byte> image = new Image<Gray, byte>(fileName);
    fileNameTextBox.Text = fileName;
    // Resize the image if it is too big and display it in the image box
    int width = Math.Min(image.Width, imageBox1.Width);
    int height = Math.Min(image.Height, imageBox1.Height);
    imageBox1.Image = image.Resize(width, height, true);
    // Perform OCR
    Tesseract ocr = new Tesseract();
    // More language definition data can be downloaded from
    // http://code.google.com/p/tesseract-ocr/downloads/list
    // Supported languages include Dutch, Spanish, German, Italian, French and English
    ocr.Init("eng", numericalOnlyCheckBox.Checked);
    List<tessnet2.Word> result = ocr.DoOCR(image.Bitmap, Rectangle.Empty);
    // Obtain the text from the OCR result
    String[] texts = result.ConvertAll<String>(delegate(Word w) { return w.Text; }).ToArray();
    // Display the text in the text box
    textBox1.Text = String.Join(" ", texts);
}
Example 8: LineDetectionFromFileTesting
public LineDetectionFromFileTesting()
{
    viewer = new ImageViewer(); // create an image viewer
    // Convert the image to grayscale and filter out the noise
    // gray = new Image<Gray, Byte>("C:/RoboSub/RoboImagesTest2/92c.png");
    fileImage = new Image<Bgr, Byte>(fileName);
    fileImage = fileImage.Resize(300, 200, Emgu.CV.CvEnum.INTER.CV_INTER_AREA, true);
    img = fileImage.Clone();
    gray = img.Convert<Gray, Byte>();
    // img = new Image<Bgr, Byte>("C:/RoboSub/RoboImagesTest2/92c.png");
    viewer.Size = new Size(fileImage.Width * 3, fileImage.Height * 3);
    Thread input = new Thread(getKeyboardInput);
    input.Start();
    Thread test = new Thread(testShapeDetection);
    test.Start();
    Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
    {
        //testShapeDetection();
    });
    viewer.ShowDialog();
    test.Abort();
    input.Abort();
}
Example 9: ColorSampleForm
public ColorSampleForm(Capture c)
{
    InitializeComponent();
    sampleImg = c.QueryFrame();
    sampleImg = sampleImg.Resize(_frameWidth, _frameHeight, true); // resize while maintaining proportions
    sampleImageBox.Image = sampleImg;
}
Example 10: Simple3DReconstruction
public Simple3DReconstruction()
{
    InitializeComponent();
    _left = new Image<Bgr, byte>("left.jpg");
    Image<Bgr, Byte> right = new Image<Bgr, byte>("right.jpg");
    Image<Gray, Int16> leftDisparityMap;
    Computer3DPointsFromImages(_left.Convert<Gray, Byte>(), right.Convert<Gray, Byte>(), out leftDisparityMap, out _points);

    // remove some depth outliers
    for (int i = 0; i < _points.Length; i++)
    {
        if (Math.Abs(_points[i].z) >= 1000) _points[i].z = 0;
    }

    // Display the disparity map
    imageBox1.Image = leftDisparityMap;

    Osg.Geode geode = new Osg.Geode();
    Osg.Geometry geometry = new Osg.Geometry();
    int textureSize = 256;
    // create and set up the texture
    SetTexture(_left.Resize(textureSize, textureSize, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC), geode);

    #region setup the vertices
    Osg.Vec3Array vertices = new Osg.Vec3Array();
    foreach (MCvPoint3D32f p in _points)
        vertices.Add(new Osg.Vec3(-p.x, p.y, p.z));
    geometry.setVertexArray(vertices);
    #endregion

    #region setup the primitive as point cloud
    Osg.DrawElementsUInt draw = new Osg.DrawElementsUInt(
        (uint)Osg.PrimitiveSet.Mode.POINTS, 0);
    for (uint i = 0; i < _points.Length; i++)
        draw.Add(i);
    geometry.addPrimitiveSet(draw);
    #endregion

    #region setup the texture coordinates for the pixels
    Osg.Vec2Array textureCoor = new Osg.Vec2Array();
    foreach (MCvPoint3D32f p in _points)
        textureCoor.Add(new Osg.Vec2(p.x / _left.Width + 0.5f, p.y / _left.Height + 0.5f));
    geometry.setTexCoordArray(0, textureCoor);
    #endregion

    geode.addDrawable(geometry);

    #region apply the rotation on the scene
    Osg.MatrixTransform transform = new Osg.MatrixTransform(
        Osg.Matrix.rotate(90.0 / 180.0 * Math.PI, new Osg.Vec3d(1.0, 0.0, 0.0)) *
        Osg.Matrix.rotate(180.0 / 180.0 * Math.PI, new Osg.Vec3d(0.0, 1.0, 0.0)));
    transform.addChild(geode);
    #endregion

    viewer3D.Viewer.setSceneData(transform);
    viewer3D.Viewer.realize();
}
Example 11: detect
public int detect(Image<Gray, byte> img)
{
    if (img == null) return -1;
    if (img.Height != 200 || img.Width != 200)
        img = img.Resize(200, 200, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    return model.Predict(img).Label;
}
Example 12: SetData
public void SetData(Image<Gray, byte> left, Image<Gray, byte> right)
{
    Image<Gray, short> disparityMap;
    Computer3DPointsFromStereoPair(left, right, out disparityMap, out _points);

    // Display the disparity map
    imageBox1.Image = disparityMap;

    Osg.Geode geode = new Osg.Geode();
    Osg.Geometry geometry = new Osg.Geometry();
    int textureSize = 256;
    // create and set up the texture
    SetTexture(left.Resize(textureSize, textureSize, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC), geode);

    #region setup the vertices, the primitive and the texture
    Osg.Vec3Array vertices = new Osg.Vec3Array();
    Osg.DrawElementsUInt draw = new Osg.DrawElementsUInt
        ((uint)Osg.PrimitiveSet.Mode.POINTS, 0);
    Osg.Vec2Array textureCoor = new Osg.Vec2Array();
    uint verticesCount = 0;
    foreach (MCvPoint3D32f p in _points)
    {
        // skip the depth outliers
        if (Math.Abs(p.z) < 1000)
        {
            vertices.Add(new Osg.Vec3(-p.x, p.y, p.z));
            draw.Add(verticesCount);
            textureCoor.Add(new Osg.Vec2(p.x / left.Width + 0.5f, p.y / left.Height + 0.5f));
            verticesCount++;
        }
    }
    geometry.setVertexArray(vertices);
    geometry.addPrimitiveSet(draw);
    geometry.setTexCoordArray(0, textureCoor);
    #endregion

    geode.addDrawable(geometry);

    #region apply the rotation on the scene
    //Osg.MatrixTransform transform = new Osg.MatrixTransform
    //    (Osg.Matrix.rotate(90.0 / 180.0 * Math.PI, new Osg.Vec3d(1.0, 0.0, 0.0)) *
    //    Osg.Matrix.rotate(180.0 / 180.0 * Math.PI, new Osg.Vec3d(0.0, 1.0, 0.0)));
    //transform.addChild(geode);
    #endregion

    //viewer3D.Viewer.setSceneData(transform);
    viewer3D.Viewer.realize();
}
Example 13: ViewDidLoad
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    RootElement root = Root;
    root.UnevenRows = true;
    UIImageView imageView = new UIImageView(View.Frame);
    StringElement messageElement = new StringElement("");
    StringElement licenseElement = new StringElement("");
    root.Add(new Section()
    {
        new StyledStringElement("Process", delegate
        {
            using (Image<Bgr, Byte> image = new Image<Bgr, Byte>("license-plate.jpg"))
            {
                LicensePlateDetector detector = new LicensePlateDetector(".");
                Stopwatch watch = Stopwatch.StartNew(); // time the detection process
                List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
                List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
                List<RotatedRect> licenseBoxList = new List<RotatedRect>();
                List<string> words = detector.DetectLicensePlate(
                    image,
                    licensePlateImagesList,
                    filteredLicensePlateImagesList,
                    licenseBoxList);
                watch.Stop(); // stop the timer
                messageElement.Value = String.Format("{0} milli-seconds", watch.Elapsed.TotalMilliseconds);
                StringBuilder builder = new StringBuilder();
                foreach (String w in words)
                    builder.AppendFormat("{0} ", w);
                licenseElement.Value = builder.ToString();
                messageElement.GetImmediateRootElement().Reload(messageElement, UITableViewRowAnimation.Automatic);
                licenseElement.GetImmediateRootElement().Reload(licenseElement, UITableViewRowAnimation.Automatic);
                foreach (RotatedRect box in licenseBoxList)
                {
                    image.Draw(box, new Bgr(Color.Red), 2);
                }
                Size frameSize = FrameSize;
                using (Image<Bgr, byte> resized = image.Resize(frameSize.Width, frameSize.Height, Emgu.CV.CvEnum.Inter.Cubic, true))
                {
                    imageView.Image = resized.ToUIImage();
                    imageView.Frame = new RectangleF(PointF.Empty, resized.Size);
                }
                imageView.SetNeedsDisplay();
                ReloadData();
            }
        })
    });
    root.Add(new Section("Recognition Time") { messageElement });
    root.Add(new Section("License Plate") { licenseElement });
    root.Add(new Section() { imageView });
}
Example 14: TakePicture
public String TakePicture(string path) {
    if (path == null) { return TakePicture(); }
    if (File.Exists(path)) { File.Delete(path); }
    var factor = Color.Width / ConfigManager.GetInstance().Find("debug.resize", Color.Width);
    var frame = new Image<Bgra, Byte>(Color.Width, Color.Height);
    frame.Bytes = Color.Pixels;
    frame = frame.Resize(Color.Width / factor, Color.Height / factor, Emgu.CV.CvEnum.Inter.Cubic);
    frame.Save(path);
    return path;
}
Example 15: getImageFromContours
private void getImageFromContours()
{
    Image<Gray, Byte> imgCopy = new Image<Gray, Byte>(img.Size);
    for (var i = 0; i < rects.Count; i++)
    {
        imgCopy = this.img.Copy();
        imgCopy.ROI = this.rects[i];
        imgCopy = imgCopy.Resize(50, 50, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
        CvInvoke.cvShowImage("test", imgCopy);
        this.images.Add(imgCopy);
    }
}