This article collects typical usage examples of the Seq class in C#. If you are wondering what the Seq class does and how to use it, the curated examples below may help.
The listing shows 15 code examples of the Seq class, ordered by popularity by default.
Example 1: Emit
public void Emit()
{
if (Trace.Flavor == TraceFlavor.Remainder)
{
foreach (var kv in Trace.AssemblyMap)
{
var compiler = new AssemblyCompiler(this, kv.Value);
compiler.Emit(null);
}
}
else
{
var rootEnv = Env.Global.Environment();
var body = new Seq<JST.Statement>();
body.Add(JST.Statement.Var(RootId, new JST.Identifier(Env.Root).ToE()));
foreach (var nm in rootEnv.AllLoadedAssembliesInLoadOrder().Where(Trace.AssemblyMap.ContainsKey))
{
var compiler = new AssemblyCompiler(this, Trace.AssemblyMap[nm]);
compiler.Emit(body);
}
var program = new JST.Program
(new JST.Statements
(new JST.ExpressionStatement
(new JST.StatementsPseudoExpression(new JST.Statements(body), null))));
var fileName = Path.Combine(Env.OutputDirectory, Trace.Name + ".js");
program.ToFile(fileName, Env.PrettyPrint);
Env.Log(new GeneratedJavaScriptFile("trace '" + Trace.Name + "'", fileName));
}
}
Example 2: HOGDescriptor
/// <summary>
/// Create a new HOGDescriptor using the specified parameters
/// </summary>
public HOGDescriptor(
Size winSize,
Size blockSize,
Size blockStride,
Size cellSize,
int nbins,
int derivAperture,
double winSigma,
double L2HysThreshold,
bool gammaCorrection)
{
_ptr = CvHOGDescriptorCreate(
ref winSize,
ref blockSize,
ref blockStride,
ref cellSize,
nbins,
derivAperture,
winSigma,
0, // histogramNormType (assumed: 0 = L2Hys in the native signature)
L2HysThreshold,
gammaCorrection);
_rectStorage = new MemStorage();
_rectSeq = new Seq<Rectangle>(_rectStorage);
}
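For context, a minimal construction sketch follows. The 64x128 window with 16x16 blocks, 8x8 stride/cells and 9 bins is the canonical pedestrian-detection geometry, not a value taken from the example above; the usual System.Drawing and Emgu.CV usings are assumed:
// Hedged sketch: constructing the descriptor with canonical HOG parameters.
// winSigma = -1 asks OpenCV to derive the Gaussian sigma from the block size.
using (HOGDescriptor hog = new HOGDescriptor(
   new Size(64, 128), // winSize
   new Size(16, 16),  // blockSize
   new Size(8, 8),    // blockStride
   new Size(8, 8),    // cellSize
   9,                 // nbins
   1,                 // derivAperture
   -1,                // winSigma
   0.2,               // L2HysThreshold
   true))             // gammaCorrection
{
   // ready for SetSVMDetector / DetectMultiScale
}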
Example 3: BuildTypeExpression
// Complete a first-kinded type structure. If type definition is higher kinded, this will
// complete an instance of the type at the type arguments. Otherwise, this will complete
// the type definition itself.
private void BuildTypeExpression(Seq<JST.Statement> body, JST.Expression lhs)
{
TypeCompEnv.BindUsage(body, CollectPhase1Usage(), TypePhase.Id);
// TODO: Replace with prototype
body.Add(JST.Statement.DotCall(RootId.ToE(), Constants.RootSetupTypeDefaults, TypeId.ToE()));
EmitBaseAndSupertypes(body, lhs);
EmitDefaultConstructor(body, lhs);
EmitMemberwiseClone(body, lhs);
EmitClone(body, lhs);
EmitDefaultValue(body, lhs);
EmitStaticMethods(body, lhs);
EmitConstructObjectAndInstanceMethods(body, lhs);
EmitVirtualAndInterfaceMethodRedirectors(body, lhs);
EmitSetupType(body, lhs);
EmitUnbox(body, lhs);
EmitBox(body, lhs);
EmitUnboxAny(body, lhs);
EmitConditionalDeref(body, lhs);
EmitIsValue(body, lhs);
EmitEquals(body, lhs);
EmitHash(body, lhs);
EmitInterop(body, lhs);
}
Example 4: GpuHOGDescriptor
/// <summary>
/// Create a new GpuHOGDescriptor using the specified parameters
/// </summary>
/// <param name="blockSize">Block size in cells. Only (2,2) is supported for now.</param>
/// <param name="cellSize">Cell size. Only (8, 8) is supported for now.</param>
/// <param name="blockStride">Block stride. Must be a multiple of cell size.</param>
/// <param name="gammaCorrection">Do gamma correction preprocessing or not.</param>
/// <param name="L2HysThreshold">L2-Hys normalization method shrinkage.</param>
/// <param name="nbins">Number of bins. Only 9 bins per cell is supported for now.</param>
/// <param name="nLevels">Maximum number of detection window increases.</param>
/// <param name="winSigma">Gaussian smoothing window parameter.</param>
/// <param name="winSize">Detection window size. Must be aligned to block size and block stride.</param>
public GpuHOGDescriptor(
Size winSize,
Size blockSize,
Size blockStride,
Size cellSize,
int nbins,
double winSigma,
double L2HysThreshold,
bool gammaCorrection,
int nLevels)
{
_ptr = gpuHOGDescriptorCreate(
ref winSize,
ref blockSize,
ref blockStride,
ref cellSize,
nbins,
winSigma,
L2HysThreshold,
gammaCorrection,
nLevels);
_rectStorage = new MemStorage();
_rectSeq = new Seq<Rectangle>(_rectStorage);
}
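A hedged construction sketch mirroring the documented constraints (2x2-cell blocks, 8x8 cells, 9 bins); the window size and the 64 detection levels are illustrative defaults, not values from the example:
// Hedged sketch: constructing the GPU descriptor under the documented constraints.
GpuHOGDescriptor gpuHog = new GpuHOGDescriptor(
   new Size(64, 128), // winSize, aligned to block size and block stride
   new Size(16, 16),  // blockSize: 2x2 cells (the only supported value)
   new Size(8, 8),    // blockStride: a multiple of cellSize
   new Size(8, 8),    // cellSize (the only supported value)
   9,                 // nbins (the only supported value)
   -1,                // winSigma
   0.2,               // L2HysThreshold
   true,              // gammaCorrection
   64);               // nLevels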
Example 5: HOGDescriptor
/// <summary>
/// Create a new HOGDescriptor with the default parameters
/// </summary>
public HOGDescriptor()
{
_ptr = CvHOGDescriptorCreateDefault();
_rectStorage = new MemStorage();
_rectSeq = new Seq<Rectangle>(_rectStorage);
_vector = new VectorOfFloat();
}
Example 6: GetDefaultPeopleDetector
/// <summary>
/// Return the default people detector
/// </summary>
/// <returns>the default people detector</returns>
public static float[] GetDefaultPeopleDetector()
{
using (MemStorage stor = new MemStorage())
{
Seq<float> desc = new Seq<float>(stor);
CvHOGDescriptorPeopleDetectorCreate(desc);
return desc.ToArray();
}
}
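The returned vector is typically fed straight back into a HOGDescriptor. A hedged sketch combining examples 5 and 6 (the image path is a placeholder; SetSVMDetector and DetectMultiScale are the standard Emgu CV pedestrian-detection entry points):
// Hedged sketch: default HOG people detection end to end.
using (Image<Bgr, byte> scene = new Image<Bgr, byte>("pedestrians.jpg"))
using (HOGDescriptor hog = new HOGDescriptor())
{
   hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
   Rectangle[] people = hog.DetectMultiScale(scene);
   foreach (Rectangle r in people)
      scene.Draw(r, new Bgr(Color.Red), 2); // mark each hit
}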
Example 7: DetectKeyPoints
/// <summary>
/// Detect STAR key points from the image
/// </summary>
/// <param name="image">The image to extract key points from</param>
/// <returns>The STAR key points of the image</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image)
{
using (MemStorage stor = new MemStorage())
{
Seq<MKeyPoint> seq = new Seq<MKeyPoint>(stor);
CvStarDetectorDetectKeyPoints(ref this, image, seq.Ptr);
return seq.ToArray();
}
}
Example 8: HoughLineTransform
/// <summary>
/// Hough Line Transform, as in OpenCV (Emgu CV does not wrap this function as it should)
/// </summary>
/// <param name="img">Binary image</param>
/// <param name="type">type of hough transform</param>
/// <param name="threshold">how many votes is needed to accept line</param>
/// <returns>Lines in theta/rho format</returns>
public static PointF[] HoughLineTransform(Image<Gray, byte> img, Emgu.CV.CvEnum.HOUGH_TYPE type, int threshold)
{
using (MemStorage stor = new MemStorage())
{
IntPtr linePtr = CvInvoke.cvHoughLines2(img, stor.Ptr, type, 5, Math.PI / 180 * 15, threshold, 0, 0); // rho = 5 px, theta = 15 degrees (hard-coded)
Seq<PointF> seq = new Seq<PointF>(linePtr, stor);
return seq.ToArray();
}
}
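Since CV_HOUGH_STANDARD reports each line as a (rho, theta) pair, callers must convert the pairs into drawable segments. A hedged sketch, assuming X carries rho and Y carries theta and that edges is a binary Image<Gray, byte>:
// Hedged sketch: converting rho/theta pairs into 2000 px segments for display.
PointF[] lines = HoughLineTransform(edges, Emgu.CV.CvEnum.HOUGH_TYPE.CV_HOUGH_STANDARD, 100);
foreach (PointF line in lines)
{
   double rho = line.X, theta = line.Y;
   double a = Math.Cos(theta), b = Math.Sin(theta);
   double x0 = a * rho, y0 = b * rho; // closest point on the line to the origin
   Point p1 = new Point((int)(x0 - 1000 * b), (int)(y0 + 1000 * a));
   Point p2 = new Point((int)(x0 + 1000 * b), (int)(y0 - 1000 * a));
   edges.Draw(new LineSegment2D(p1, p2), new Gray(255), 1);
}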
Example 9: GetModelPoints
/// <summary>
/// Get the model points stored in this detector
/// </summary>
/// <returns>The model points stored in this detector</returns>
public MKeyPoint[] GetModelPoints()
{
using (MemStorage stor = new MemStorage())
{
Seq<MKeyPoint> modelPoints = new Seq<MKeyPoint>(stor);
CvPlanarObjectDetectorGetModelPoints(_ptr, modelPoints);
return modelPoints.ToArray();
}
}
Example 10: Detect
/// <summary>
/// Detect planar object from the specific image
/// </summary>
/// <param name="image">The image where the planar object will be detected</param>
/// <param name="h">The homography matrix which will be updated</param>
/// <returns>The four corners of the detected region</returns>
public PointF[] Detect(Image<Gray, Byte> image, HomographyMatrix h)
{
using (MemStorage stor = new MemStorage())
{
Seq<PointF> corners = new Seq<PointF>(stor);
CvPlanarObjectDetectorDetect(_ptr, image, h, corners);
return corners.ToArray();
}
}
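A hedged calling sketch for examples 9 and 10, assuming detector is an already-trained PlanarObjectDetector and scene is an Image<Gray, byte>:
// Hedged sketch: recover the region corners and the homography in one call.
using (HomographyMatrix homography = new HomographyMatrix())
{
   PointF[] corners = detector.Detect(scene, homography);
   if (corners.Length == 4) // a planar region was found
      scene.DrawPolyline(Array.ConvertAll(corners, Point.Round), true, new Gray(255), 2);
}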
Example 11: DetectKeyPoints
/// <summary>
/// Detect the FAST keypoints from the image
/// </summary>
/// <param name="image">The image to extract keypoints from</param>
/// <returns>The array of FAST keypoints</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, byte> image)
{
using (MemStorage stor = new MemStorage())
{
Seq<MKeyPoint> keypoints = new Seq<MKeyPoint>(stor);
CvInvoke.CvFASTKeyPoints(image, keypoints, Threshold, NonmaxSupression);
return keypoints.ToArray();
}
}
Example 12: DetectKeyPoints
/// <summary>
/// Detect the Lepetit keypoints from the image
/// </summary>
/// <param name="image">The image to extract Lepetit keypoints</param>
/// <param name="maxCount">The maximum number of keypoints to be extracted</param>
/// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
/// <returns>The array of Lepetit keypoints</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, int maxCount, bool scaleCoords)
{
using (MemStorage stor = new MemStorage())
{
Seq<MKeyPoint> seq = new Seq<MKeyPoint>(stor);
CvLDetectorDetectKeyPoints(ref this, image, seq.Ptr, maxCount, scaleCoords);
return seq.ToArray();
}
}
Example 13: DetectMultiScale
/// <summary>
/// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles.
/// The function scans the image several times at different scales. Each time it considers overlapping regions in the image.
/// It may also apply some heuristics to reduce the number of analyzed regions, such as Canny pruning.
/// After it has collected the candidate rectangles (regions that passed the classifier cascade), it groups them and returns a sequence of average rectangles for each large enough group.
/// </summary>
/// <param name="image">The image where the objects are to be detected from</param>
/// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%</param>
/// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure</param>
/// <param name="minSize">Minimum window size. Use Size.Empty for default, where it is set to the size of samples the classifier has been trained on (~20x20 for face detection)</param>
/// <param name="maxSize">Maxumum window size. Use Size.Empty for default, where the parameter will be ignored.</param>
/// <returns>The objects detected, one array per channel</returns>
public Rectangle[] DetectMultiScale(Image<Gray, Byte> image, double scaleFactor, int minNeighbors, Size minSize, Size maxSize)
{
using (MemStorage stor = new MemStorage())
{
Seq<Rectangle> rectangles = new Seq<Rectangle>(stor);
CvInvoke.CvCascadeClassifierDetectMultiScale(_ptr, image, rectangles, scaleFactor, minNeighbors, 0, minSize, maxSize);
return rectangles.ToArray();
}
}
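A hedged face-detection sketch for this wrapper; the cascade file and image path are placeholders:
// Hedged sketch: standard frontal-face detection settings.
CascadeClassifier cascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
using (Image<Gray, byte> gray = new Image<Gray, byte>("people.jpg"))
{
   // Grow the window 10% per scan, require 3 neighbors, default size bounds.
   Rectangle[] faces = cascade.DetectMultiScale(gray, 1.1, 3, Size.Empty, Size.Empty);
}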
Example 14: Detect
/// <summary>
/// Find rectangular regions in the given image that are likely to contain objects and corresponding confidence levels
/// </summary>
/// <param name="image">The image to detect objects in</param>
/// <param name="overlapThreshold">Threshold for the non-maximum suppression algorithm, Use default value of 0.5</param>
/// <returns>Array of detected objects</returns>
public MCvObjectDetection[] Detect(Image<Bgr, Byte> image, float overlapThreshold)
{
using (MemStorage stor = new MemStorage())
{
IntPtr seqPtr = CvInvoke.cvLatentSvmDetectObjects(image, Ptr, stor, overlapThreshold, -1);
if (seqPtr == IntPtr.Zero)
return new MCvObjectDetection[0];
Seq<MCvObjectDetection> seq = new Seq<MCvObjectDetection>(seqPtr, stor);
return seq.ToArray();
}
}
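Each MCvObjectDetection pairs a rectangle with an SVM confidence score, so callers usually filter before drawing. A hedged sketch (the Rect/score field names follow Emgu CV's struct layout, and the cutoff is illustrative):
// Hedged sketch: keep only confident latent-SVM detections.
MCvObjectDetection[] detections = detector.Detect(scene, 0.5f);
foreach (MCvObjectDetection det in detections)
{
   if (det.score > -0.5f) // illustrative cutoff; scores are SVM margins
      scene.Draw(det.Rect, new Bgr(Color.Red), 2);
}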
Example 15: ConvexHull
/// <summary>
/// Finds convex hull of 2D point set using Sklansky's algorithm
/// </summary>
/// <param name="points">The points to find convex hull from</param>
/// <param name="storage">the storage used by the resulting sequence</param>
/// <param name="orientation">The orientation of the convex hull</param>
/// <returns>The convex hull of the points</returns>
public static Seq<PointF> ConvexHull(PointF[] points, MemStorage storage, CvEnum.ORIENTATION orientation)
{
IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
CvInvoke.cvMakeSeqHeaderForArray(
CvInvoke.CV_MAKETYPE((int)CvEnum.MAT_DEPTH.CV_32F, 2),
StructSize.MCvSeq,
StructSize.PointF,
handle.AddrOfPinnedObject(),
points.Length,
seq,
block);
Seq<PointF> convexHull = new Seq<PointF>(CvInvoke.cvConvexHull2(seq, storage.Ptr, orientation, 1), storage);
handle.Free();
Marshal.FreeHGlobal(seq);
Marshal.FreeHGlobal(block);
return convexHull;
}
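A hedged usage sketch: the hull sequence lives in the caller-supplied storage, so it should be copied out with ToArray() before the storage is disposed:
// Hedged sketch: hull of 50 random points; materialize before the storage is freed.
Random rand = new Random();
PointF[] pts = new PointF[50];
for (int i = 0; i < pts.Length; i++)
   pts[i] = new PointF((float)rand.NextDouble() * 100f, (float)rand.NextDouble() * 100f);
using (MemStorage storage = new MemStorage())
{
   Seq<PointF> hull = ConvexHull(pts, storage, CvEnum.ORIENTATION.CV_CLOCKWISE);
   PointF[] hullPoints = hull.ToArray();
}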