This article collects typical usage examples of the C# method Windows.Storage.Streams.InMemoryRandomAccessStream.AsStreamForRead. If you have been wondering what the C# InMemoryRandomAccessStream.AsStreamForRead method is for, or how to call it, the curated code samples below may help. You can also read more about its containing class, Windows.Storage.Streams.InMemoryRandomAccessStream.
Below are 8 code examples of InMemoryRandomAccessStream.AsStreamForRead, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
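Before the examples, here is a minimal sketch of the common pattern they all share: write data into an InMemoryRandomAccessStream, rewind it, and wrap it with AsStreamForRead so it can be consumed as a regular System.IO.Stream. This sketch is not taken from the examples below; the byte array imageBytes is a placeholder supplied by the caller.

using System;
using System.IO;                                        // AsStreamForRead extension
using System.Runtime.InteropServices.WindowsRuntime;    // AsBuffer extension
using System.Threading.Tasks;
using Windows.Storage.Streams;

public static class AsStreamForReadSample
{
    public static async Task<byte[]> RoundTripAsync(byte[] imageBytes)
    {
        using (var ras = new InMemoryRandomAccessStream())
        {
            // Fill the WinRT stream with data.
            await ras.WriteAsync(imageBytes.AsBuffer());

            // Rewind before reading.
            ras.Seek(0);

            // Wrap the WinRT stream as a classic System.IO.Stream for read access.
            using (Stream readStream = ras.AsStreamForRead())
            using (var ms = new MemoryStream())
            {
                await readStream.CopyToAsync(ms);
                return ms.ToArray();
            }
        }
    }
}

The examples that follow vary only in where the bytes come from (a BitmapEncoder, a camera capture, and so on) and in whether the wrapped stream is read synchronously, copied, or handed to another API.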
Example 1: ConvertToGrayScale
public async Task<byte[]> ConvertToGrayScale(byte[] imageBytes, int height, int width)
{
    using (InMemoryRandomAccessStream rasStream = new InMemoryRandomAccessStream())
    {
        await rasStream.WriteAsync(imageBytes.AsBuffer());
        var decoder = await BitmapDecoder.CreateAsync(rasStream);
        var pixelData = await decoder.GetPixelDataAsync();
        var pixels = pixelData.DetachPixelData();

        if (_filter == null)
            _filter = new ImageFilter();
        await _filter.ToGrayScale(pixels.AsBuffer());

        BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, rasStream);
        encoder.SetPixelData(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, (uint)width, (uint)height, decoder.DpiX, decoder.DpiY, pixels);
        await encoder.FlushAsync();

        using (BinaryReader br = new BinaryReader(rasStream.AsStreamForRead()))
        {
            rasStream.Seek(0);
            return br.ReadBytes((int)rasStream.AsStreamForRead().Length);
        }
    }
}
Example 2: LoadImages
public static async Task LoadImages()
{
    for (int i = 0; i < NUM_ICON_SIZES; i++)
    {
        for (int j = 0; j < NUM_ICON_TYPES; j++)
        {
            string postfix;
            if (j >= 1 && j <= 8)
                postfix = "BusDirection" + ((StopDirection)j).ToString();
            else if (j == 0)
                postfix = "BusBase";
            else
                postfix = j == 9 ? "BusAlert" : "BusClosed";
            postfix += (i == 0 ? "20" : "40");

            var sprite = new Sprite() { ImageUri = new Uri($"ms-appx:///Assets/Icons/{postfix}.png") };
            await sprite.Load();
            sprite.Unlock();
            //var buffer1 = new SpriteBitmapStream(sprite).GetFullBuffer();
            //using (MemoryStream stream1 = new MemoryStream(buffer1, true))
            //{
            //    BusIconStreams[i * NUM_ICON_TYPES + j] = stream1.AsRandomAccessStream();
            //}
            //var bitmap = await WriteableBitmapExtensions.FromContent(null, new Uri($"ms-appx:///Assets/Icons/{postfix}.png"));
            InMemoryRandomAccessStream stream = new InMemoryRandomAccessStream();
            await sprite.Bitmap.ToStream(stream, BitmapEncoder.BmpEncoderId);
            BusIconStreams[i * NUM_ICON_TYPES + j] = stream;

            byte[] buffer = new byte[stream.Size];
            stream.AsStreamForRead().Read(buffer, 0, buffer.Length);
            for (int k = 0; k < buffer.Length; k++)
            {
                //if (buffer[k] != buffer1[k])
                //{
                //    //System.Diagnostics.Debug.WriteLine($"{buffer[k]} vs {buffer1[k]}");
                //}
            }
        }
    }
}
Example 3: CompressBitmap
public Stream CompressBitmap(byte[] data, int w, int h)
{
    var memStream = new InMemoryRandomAccessStream();
    Encode(data, (uint)w, (uint)h, memStream);
    var stream = memStream.AsStreamForRead();
    Logger($"Log Encode stream for bugfix:{memStream.Size} => {stream.Length} bytes");
    return stream;
}
Example 4: RunControlLoopAsync
/// <summary>
/// This is an infinite loop which
/// takes a picture with the attached camera,
/// displays it,
/// sends it for recognition to the Microsoft Project Oxford Face API,
/// displays recognition results overlaid on the picture,
/// waits for 5 seconds to allow the result to be examined,
/// starts over.
/// </summary>
private async Task RunControlLoopAsync()
{
    while (true)
    {
        // Take camera picture
        await UpdateStatusAsync("Taking still picture...");
        // TODO focus if possible
        //await mediaCapture.VideoDeviceController.FocusControl.FocusAsync();
        FaceResultsGrid.Children.Clear();
        CountdownProgressBar.Value = 100;
        CameraFlashStoryboard.Begin();

        using (var stream = new InMemoryRandomAccessStream())
        {
            var imageEncodingProperties = ImageEncodingProperties.CreatePng();
            imageEncodingProperties.Width = 320;
            imageEncodingProperties.Height = 200;
            await mediaCapture.CapturePhotoToStreamAsync(imageEncodingProperties, stream);

            // Display camera picture
            await UpdateStatusAsync("Displaying sample picture...");
            stream.Seek(0);
            var bitmapImage = new BitmapImage();
            await bitmapImage.SetSourceAsync(stream);
            ResultImage.Source = bitmapImage;

            // Send picture for recognition
            // We need to encode the raw image as a JPEG to make sure the service can recognize it.
            await UpdateStatusAsync("Uploading picture to Microsoft Project Oxford Face API...");
            stream.Seek(0);
            var recognizedFaces = await GetFaces(stream.AsStreamForRead());

            // Display recognition results
            // Wait a few seconds to give viewers a chance to appreciate all we've done
            await UpdateStatusAsync($"{recognizedFaces.Count()} face(s) found by Microsoft 'Project Oxford' Face API");

            // The face rectangles received from the Face API are measured in pixels of the raw image.
            // We need to calculate the extra scaling and displacement that results from the raw image
            // being displayed in a larger container.
            // We use the FaceResultsGrid as a basis for the calculation, because the ResultImage control's ActualHeight and ActualWidth
            // properties have the same aspect ratio as the image, and not the aspect ratio of the screen.
            double widthScaleFactor = FaceResultsGrid.ActualWidth / bitmapImage.PixelWidth;
            double heightScaleFactor = FaceResultsGrid.ActualHeight / bitmapImage.PixelHeight;
            double scaleFactor = Math.Min(widthScaleFactor, heightScaleFactor);
            bool isTheBlackSpaceOnTheLeft = widthScaleFactor > heightScaleFactor;
            double extraLeftNeeded = 0;
            double extraTopNeeded = 0;
            if (isTheBlackSpaceOnTheLeft)
                extraLeftNeeded = (FaceResultsGrid.ActualWidth - scaleFactor * bitmapImage.PixelWidth) / 2;
            else
                extraTopNeeded = (FaceResultsGrid.ActualHeight - scaleFactor * bitmapImage.PixelHeight) / 2;

            foreach (var face in recognizedFaces)
            {
                var faceOutlineRectangleLeft = extraLeftNeeded + scaleFactor * face.FaceRectangle.Left;
                var faceOutlineRectangleTop = extraTopNeeded + scaleFactor * face.FaceRectangle.Top;
                var faceOutlineRectangleHeight = scaleFactor * face.FaceRectangle.Height;
                var faceOutlineRectangleWidth = scaleFactor * face.FaceRectangle.Width;

                Rectangle faceOutlineRectangle = new Rectangle();
                faceOutlineRectangle.Stroke = new SolidColorBrush(Colors.Black);
                faceOutlineRectangle.StrokeThickness = 3;
                faceOutlineRectangle.HorizontalAlignment = HorizontalAlignment.Left;
                faceOutlineRectangle.VerticalAlignment = VerticalAlignment.Top;
                faceOutlineRectangle.Margin = new Thickness(faceOutlineRectangleLeft, faceOutlineRectangleTop, 0, 0);
                faceOutlineRectangle.Height = faceOutlineRectangleHeight;
                faceOutlineRectangle.Width = faceOutlineRectangleWidth;
                FaceResultsGrid.Children.Add(faceOutlineRectangle);

                TextBlock faceInfoTextBlock = new TextBlock();
                faceInfoTextBlock.Foreground = new SolidColorBrush(Colors.White);
                faceInfoTextBlock.FontSize = 30;
                faceInfoTextBlock.Text = $"{face.Attributes.Gender}, {face.Attributes.Age}";
                Border faceInfoBorder = new Border();
                faceInfoBorder.Background = new SolidColorBrush(Colors.Black);
                faceInfoBorder.Padding = new Thickness(5);
                faceInfoBorder.Child = faceInfoTextBlock;
                faceInfoBorder.HorizontalAlignment = HorizontalAlignment.Left;
                faceInfoBorder.VerticalAlignment = VerticalAlignment.Top;
                faceInfoBorder.Margin = new Thickness(faceOutlineRectangleLeft, faceOutlineRectangleTop - 50, 0, 0);
                FaceResultsGrid.Children.Add(faceInfoBorder);

                TextBlock carInfoTextBlock = new TextBlock();
                carInfoTextBlock.Foreground = new SolidColorBrush(Colors.White);
                carInfoTextBlock.FontSize = 30;
                carInfoTextBlock.Text = GetCarRecommendation(face.Attributes.Gender, (int)face.Attributes.Age);
                Border carInfoBorder = new Border();
                carInfoBorder.Background = new SolidColorBrush(Colors.Black);
                carInfoBorder.Padding = new Thickness(5);
                carInfoBorder.Child = carInfoTextBlock;
                //......... (the rest of this example is omitted) .........
Example 5: AnalyzeButton_Click
private async void AnalyzeButton_Click(object sender, RoutedEventArgs e)
{
    if (processingImage)
    {
        // Ignore button presses while processing the image
        return;
    }

    if (inCaptureState)
    {
        processingImage = true;
        inCaptureState = false;

        // Make the 'Processing...' label visible
        canvasControl.Visibility = Visibility.Visible;
        AnalyzeButton.Content = "...";
        canvasControl.Invalidate();

        var originalPhoto = new InMemoryRandomAccessStream();
        var reencodedPhoto = new InMemoryRandomAccessStream();
        await mediaCapture.CapturePhotoToStreamAsync(ImageEncodingProperties.CreateJpeg(), originalPhoto);
        await originalPhoto.FlushAsync();
        originalPhoto.Seek(0);
        captureElement.Visibility = Visibility.Collapsed;

        // Store the captured photo as a Win2D type for later use
        photoCanvasBitmap = await CanvasBitmap.LoadAsync(canvasControl, originalPhoto);

        // Send the photo to Project Oxford to detect the faces
        lastCapturedFaces = await faceServiceClient.DetectAsync(originalPhoto.AsStreamForRead(), true, true, true, false);

        // Force the canvasControl to be redrawn now that the photo is available
        canvasControl.Invalidate();

        processingImage = false;
        AnalyzeButton.Content = "Restart";
    }
    else
    {
        canvasControl.Visibility = Visibility.Collapsed;
        captureElement.Visibility = Visibility.Visible;
        AnalyzeButton.Content = "Capture Photo";
        photoCanvasBitmap = null;
        canvasControl.Invalidate();
        inCaptureState = true;
    }
}
Example 6: UploadImage
/// <summary>
/// Uploads a photo as a WriteableBitmap. This method converts the given bitmap to a PNG file before sending it to the server.
/// </summary>
/// <param name="uri"></param>
/// <param name="bmp"></param>
/// <returns></returns>
public static async Task UploadImage(string uri, WriteableBitmap bmp)
{
    InMemoryRandomAccessStream memoryStream = new InMemoryRandomAccessStream();
    BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.PngEncoderId, memoryStream);
    encoder.SetPixelData(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied, (uint)bmp.PixelWidth, (uint)bmp.PixelHeight, 96, 96, bmp.PixelBuffer.ToArray());
    await encoder.FlushAsync();

    Stream stream = memoryStream.AsStreamForRead();
    byte[] pngBuffer = new byte[stream.Length];
    stream.Read(pngBuffer, 0, pngBuffer.Length);

    await UploadImage(uri, pngBuffer);
}
Example 7: EncodeAsync
/// <summary>
/// Encodes the specified bitmap data and outputs it to the specified
/// <c>BinaryWriter</c>. Bitmap data should be in BGRA format.
/// For internal use only.
/// </summary>
public async Task EncodeAsync(byte[] bytes, BinaryWriter writer)
{
#if NETFX_CORE
    using (var jpegStream = new InMemoryRandomAccessStream())
    {
        var propertySet = new BitmapPropertySet();
        var qualityValue = new BitmapTypedValue(this.JpegQuality / 100.0, PropertyType.Single);
        propertySet.Add("ImageQuality", qualityValue);
        var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, jpegStream, propertySet);
        if (this.Width != this.OutputWidth)
        {
            encoder.BitmapTransform.ScaledWidth = (uint)this.OutputWidth;
            encoder.BitmapTransform.ScaledHeight = (uint)this.OutputHeight;
        }
        encoder.SetPixelData(
            BitmapPixelFormat.Bgra8,
            BitmapAlphaMode.Straight,
            (uint)this.Width,
            (uint)this.Height,
            96,
            96,
            bytes);
        await encoder.FlushAsync();

        if (writer.BaseStream == null || writer.BaseStream.CanWrite == false)
            return;

        // Header
        writer.Write(this.OutputWidth);
        writer.Write(this.OutputHeight);
        writer.Write((int)jpegStream.Size);

        // Data
        jpegStream.AsStreamForRead().CopyTo(writer.BaseStream);
    }
#else
    await Task.Run(() =>
    {
        var format = PixelFormats.Bgra32;
        int stride = (int)this.Width * format.BitsPerPixel / 8;
        var bmp = BitmapSource.Create(
            this.Width,
            this.Height,
            96.0,
            96.0,
            format,
            null,
            bytes,
            stride);
        BitmapFrame frame;
        if (this.Width != this.OutputWidth || this.Height != this.OutputHeight)
        {
            var transform = new ScaleTransform((double)this.OutputHeight / this.Height, (double)this.OutputHeight / this.Height);
            var scaledbmp = new TransformedBitmap(bmp, transform);
            frame = BitmapFrame.Create(scaledbmp);
        }
        else
        {
            frame = BitmapFrame.Create(bmp);
        }
        var encoder = new JpegBitmapEncoder()
        {
            QualityLevel = this.JpegQuality
        };
        encoder.Frames.Add(frame);
        using (var jpegStream = new MemoryStream())
        {
            encoder.Save(jpegStream);
            if (writer.BaseStream == null || writer.BaseStream.CanWrite == false)
                return;

            // Header
            writer.Write(this.OutputWidth);
            writer.Write(this.OutputHeight);
            writer.Write((int)jpegStream.Length);

            // Data
            jpegStream.Position = 0;
            jpegStream.CopyTo(writer.BaseStream);
        }
    });
#endif
}
Example 8: CaptureAndAnalyze
async Task<string> CaptureAndAnalyze(bool readText = false)
{
    var imgFormat = ImageEncodingProperties.CreateJpeg();

    //NOTE: this is how you can save a frame to the CameraRoll folder:
    //var file = await KnownFolders.CameraRoll.CreateFileAsync($"MCS_Photo{DateTime.Now:HH-mm-ss}.jpg", CreationCollisionOption.GenerateUniqueName);
    //await mediaCapture.CapturePhotoToStorageFileAsync(imgFormat, file);
    //var stream = await file.OpenStreamForReadAsync();

    // Capture a frame and copy it into a MemoryStream
    var memoryStream = new MemoryStream();
    using (var ras = new InMemoryRandomAccessStream())
    {
        await mediaCapture.CapturePhotoToStreamAsync(imgFormat, ras);
        ras.Seek(0);
        using (var stream = ras.AsStreamForRead())
            stream.CopyTo(memoryStream);
    }
    var imageBytes = memoryStream.ToArray();
    memoryStream.Position = 0;

    if (withPreview)
    {
        InvokeOnMain(() =>
        {
            var image = new Image();
            image.Load(new Urho.MemoryBuffer(imageBytes));

            Node child = Scene.CreateChild();
            child.Position = LeftCamera.Node.WorldPosition + LeftCamera.Node.WorldDirection * 2f;
            child.LookAt(LeftCamera.Node.WorldPosition, Vector3.Up, TransformSpace.World);
            child.Scale = new Vector3(1f, image.Height / (float)image.Width, 0.1f) / 10;

            var texture = new Texture2D();
            texture.SetData(image, true);

            var material = new Material();
            material.SetTechnique(0, CoreAssets.Techniques.Diff, 0, 0);
            material.SetTexture(TextureUnit.Diffuse, texture);

            var box = child.CreateComponent<Box>();
            box.SetMaterial(material);

            child.RunActions(new EaseBounceOut(new ScaleBy(1f, 5)));
        });
    }

    try
    {
        var client = new VisionServiceClient(VisionApiKey);
        if (readText)
        {
            var ocrResult = await client.RecognizeTextAsync(memoryStream, detectOrientation: false);
            var words = ocrResult.Regions.SelectMany(region => region.Lines).SelectMany(line => line.Words).Select(word => word.Text);
            return "it says: " + string.Join(" ", words);
        }
        else
        {
            // just describe the picture; you can also use the client.AnalyzeImageAsync method to get more info
            var result = await client.DescribeAsync(memoryStream);
            return result?.Description?.Captions?.FirstOrDefault()?.Text;
        }
    }
    catch (ClientException exc)
    {
        return exc?.Error?.Message ?? "Failed";
    }
    catch (Exception exc)
    {
        return "Failed";
    }
}