This article collects typical usage examples of the Java method java.nio.FloatBuffer.wrap. If you are wondering how FloatBuffer.wrap works, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of its declaring class, java.nio.FloatBuffer.
The following shows 5 code examples of the FloatBuffer.wrap method, sorted by popularity by default.
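Before the project examples, here is a minimal, self-contained sketch of the two standard overloads of FloatBuffer.wrap. It is not taken from any of the projects below; the class name FloatBufferWrapDemo and the sample values are illustrative only.
import java.nio.FloatBuffer;

public class FloatBufferWrapDemo {
    public static void main(String[] args) {
        float[] samples = {0.5f, -0.25f, 1.0f};

        // wrap(float[]) backs the buffer with the array itself: no copy is made,
        // so writes through the buffer are visible in the array and vice versa.
        FloatBuffer whole = FloatBuffer.wrap(samples);
        System.out.println(whole.get(0));       // 0.5

        // wrap(float[], offset, length) sets position = offset and limit = offset + length.
        FloatBuffer slice = FloatBuffer.wrap(samples, 1, 2);
        System.out.println(slice.position());   // 1
        System.out.println(slice.remaining());  // 2
    }
}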
Example 1: processDvsFrame
import java.nio.FloatBuffer; // import the package/class this method depends on
@Override
public float[] processDvsFrame(DvsFramer.DvsFrame frame) {
    FloatBuffer b = FloatBuffer.wrap(frame.getImage());
    float[] results = executeDvsFrameGraph(b, frame.getWidth(), frame.getHeight());
    return results;
}
Example 2: executeDvsFrameGraph
import java.nio.FloatBuffer; // import the package/class this method depends on
/**
 * Executes the stored Graph of the CNN.
 * <p>
 * References:
 * https://github.com/tensorflow/tensorflow/blob/master/tensorflow/java/src/main/java/org/tensorflow/op/Operands.java
 * https://github.com/tensorflow/tensorflow/issues/7149
 * https://stackoverflow.com/questions/44774234/why-tensorflow-uses-channel-last-ordering-instead-of-row-major
 *
 * @param pixbuf the pixel buffer holding the frame, as collected from
 * DvsFramer in DvsFrame
 * @param width width of the image
 * @param height height of the image
 * @return activations of the output layer
 */
private float[] executeDvsFrameGraph(FloatBuffer pixbuf, int width, int height) {
    // final float mean = processor.getImageMean(), scale = processor.getImageScale();
    final int numChannels = processor.isMakeRGBFrames() ? 3 : 1;
    inputLayer = new InputLayer(width, height, numChannels); // TODO hack since we don't know the input size until the network runs
    // TODO super hack: brute-force flip of the image vertically because tobi cannot see how to flip an image in TensorFlow.
    // Also, make an RGB frame from the gray DVS image by cloning the gray value to each channel, in WHC order.
    final float[] origarray = pixbuf.array();
    FloatBuffer flipped = FloatBuffer.allocate(pixbuf.limit() * numChannels);
    final float[] flippedarray = flipped.array();
    // prepare RGB scaling factors to make RGB channels from grayscale; each channel can have a different weighting
    float[] rgb;
    if (processor.isMakeRGBFrames()) {
        rgb = new float[]{1, 1, 1};
    } else {
        rgb = new float[]{1};
    }
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            final int origIdx = x + width * y;
            for (int c = 0; c < numChannels; c++) {
                final int newIdx = c + numChannels * (x + (width * (height - y - 1)));
                flippedarray[newIdx] = origarray[origIdx] * rgb[c];
            }
        }
    }
    flipped = FloatBuffer.wrap(flippedarray);
    try (Tensor<Float> imageTensor = Tensor.create(new long[]{1, height, width, numChannels}, flipped)) { // use NHWC order according to the links above
        // int numElements = imageTensor.numElements();
        // long[] shape = imageTensor.shape();
        float[] output = TensorFlow.executeGraph(executionGraph, imageTensor, processor.getInputLayerName(), processor.getOutputLayerName());
        outputLayer = new OutputLayer(output);
        getSupport().firePropertyChange(EVENT_MADE_DECISION, null, this);
        return output;
    } catch (IllegalArgumentException ex) {
        StringBuilder msg = new StringBuilder("<html>Caught exception <p>" + ex.toString() + "<p>Did you set inputLayerName and outputLayerName in the property group <i>2. Analysis</i>?</p>");
        msg.append("<p>The IO layer names could be as follows (the string inside the single quotes):</p> <ul> ");
        for (String s : ioLayers) {
            msg.append("<li>" + (s.replaceAll("<", "").replaceAll(">", "")) + "</li>");
        }
        msg.append("</ul>");
        JOptionPane.showMessageDialog(processor.getChip().getAeViewer(), msg.toString(),
                "Error computing network", JOptionPane.WARNING_MESSAGE);
        throw new IllegalArgumentException(ex.getCause());
    }
}
Example 3: render
import java.nio.FloatBuffer; // import the package/class this method depends on
@SuppressWarnings("static-access")
protected void render(GL2 gl2) {
    if (use_draw_pixels_ || view_width_ == 0 || view_height_ == 0) {
        return;
    }
    assert (initialized_context_ != null);
    final float[] vertex_data = {
        // tu,  tv,    x,     y,    z
        0.0f, 1.0f, -1.0f, -1.0f, 0.0f,
        1.0f, 1.0f,  1.0f, -1.0f, 0.0f,
        1.0f, 0.0f,  1.0f,  1.0f, 0.0f,
        0.0f, 0.0f, -1.0f,  1.0f, 0.0f
    };
    FloatBuffer vertices = FloatBuffer.wrap(vertex_data);
    gl2.glClear(gl2.GL_COLOR_BUFFER_BIT | gl2.GL_DEPTH_BUFFER_BIT);
    gl2.glMatrixMode(gl2.GL_MODELVIEW);
    gl2.glLoadIdentity();
    // Match GL units to screen coordinates.
    gl2.glViewport(0, 0, view_width_, view_height_);
    gl2.glMatrixMode(gl2.GL_PROJECTION);
    gl2.glLoadIdentity();
    // Draw the background gradient.
    gl2.glPushAttrib(gl2.GL_ALL_ATTRIB_BITS);
    gl2.glBegin(gl2.GL_QUADS);
    gl2.glColor4f(1.0f, 0.0f, 0.0f, 1.0f); // red
    gl2.glVertex2f(-1.0f, -1.0f);
    gl2.glVertex2f(1.0f, -1.0f);
    gl2.glColor4f(0.0f, 0.0f, 1.0f, 1.0f); // blue
    gl2.glVertex2f(1.0f, 1.0f);
    gl2.glVertex2f(-1.0f, 1.0f);
    gl2.glEnd();
    gl2.glPopAttrib();
    // Rotate the view based on the mouse spin.
    if (spin_x_ != 0) {
        gl2.glRotatef(-spin_x_, 1.0f, 0.0f, 0.0f);
    }
    if (spin_y_ != 0) {
        gl2.glRotatef(-spin_y_, 0.0f, 1.0f, 0.0f);
    }
    if (transparent_) {
        // Alpha blending style. Texture values have premultiplied alpha.
        gl2.glBlendFunc(gl2.GL_ONE, gl2.GL_ONE_MINUS_SRC_ALPHA);
        // Enable alpha blending.
        gl2.glEnable(gl2.GL_BLEND);
    }
    // Enable 2D textures.
    gl2.glEnable(gl2.GL_TEXTURE_2D);
    // Draw the facets with the texture.
    assert (texture_id_[0] != 0);
    gl2.glBindTexture(gl2.GL_TEXTURE_2D, texture_id_[0]);
    gl2.glInterleavedArrays(gl2.GL_T2F_V3F, 0, vertices);
    gl2.glDrawArrays(gl2.GL_QUADS, 0, 4);
    // Disable 2D textures.
    gl2.glDisable(gl2.GL_TEXTURE_2D);
    if (transparent_) {
        // Disable alpha blending.
        gl2.glDisable(gl2.GL_BLEND);
    }
}
Example 4: process
import java.nio.FloatBuffer; // import the package/class this method depends on
/**
 * Process a batch of samples. Alternative interface if you prefer to work with arrays.
 *
 * @param factor resampling rate for this batch
 * @param inBuffer array containing input samples in the range -1.0 to 1.0
 * @param inBufferOffset offset into inBuffer at which to start processing
 * @param inBufferLen number of valid elements in inBuffer
 * @param lastBatch pass true if this is the last batch of samples
 * @param outBuffer array to hold the resampled data
 * @param outBufferOffset offset into outBuffer at which to start writing
 * @param outBufferLen number of elements available in outBuffer
 * @return the number of samples consumed and generated
 */
public Result process(double factor, float[] inBuffer, int inBufferOffset, int inBufferLen, boolean lastBatch, float[] outBuffer, int outBufferOffset, int outBufferLen) {
    FloatBuffer inputBuffer = FloatBuffer.wrap(inBuffer, inBufferOffset, inBufferLen);
    FloatBuffer outputBuffer = FloatBuffer.wrap(outBuffer, outBufferOffset, outBufferLen);
    process(factor, inputBuffer, lastBatch, outputBuffer);
    return new Result(inputBuffer.position() - inBufferOffset, outputBuffer.position() - outBufferOffset);
}
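The Result arithmetic above works because FloatBuffer.wrap(array, offset, length) leaves the buffer's position at offset and the wrapped buffer shares the caller's array, so anything the inner process(...) writes lands directly in outBuffer, and the advance of position() counts the samples handled. Below is a minimal sketch of that bookkeeping; the class name WrapOffsetDemo and all values are illustrative and not part of the resampler API.
import java.nio.FloatBuffer;

public class WrapOffsetDemo {
    public static void main(String[] args) {
        float[] outBuffer = new float[8];
        int outBufferOffset = 2;

        // position starts at the offset, limit at offset + length
        FloatBuffer outputBuffer = FloatBuffer.wrap(outBuffer, outBufferOffset, 4);
        outputBuffer.put(0.75f); // written straight into outBuffer[2]; no copy-back needed

        int generated = outputBuffer.position() - outBufferOffset;
        System.out.println(generated);    // 1
        System.out.println(outBuffer[2]); // 0.75
    }
}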
Example 5: tensorFloat
import java.nio.FloatBuffer; // import the package/class this method depends on
/**
* Creates a TensorFlow Tensor containing data from the given float image.
* <p>
* Note that this does _not_ adjust any dimensions. This means that
* the resulting Tensor will have a shape corresponding to the reversed
* dimensions of the image.
* </p><p>
* Also note that this will use the backing RAI's primitive array when one is
* available. Otherwise a copy will be made.
* </p>
* @param image The image which should be put into the Tensor.
* @return A Tensor containing the data of the image.
*/
public static Tensor tensorFloat(
        final RandomAccessibleInterval<FloatType> image)
{
    final float[] value = floatArray(image);
    FloatBuffer buffer = FloatBuffer.wrap(value);
    return Tensor.create(shape(image), buffer);
}