本文整理汇总了C++中ofPixels::getData方法的典型用法代码示例。如果您正苦于以下问题:C++ ofPixels::getData方法的具体用法?C++ ofPixels::getData怎么用?C++ ofPixels::getData使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ofPixels
的用法示例。
在下文中一共展示了ofPixels::getData方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: save
// Encode `pix` as a JPEG into `buf` at the given quality.
// 3-channel RGB input is compressed directly with TurboJPEG; RGBA and
// grayscale inputs are first converted to a 3-channel RGB copy and then
// encoded through a recursive call.
void ofxTurboJpeg::save(ofBuffer &buf, const ofPixels& pix, int jpegQuality)
{
    const int pitch = 0;
    const int flags = 0;
    const int subsamp = 0;
    unsigned long jpegSize = 0;

    const ofImageType type = pix.getImageType();

    if (type == OF_IMAGE_COLOR)
    {
        const int bpp = 3;
        vector<unsigned char> jpegBuffer(pix.getWidth() * pix.getHeight() * bpp);
        unsigned char * output = &jpegBuffer[0];
        tjCompress(handleCompress, (unsigned char*)(pix.getData()), pix.getWidth(), pitch,
                   pix.getHeight(), bpp, output, &jpegSize, subsamp, jpegQuality, flags);
        buf.set((const char*)output, jpegSize);
    }
    else if (type == OF_IMAGE_COLOR_ALPHA)
    {
        // Drop the alpha channel, then encode the RGB copy.
        ofPixels rgb;
        rgb.allocate(pix.getWidth(), pix.getHeight(), 3);
        const unsigned char *src = pix.getData();
        unsigned char *dst = rgb.getData();
        const int numPixels = pix.getWidth() * pix.getHeight();
        for (int i = 0; i < numPixels; i++, src += 4, dst += 3)
        {
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
        }
        save(buf, rgb, jpegQuality);
    }
    else if (type == OF_IMAGE_GRAYSCALE)
    {
        // Replicate the single luma channel into R, G and B.
        ofPixels rgb;
        rgb.allocate(pix.getWidth(), pix.getHeight(), 3);
        const unsigned char *src = pix.getData();
        unsigned char *dst = rgb.getData();
        const int numPixels = pix.getWidth() * pix.getHeight();
        for (int i = 0; i < numPixels; i++, src += 1, dst += 3)
        {
            dst[0] = src[0];
            dst[1] = src[0];
            dst[2] = src[0];
        }
        save(buf, rgb, jpegQuality);
    }
}
示例2: loadData
//----------------------------------------------------------
// Upload `pix` into this texture with the requested GL pixel format.
// Lazily allocates the texture on first use, sized and typed from the
// pixels themselves.
void ofTexture::loadData(const ofPixels & pix, int glFormat){
if(!isAllocated()){
allocate(pix.getWidth(), pix.getHeight(), ofGetGlInternalFormat(pix), ofGetUsingArbTex(), glFormat, ofGetGlType(pix));
}
// Match GL_UNPACK_ALIGNMENT to the row layout before the upload.
ofSetPixelStoreiAlignment(GL_UNPACK_ALIGNMENT,pix.getWidth(),pix.getBytesPerChannel(),ofGetNumChannelsFromGLFormat(glFormat));
loadData(pix.getData(), pix.getWidth(), pix.getHeight(), glFormat, ofGetGlType(pix));
}
示例3: getData
//----------
bool Message::getData(ofPixels & data) const {
auto & header = this->getHeader<Header::Pixels>();
if (this->hasHeader<Header::Pixels>()) {
const auto & header = this->getHeader<Header::Pixels>();
auto bodySize = this->getBodySize();
ofPixelFormat pixelFormat = (ofPixelFormat)header.pixelFormat;
//reallocate if we need to
if (data.getWidth() != header.width || data.getHeight() != header.height || data.getPixelFormat() != pixelFormat) {
data.allocate(header.width, header.height, pixelFormat);
}
if (data.size() != bodySize) {
OFXSQUASHBUDDIES_ERROR << "Message body is of wrong size to fill pixels. Maybe a bug in sender?";
return false;
}
else {
memcpy(data.getData(), this->getBodyData(), bodySize);
return true;
}
}
else {
OFXSQUASHBUDDIES_WARNING << "Message Header doesn't match Pixels type";
return false;
}
}
示例4: toCcv
// Wrap `pix` in a ccv dense-matrix header without copying the pixels.
// NOTE(review): CCV_8U | CCV_C3 assumes 8-bit, 3-channel input — confirm
// callers never pass grayscale/RGBA. The cast discards constness, so this
// is only safe while ccv treats the data as read-only, and the returned
// matrix must not outlive `pix` (it aliases its buffer).
ccv_dense_matrix_t toCcv(const ofPixels& pix) {
return ccv_dense_matrix(pix.getHeight(),
pix.getWidth(),
CCV_8U | CCV_C3,
(void*) pix.getData(),
0);
}
示例5: publishPixels
// Publish one frame into the back buffer for DeckLink output.
// Frames whose dimensions don't match the configured output size are
// rejected with an error log. The copy happens under the mutex shared
// with the playout side; `has_new_frame` signals the consumer.
void Output::publishPixels(ofPixels &pix)
{
assert(mutex);
if (pix.getWidth() == uiFrameWidth
&& pix.getHeight() == uiFrameHeight)
{
mutex->lock();
// (Re)allocate the back buffer if its size or channel count changed.
if (!back_buffer->isAllocated() ||
back_buffer->getWidth() != pix.getWidth() ||
back_buffer->getHeight() != pix.getHeight()) {
back_buffer->allocate(pix.getWidth(), pix.getHeight(), pix.getNumChannels());
}
// NOTE(review): this copies size-1 bytes to destination offset 1, i.e.
// the whole frame is shifted one byte and the first destination byte is
// left stale. The straightforward copy is commented out below, so the
// shift looks deliberate, but the reason is undocumented — confirm
// before "fixing".
memcpy(&back_buffer->getData()[1], pix.getData(), pix.size() - 1);
//*back_buffer = pix;
// Output expects 4-channel frames; expand in place if needed.
if (back_buffer->getNumChannels() != 4)
back_buffer->setNumChannels(4);
has_new_frame = true;
mutex->unlock();
}
else
ofLogError("ofxDeckLinkAPI::Output") << "invalid pixel size";
}
示例6: glitchUpdate
//--------------------------------------------------------------
// Glitch pipeline: take the incoming camera pixels (occasionally read
// from a randomly shifted pointer for a channel-tearing look), save them
// as a JPEG, flip random bits in the compressed file, and reload it to
// produce decoding artifacts.
void ofApp::glitchUpdate(ofPixels _p) {
	string compressedFilename = "compressed.jpg";
	unsigned char * _c = _p.getData();
	float coin = ofRandom(100);
	if (coin > 95) {
		// Mis-align the source pointer so channels/rows shift visually.
		// FIXME(review): setFromPixels() below still reads a full frame
		// from the shifted pointer, so up to ~100 bytes past the end of
		// the pixel buffer are read (undefined behavior). Consider
		// clamping the read or copying into a padded buffer.
		_c = _p.getData() + (int)ofRandom(100);
	}
	imgDirectGlitch.setImageType(OF_IMAGE_COLOR);
	// (removed unused locals _w/_h computed from baseArch.fassadeCorner)
	imgDirectGlitch.setFromPixels(_c, webCam.getWidth(), webCam.getHeight(), OF_IMAGE_COLOR);
	imgDirectGlitch.save(compressedFilename, quality);

	// Corrupt one random byte of the saved JPEG by OR-ing in a bit mask.
	ofBuffer file = ofBufferFromFile(compressedFilename);
	int fileSize = file.size();
	char * buffer = file.getData();
	int whichByte = (int) ofRandom(fileSize);
	int whichBit = ofRandom(8);
	char bitMask;
	if ( whichBit >4 ) {
		bitMask = 1 << whichBit;
	} else {
		// Low bit positions get a wider 3-bit mask for stronger glitches.
		bitMask = 7 << whichBit;
	}
	buffer[whichByte] |= bitMask;
	ofBufferToFile(compressedFilename, file);
	imgDirectGlitch.load(compressedFilename);
	// float coin = ofRandom(100);
	// if (coin > 95) {
	// reset();
	// }
}
示例7: readToPixels
//----------------------------------------------------------
void ofTexture::readToPixels(ofPixels & pixels) const {
#ifndef TARGET_OPENGLES
pixels.allocate(texData.width,texData.height,ofGetImageTypeFromGLType(texData.glInternalFormat));
ofSetPixelStoreiAlignment(GL_PACK_ALIGNMENT,pixels.getWidth(),pixels.getBytesPerChannel(),pixels.getNumChannels());
glBindTexture(texData.textureTarget,texData.textureID);
glGetTexImage(texData.textureTarget,0,ofGetGlFormat(pixels),GL_UNSIGNED_BYTE, pixels.getData());
glBindTexture(texData.textureTarget,0);
#endif
}
示例8: readToPixels
//----------------------------------------------------------
// Read an FBO color attachment back into `pixels`. No-op if the FBO is
// not allocated. On desktop GL this delegates to the attachment
// texture's readToPixels; on OpenGL ES it falls back to glReadPixels
// from the bound framebuffer (attachmentPoint is ignored on that path).
void ofFbo::readToPixels(ofPixels & pixels, int attachmentPoint) const{
if(!bIsAllocated) return;
#ifndef TARGET_OPENGLES
getTexture(attachmentPoint).readToPixels(pixels);
#else
pixels.allocate(settings.width,settings.height,ofGetImageTypeFromGLType(settings.internalformat));
bind();
int format = ofGetGLFormatFromInternal(settings.internalformat);
glReadPixels(0,0,settings.width, settings.height, format, GL_UNSIGNED_BYTE, pixels.getData());
unbind();
#endif
}
示例9: setData
//----------
// Serialize `data` into this message: a Pixels header describing the
// image, followed by the raw pixel bytes as the body.
void Message::setData(const ofPixels & data) {
	const auto bodySize = data.size(); // inner payload, in bytes

	// Make room for header + payload in one go.
	this->headerAndData.resize(sizeof(Header::Pixels) + bodySize);

	// Describe the image in the header (`true` presumably marks/initializes
	// the header type in place — see Message::getHeader).
	auto & header = this->getHeader<Header::Pixels>(true);
	header.width = data.getWidth();
	header.height = data.getHeight();
	header.pixelFormat = data.getPixelFormat();

	// Copy the pixels straight into the body region.
	memcpy(this->getBodyData(), data.getData(), bodySize);
}
示例10: critical
//----------
// Feed one captured camera frame into the graycode decoder.
// The first frame sizes the internal data set; frames beyond the
// payload's frame count are rejected. Multi-channel input is averaged
// down to a mono image before being stored/processed.
void Decoder::operator<<(const ofPixels& pixels) {
if (frame == 0) {
data.allocate(pixels.getWidth(), pixels.getHeight(), payload->getWidth(), payload->getHeight());
}
if (frame > payload->getFrameCount() - 1) {
#pragma omp critical(ofLog)
ofLogWarning("ofxGraycode") << "Can't add more frames, we've already captured a full set. please clear()";
return;
}
if (!pixels.isAllocated()) {
ofLogError("ofxGraycode") << "Cannot add this capture as the pixels object is empty";
return;
}
// Downsample to mono by averaging channels; greyPixels then points at
// either the temporary mono copy or the caller's pixels directly.
const ofPixels* greyPixels;
if (pixels.getNumChannels() > 1) {
ofPixels* downsample = new ofPixels();
downsample->allocate(pixels.getWidth(), pixels.getHeight(), OF_PIXELS_MONO);
downsample->set(0, 0);
const uint8_t* in = pixels.getData();
uint8_t* out = downsample->getData();
// `out` advances by one each time `i` crosses a channel boundary: the
// comma operator bumps it AFTER i++, so each output pixel accumulates
// channel/numChannels from every input channel of that pixel.
for (int i = 0; i < pixels.size(); i++, out += (i % pixels.getNumChannels() == 0)) {
*out += *in++ / pixels.getNumChannels();
}
greyPixels = downsample;
}
else
greyPixels = &pixels;
// Offline payloads buffer the captures; online ones decode immediately.
if (this->payload->isOffline())
captures.push_back(*greyPixels);
else
payload->readPixels(frame, *greyPixels);
frame++;
if (frame >= payload->getFrameCount()) {
calc();
frame = payload->getFrameCount();
}
// Free the temporary mono copy if we made one.
if (greyPixels != &pixels)
delete greyPixels;
}
示例11: toDLib
// Convert interleaved 8-bit ofPixels into a dlib array2d of rgb_pixel.
// Single-channel input is expanded to grey RGB via assign_pixel; for
// multi-channel input the first three channels of each pixel are taken
// (alpha, when present, is ignored). Always returns true.
bool toDLib(const ofPixels& inPix, array2d<rgb_pixel>& outPix){
    const int width = inPix.getWidth();
    const int height = inPix.getHeight();
    outPix.set_size( height, width );
    const int chans = inPix.getNumChannels();
    const unsigned char* data = inPix.getData();
    for ( int n = 0; n < height; n++ )
    {
        const unsigned char* v = &data[n * width * chans];
        for ( int m = 0; m < width; m++ )
        {
            if ( chans==1 )
            {
                unsigned char p = v[m];
                assign_pixel( outPix[n][m], p );
            }
            else{
                // Index by the actual channel count: the old code
                // hard-coded a stride of 3, which read the wrong bytes
                // for 4-channel (RGBA) input.
                rgb_pixel p;
                p.red = v[m*chans];
                p.green = v[m*chans+1];
                p.blue = v[m*chans+2];
                assign_pixel( outPix[n][m], p );
            }
        }
    }
    return true;
}
示例12: load
bool ofxTurboJpeg::load(const ofBuffer& buf, ofPixels &pix)
{
int w, h;
int subsamp;
int ok = tjDecompressHeader2(handleDecompress, (unsigned char*)buf.getData(), buf.size(), &w, &h, &subsamp);
if (ok != 0)
{
printf("Error in tjDecompressHeader2():\n%s\n", tjGetErrorStr());
return false;
}
pix.allocate(w, h, 3);
tjDecompress(handleDecompress, (unsigned char*)buf.getData(), buf.size(), pix.getData(), w, 0, h, 3, 0);
return true;
}
示例13: scale
// Convert/scale `pixels` into this frame's pixel format via libswscale,
// then advance the frame's presentation timestamp by one.
// Returns false when no scaling context has been created yet.
bool Frame::scale(ofPixels& pixels)
{
if (sws_ctx)
{
uint8_t * inData[1] = { pixels.getData() }; // RGBA32 have one plane
//
// NOTE: In a more general setting, the rows of your input image may
// be padded; that is, the bytes per row may not be 4 * width.
// In such cases, inLineSize should be set to that padded width.
//
int inLinesize[1] = { pixels.getBytesStride() };
sws_scale(sws_ctx, inData, inLinesize, 0, pixels.getHeight(), frm->data, frm->linesize);
frm->pts = frm->pts + 1;
return true;
}
// sws_scale(sws_ctx, inData, inLinesize, 0, ctx->height, frame->data, frame->linesize);
return false;
}
示例14: process
// Push `pixels` through the Java MobileVision face detector and publish
// the analyzed faces on the `fromAnalyze` channel.
// For each detected face, per-face float data is fetched via the Java
// side's getData(int): [smile, leftEyeOpen, rightEyeOpen, 12 x/y pairs].
void ofxAndroidMobileVision::process(ofPixels &pixels){
	if(!javaMobileVision){
		ofLogError("ofxAndroidMobileVision") << "update(): java not loaded";
		return;
	}
	JNIEnv *env = ofGetJNIEnv();
	jmethodID javaMethod = env->GetMethodID(javaClass,"update","([BII)I");
	if(!javaMethod ){
		ofLogError("ofxAndroidMobileVision") << "update(): couldn't get java update for MobileVision";
		return;
	}

	// Copy the pixels into a Java byte array and run detection.
	jbyteArray arr = env->NewByteArray(pixels.size());
	env->SetByteArrayRegion( arr, 0, pixels.size(), (const signed char*) pixels.getData());
	int numFaces = env->CallIntMethod(javaMobileVision, javaMethod, arr, pixels.getWidth(), pixels.getHeight());
	env->DeleteLocalRef(arr);

	// Hoisted out of the loop: the method id doesn't change per face.
	jmethodID getDataMethod = env->GetMethodID(javaClass, "getData", "(I)[F");

	vector<ofxAndroidMobileVisionFace> analyzedfaces;
	for(int i=0;i<numFaces;i++) {
		// Get data for this face.
		jfloatArray data = (jfloatArray) env->CallObjectMethod(javaMobileVision, getDataMethod, 0);
		jboolean isCopy;
		jfloat *body = env->GetFloatArrayElements(data, &isCopy);
		ofxAndroidMobileVisionFace face;
		face.smileProbability = body[0];
		face.leftEyeOpenProbability = body[1];
		face.rightEyeOpenProbability = body[2];
		for(int j=0;j<12;j++){
			ofVec2f p;
			p.x = body[j*2+3];
			p.y = body[j*2+4];
			face.landmarks.push_back(p);
		}
		analyzedfaces.push_back(face);
		// Release the float buffer — the original leaked/pinned it for
		// every face. JNI_ABORT: we only read, no need to copy back.
		env->ReleaseFloatArrayElements(data, body, JNI_ABORT);
		env->DeleteLocalRef(data);
	}
	fromAnalyze.send(analyzedfaces);
}
示例15: drawHistogram
// Draw an additively-blended RGB histogram of `pix`, scaled to `height`
// pixels tall. Assumes 3-channel interleaved 8-bit data — TODO confirm
// at call sites. `skip` subsamples: each loop iteration reads one RGB
// triplet then jumps 3*skip bytes, i.e. every (skip+1)-th pixel.
void drawHistogram(const ofPixels& pix, float height = 128, int skip = 16) {
	vector<float> r(256), g(256), b(256);
	const unsigned char* data = pix.getData();
	int n = pix.size();
	// Guard with i+2 < n so a partial triplet at the tail is never read.
	for(int i = 0; i + 2 < n; i += 3*skip) {
		r[data[i++]]++;
		g[data[i++]]++;
		b[data[i++]]++;
	}
	ofMesh rmesh, gmesh, bmesh;
	rmesh.setMode(OF_PRIMITIVE_TRIANGLE_STRIP);
	gmesh.setMode(OF_PRIMITIVE_TRIANGLE_STRIP);
	bmesh.setMode(OF_PRIMITIVE_TRIANGLE_STRIP);
	// float (not int): bins are float, and peak also guards the divide.
	float peak = 0;
	// i <= 255: the original stopped at 254, dropping bin 255 from both
	// the meshes and the peak (an all-255 image then divided by zero).
	for(int i = 0; i <= 255; i++) {
		rmesh.addVertex(ofVec3f(i, 0));
		rmesh.addVertex(ofVec3f(i, r[i]));
		gmesh.addVertex(ofVec3f(i, 0));
		gmesh.addVertex(ofVec3f(i, g[i]));
		bmesh.addVertex(ofVec3f(i, 0));
		bmesh.addVertex(ofVec3f(i, b[i]));
		peak = MAX(peak, r[i]);
		peak = MAX(peak, g[i]);
		peak = MAX(peak, b[i]);
	}
	if (peak <= 0) peak = 1; // empty input: avoid division by zero below
	ofPushMatrix();
	ofPushStyle();
	ofEnableBlendMode(OF_BLENDMODE_ADD);
	ofScale(2, height / peak);
	ofSetColor(255);
	ofDrawLine(256, 0, 256, peak);
	ofTranslate(.5, 0);
	ofSetColor(255, 0, 0);
	rmesh.draw();
	ofSetColor(0, 255, 0);
	gmesh.draw();
	ofSetColor(0, 0, 255);
	bmesh.draw();
	ofPopStyle();
	ofPopMatrix();
}