This page collects typical usage examples of the C++ method ofPixels::getNumChannels. If you are unsure what ofPixels::getNumChannels does, how to call it, or where to see it used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, ofPixels.
Fifteen code examples of ofPixels::getNumChannels are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: ofGetGlInternalFormat
//---------------------------------
int ofGetGlInternalFormat(const ofPixels& pix) {
#ifndef TARGET_OPENGLES
    switch(pix.getNumChannels()) {
        case 3: return GL_RGB8;
        case 4: return GL_RGBA8;
        case 2:
            if(ofIsGLProgrammableRenderer()){
                return GL_RG8;
            }else{
                return GL_LUMINANCE_ALPHA;
            }
        default:
            if(ofIsGLProgrammableRenderer()){
                return GL_R8;
            }else{
                return GL_LUMINANCE8;
            }
    }
#else
    switch(pix.getNumChannels()) {
        case 3: return GL_RGB;
        case 4: return GL_RGBA;
        case 2: return GL_LUMINANCE_ALPHA;
        default: return GL_LUMINANCE;
    }
#endif
}
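A minimal usage sketch (not from the original source; the buffer and texture names are illustrative): the channel count chosen when the pixels were allocated determines which internal format the texture gets.

// hypothetical usage: allocate a texture whose internal format
// matches the channel count of a pixel buffer
ofPixels pix;
pix.allocate(640, 480, OF_PIXELS_RGBA);    // 4 channels
ofTexture tex;
tex.allocate(pix.getWidth(), pix.getHeight(), ofGetGlInternalFormat(pix)); // GL_RGBA8 on desktop GL
tex.loadData(pix);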
Example 2: ofGetGlInternalFormat
//---------------------------------
int ofGetGlInternalFormat(const ofPixels& pix) {
    switch(pix.getNumChannels()) {
        case 3: return GL_RGB;
        case 4: return GL_RGBA;
        default: return GL_LUMINANCE;
    }
}
Example 3: publishPixels
void Output::publishPixels(ofPixels &pix)
{
    assert(mutex);
    if (pix.getWidth() == uiFrameWidth
        && pix.getHeight() == uiFrameHeight)
    {
        mutex->lock();
        if (!back_buffer->isAllocated() ||
            back_buffer->getWidth() != pix.getWidth() ||
            back_buffer->getHeight() != pix.getHeight()) {
            back_buffer->allocate(pix.getWidth(), pix.getHeight(), pix.getNumChannels());
        }
        // NOTE: copies with a one-byte offset into the destination,
        // presumably to shift channel alignment (e.g. RGBA -> ARGB);
        // kept as in the original source
        memcpy(&back_buffer->getData()[1], pix.getData(), pix.size() - 1);
        //*back_buffer = pix;
        if (back_buffer->getNumChannels() != 4)
            back_buffer->setNumChannels(4);
        has_new_frame = true;
        mutex->unlock();
    }
    else
        ofLogError("ofxDeckLinkAPI::Output") << "invalid pixel size";
}
Example 4: update
void VectorField::update(const ofPixels &pixels, const float maxStrength) {
    // reset all field vectors to 0
    clearField();
    // iterate over all vectors in the field
    for (int y = 1; y <= mSubdivision; ++y) {
        for (int x = 1; x <= mSubdivision; ++x) {
            // calculate the index of the vector in the field
            int fieldIndex = (y - 1) * (mSubdivision - 1) + (x - 1);
            // calculate the pixel locations of the subdivisions: center, top, bottom, right and left
            int xPos = x * mIncX;
            int yPos = y * mIncY;
            int topPos = (y - 1) * mIncY;
            int bottomPos = (y + 1) * mIncY;
            int leftPos = (x - 1) * mIncX;
            int rightPos = (x + 1) * mIncX;
            // little hack to get values for the last line of pixels
            bottomPos = (bottomPos == mHeight) ? bottomPos - 1 : bottomPos;
            // extract the red-channel value for all eight surrounding positions;
            // only the red channel is relevant, since the perlin noise shader writes
            // the same value into all three channels (red, green, blue), hence the greyscale image
            int nw = pixels[ (topPos * mWidth + leftPos) * pixels.getNumChannels() ];
            int n_ = pixels[ (topPos * mWidth + xPos) * pixels.getNumChannels() ];
            int ne = pixels[ (topPos * mWidth + rightPos) * pixels.getNumChannels() ];
            int _e = pixels[ (yPos * mWidth + rightPos) * pixels.getNumChannels() ];
            int se = pixels[ (bottomPos * mWidth + rightPos) * pixels.getNumChannels() ];
            int s_ = pixels[ (bottomPos * mWidth + xPos) * pixels.getNumChannels() ];
            int sw = pixels[ (bottomPos * mWidth + leftPos) * pixels.getNumChannels() ];
            int _w = pixels[ (yPos * mWidth + leftPos) * pixels.getNumChannels() ];
            // calculate the difference between the subdivisions above and below,
            // and to the left and right
            float diffX = (nw + _w + sw) - (ne + _e + se);
            float diffY = (nw + n_ + ne) - (sw + s_ + se);
            // update field position
            mField[fieldIndex].x = diffX;
            mField[fieldIndex].y = diffY;
            int pixelValue = (int) pixels[(yPos * mWidth + xPos) * pixels.getNumChannels()];
            // apply max strength
            mField[fieldIndex].normalize();
            mField[fieldIndex] *= ofMap((float) pixelValue, 0.f, 255.f, 0.f, maxStrength);
            // store the pixel value in the z dimension
            mField[fieldIndex].z = pixelValue;
        }
    }
}
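The indexing expression used above, (y * width + x) * getNumChannels(), is the standard idiom for addressing interleaved pixel data, and nearly every example on this page relies on it. A self-contained sketch (the helper name is illustrative):

// read one channel of the pixel at (x, y) from an interleaved buffer;
// channel 0 is red for RGB/RGBA images
static int channelAt(const ofPixels& pix, int x, int y, int channel = 0) {
    int idx = (y * pix.getWidth() + x) * pix.getNumChannels() + channel;
    return pix[idx];
}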
Example 5: setImage
//--------------------------------------------------------------
// set the division rates based on the ALPHA (!) values of the input image
void Rd::setImage(ofPixels input){
    input.resize(w, h); // resize input image to simulation dimensions
    int numChannels = input.getNumChannels();
    for(int i = 0; i < vectorSize; i++){
        int a = input[i * numChannels + 3]; // get alpha of pixel
        D[i] = a / 255.0;
    }
}
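Note that input[i * numChannels + 3] assumes four channels; with an RGB image it would read into the following pixel. A defensive variant (a sketch, assuming the same members w, h, vectorSize and D as above):

void Rd::setImage(ofPixels input){
    input.resize(w, h);
    int numChannels = input.getNumChannels();
    if(numChannels < 4){
        ofLogWarning("Rd") << "setImage: input has no alpha channel";
        return;
    }
    for(int i = 0; i < vectorSize; i++){
        D[i] = input[i * numChannels + 3] / 255.0; // alpha of pixel i
    }
}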
Example 6: operator<<
//----------
void Decoder::operator<<(const ofPixels& pixels) {
    if (frame == 0) {
        data.allocate(pixels.getWidth(), pixels.getHeight(), payload->getWidth(), payload->getHeight());
    }
    if (frame > payload->getFrameCount() - 1) {
#pragma omp critical(ofLog)
        ofLogWarning("ofxGraycode") << "Can't add more frames, we've already captured a full set. Please clear()";
        return;
    }
    if (!pixels.isAllocated()) {
        ofLogError("ofxGraycode") << "Cannot add this capture as the pixels object is empty";
        return;
    }
    const ofPixels* greyPixels;
    if (pixels.getNumChannels() > 1) {
        // average all channels down into a single-channel (mono) copy
        ofPixels* downsample = new ofPixels();
        downsample->allocate(pixels.getWidth(), pixels.getHeight(), OF_PIXELS_MONO);
        downsample->set(0, 0);
        const uint8_t* in = pixels.getData();
        uint8_t* out = downsample->getData();
        // advance the output pointer once per source pixel, i.e. whenever
        // i wraps around to the next pixel's first channel
        for (int i = 0; i < pixels.size(); i++, out += (i % pixels.getNumChannels() == 0)) {
            *out += *in++ / pixels.getNumChannels();
        }
        greyPixels = downsample;
    }
    else
        greyPixels = &pixels;
    if (this->payload->isOffline())
        captures.push_back(*greyPixels);
    else
        payload->readPixels(frame, *greyPixels);
    frame++;
    if (frame >= payload->getFrameCount()) {
        calc();
        frame = payload->getFrameCount();
    }
    if (greyPixels != &pixels)
        delete greyPixels;
}
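For the common case, openFrameworks can do the multi-channel-to-mono conversion itself. A sketch of an equivalent using ofPixels::setImageType, at the cost of an extra copy (note that OF's own conversion may weight channels differently than the plain average above):

// convert a copy to single-channel instead of averaging by hand
ofPixels grey = pixels;
grey.setImageType(OF_IMAGE_GRAYSCALE); // collapses to one channel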
Example 7: updateBlobPoints
//--------------------------------------------------------------
void ofApp::updateBlobPoints(ofPixels pixels, int step, int min, int max){
    emitterBlob.clear();
    int w = pixels.getWidth();
    int h = pixels.getHeight();
    int channels = pixels.getNumChannels();
    for(int x = 0; x < w; x += step){
        for(int y = 0; y < h; y += step){
            int i = (x + y * w) * channels;
            if(pixels[i] > min && pixels[i] < max){
                emitterBlob.setPoint(ofPoint(x, y) * transformWarpToProjector);
            }
        }
    }
}
Example 8: broadcast
//------------------------------------------------------------------------------
void BaseWebSocketSessionManager::broadcast(ofPixels& pixels)
{
    ofScopedLock lock(_mutex);
    WebSocketConnectionsIter iter = _connections.begin();
    int numChannels = pixels.getNumChannels();
    int width = pixels.getWidth();
    int height = pixels.getHeight();
    while(iter != _connections.end())
    {
        // NOTE: the per-connection send is stubbed out in the original source
        //sendFrame(*iter, frame);
        ++iter;
    }
}
Example 9: toDLib
bool toDLib(const ofPixels& inPix, array2d<rgb_pixel>& outPix){
    int width = inPix.getWidth();
    int height = inPix.getHeight();
    outPix.set_size(height, width);
    int chans = inPix.getNumChannels();
    const unsigned char* data = inPix.getData();
    for (int n = 0; n < height; n++)
    {
        const unsigned char* v = &data[n * width * chans];
        for (int m = 0; m < width; m++)
        {
            if (chans == 1)
            {
                unsigned char p = v[m];
                assign_pixel(outPix[n][m], p);
            }
            else {
                // index by chans (not a hard-coded 3) so RGBA input is read correctly
                rgb_pixel p;
                p.red = v[m * chans];
                p.green = v[m * chans + 1];
                p.blue = v[m * chans + 2];
                assign_pixel(outPix[n][m], p);
            }
        }
    }
    return true;
}
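A hedged usage sketch (somePixels is an illustrative name for any RGB ofPixels; assumes dlib is available):

dlib::array2d<dlib::rgb_pixel> dlibImg;
if (toDLib(somePixels, dlibImg)) {
    // dlibImg can now be fed to dlib, e.g. a frontal_face_detector
}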
Example 10: getColorAtPos
ofColor testApp::getColorAtPos(ofPixels & pixels, int x, int y){
    ofColor pickedColor;
    if( x >= 0 && x < pixels.getWidth() && y >= 0 && y < pixels.getHeight() ){
        unsigned char * pix = pixels.getPixels();
        int channels = pixels.getNumChannels();
        int posInMem = (y * pixels.getWidth() + x) * channels;
        unsigned char r = pix[posInMem];
        unsigned char g = pix[posInMem + 1];
        unsigned char b = pix[posInMem + 2];
        pickedColor.set(r, g, b);
    }
    return pickedColor;
}
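openFrameworks also ships this lookup as a built-in: ofPixels::getColor(x, y) performs the same channel-aware read (without the bounds guard), so a helper like the one above is mainly useful when you want an explicit bounds check.

// equivalent lookup via the built-in accessor; bounds are the caller's responsibility
ofColor picked = pixels.getColor(x, y);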
Example 11: convert
image ofxDarknet::convert( ofPixels & pix )
{
    unsigned char *data = ( unsigned char * ) pix.getData();
    int h = pix.getHeight();
    int w = pix.getWidth();
    int c = pix.getNumChannels();
    int step = w * c;
    // darknet stores images planar (one channel after another),
    // so re-order the interleaved ofPixels data channel-first
    image im = make_image( w, h, c );
    int i, j, k, count = 0;
    for( k = 0; k < c; ++k ) {
        for( i = 0; i < h; ++i ) {
            for( j = 0; j < w; ++j ) {
                im.data[ count++ ] = data[ i * step + j * c + k ] / 255.;
            }
        }
    }
    return im;
}
Example 12: readToPixels
//----------------------------------------------------------
void ofTexture::readToPixels(ofPixels & pixels) const {
#ifndef TARGET_OPENGLES
    pixels.allocate(texData.width, texData.height, ofGetImageTypeFromGLType(texData.glInternalFormat));
    ofSetPixelStoreiAlignment(GL_PACK_ALIGNMENT, pixels.getWidth(), pixels.getBytesPerChannel(), pixels.getNumChannels());
    glBindTexture(texData.textureTarget, texData.textureID);
    glGetTexImage(texData.textureTarget, 0, ofGetGlFormat(pixels), GL_UNSIGNED_BYTE, pixels.getData());
    glBindTexture(texData.textureTarget, 0);
#endif
}
Example 13: loadData
//----------------------------------------------------------
void ofTexture::loadData(const ofPixels & pix){
    ofSetPixelStorei(pix.getWidth(), pix.getBytesPerChannel(), pix.getNumChannels());
    loadData(pix.getPixels(), pix.getWidth(), pix.getHeight(), ofGetGlFormat(pix), ofGetGlType(pix));
}
Example 14: getContourPoints
/**
 Traces the contour of a binary image and returns the contour points in order.
 @param src input image pixel array (must be single-channel)
 */
vector<ofPoint> getContourPoints(const ofPixels src)
{
    // Reference: http://homepage2.nifty.com/tsugu/sotuken/binedge/#detailed
    // Neighbour directions around the current pixel p (vec):
    //  _____
    // |0 7 6|
    // |1 p 5|
    // |2 3 4|
    //  -----
    vector<ofPoint> dstPts;
    int w = src.getWidth();
    int h = src.getHeight();
    if (src.getNumChannels() != 1) return dstPts;
    // scan the image for a foreground pixel
    for(int i = 0; i < w * h; i++)
    {
        // skip pixels on the image border
        if (isSide(i, w, h)) continue;
        // when a foreground pixel is found, start tracing
        if( src.getPixels()[i] != 0 ) {
            int p = i;      // index of the current contour pixel
            int tp = 0;     // index of the neighbour being examined
            int vec = 2;    // start the search at the bottom-left neighbour
            bool isFirst = true;
            dstPts.push_back(convertIndex2Points(p, w));
            // follow the contour until we are back at the starting pixel;
            // when no contour pixel is found in a direction, execution
            // deliberately falls through to the next case
            while( p != i || isFirst ) {
                switch(vec) {
                case 0: // examine top-left
                    tp = p - w - 1;
                    // if the neighbour is not on the border, test it;
                    // otherwise fall through to the next direction
                    if (!isSide(tp, w, h)) {
                        if( src.getPixels()[tp] != 0 ) {
                            p = tp;
                            dstPts.push_back(convertIndex2Points(p, w));
                            vec = 6;
                            break;
                        }
                    }
                case 1: // examine left
                    tp = p - 1;
                    if (!isSide(tp, w, h)) {
                        if( src.getPixels()[tp] != 0 ) {
                            p = tp;
                            dstPts.push_back(convertIndex2Points(p, w));
                            vec = 0;
                            break;
                        }
                    }
                case 2: // examine bottom-left
                    tp = p + w - 1;
                    if (!isSide(tp, w, h)) {
                        if( src.getPixels()[tp] != 0 ) {
                            p = tp;
                            dstPts.push_back(convertIndex2Points(p, w));
                            isFirst = false;
                            vec = 0;
                            break;
                        }
                    }
                case 3: // examine below
                    tp = p + w;
                    if (!isSide(tp, w, h)) {
                        if( src.getPixels()[tp] != 0 ) {
                            p = tp;
                            dstPts.push_back(convertIndex2Points(p, w));
                            isFirst = false;
                            vec = 2;
                            break;
                        }
                    }
                case 4: // examine bottom-right
                    tp = p + w + 1;
                    if (!isSide(tp, w, h)) {
                        if( src.getPixels()[tp] != 0 ) {
                            p = tp;
                            dstPts.push_back(convertIndex2Points(p, w));
                            isFirst = false;
                            vec = 2;
                            break;
                        }
                    }
                case 5: // examine right
                    tp = p + 1;
                    if (!isSide(tp, w, h)) {
                        if( src.getPixels()[tp] != 0 ) {
                            p = tp;
                            dstPts.push_back(convertIndex2Points(p, w));
// ......... the rest of this example is omitted .........
Example 15: toPixels
// get pixels from an fbo or texture // untested
void ftUtil::toPixels(ofTexture& _tex, ofPixels& _pixels) {
    ofTextureData& texData = _tex.getTextureData();
    int format = texData.glInternalFormat;
    int readFormat, numChannels;
    switch(format){
        case GL_R8:    readFormat = GL_RED;  numChannels = 1; break; // GL_RED is the single-channel read format
        case GL_RG8:   readFormat = GL_RG;   numChannels = 2; break;
        case GL_RGB8:  readFormat = GL_RGB;  numChannels = 3; break;
        case GL_RGBA8: readFormat = GL_RGBA; numChannels = 4; break;
        default:
            ofLogWarning("ftUtil") << "toPixels: can only read 8-bit (unsigned char) textures to ofPixels";
            return;
    }
    if (_pixels.getWidth() != texData.width || _pixels.getHeight() != texData.height || _pixels.getNumChannels() != numChannels) {
        _pixels.allocate(texData.width, texData.height, numChannels);
    }
    ofSetPixelStoreiAlignment(GL_PACK_ALIGNMENT, texData.width, 1, numChannels);
    glBindTexture(texData.textureTarget, texData.textureID);
    glGetTexImage(texData.textureTarget, 0, readFormat, GL_UNSIGNED_BYTE, _pixels.getData());
    glBindTexture(texData.textureTarget, 0);
}
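A hypothetical usage sketch, assuming an allocated ofFbo named fbo:

ofPixels pix;
ftUtil::toPixels(fbo.getTexture(), pix); // pix is (re)allocated to match the texture
ofSaveImage(pix, "frame.png");           // e.g. save the readback to disk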