本文整理汇总了C++中Image类的典型用法代码示例。如果您正苦于以下问题:C++ Image类的具体用法?C++ Image怎么用?C++ Image使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Image类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: Image
// Make colors from file
void DataValues::MakeColor(STRING file)
{
Image * image = new Image();
if (!image->Load(file))
{
delete image;
return;
}
// Clear
memset(&colorData, 0, sizeof(Color) * DATAVALUES_AMOUNT);
colorData[AIR] = Color(255,255,255,0);
colorData[STONE] = GetColorFromImage(1, 0, image);
colorData[GRASS] = GetColorFromImage(0, 0, image);
colorData[DIRT] = GetColorFromImage(3, 0, image);
colorData[COBBLESTONE] = GetColorFromImage(0, 1, image);
colorData[WOOD] = GetColorFromImage(4, 0, image);
colorData[SAPLING] = Color(120,120,120,0);
colorData[BEDROCK] = GetColorFromImage(1, 1, image);
colorData[WATER] = GetColorFromImage(13, 12, image);
colorData[STATIONARYWATER] = colorData[WATER];
colorData[LAVA] = GetColorFromImage(13, 14, image);
colorData[STATIONARYLAVA] = colorData[LAVA];
colorData[SAND] = GetColorFromImage(2, 1, image);
colorData[GRAVEL] = GetColorFromImage(3, 1, image);
colorData[GOLDORE] = GetColorFromImage(0, 2, image);
colorData[IRONORE] = GetColorFromImage(1, 2, image);
colorData[COALORE] = GetColorFromImage(2, 2, image);
colorData[LOG] = GetColorFromImage(4, 1, image);
colorData[LEAVES] = GetColorFromImage(4, 3, image);
colorData[GLASS] = GetColorFromImage(1, 3, image);
colorData[WOOL] = GetColorFromImage(0, 4, image);
colorData[YELLOWFLOWER] = GetColorFromImage(13, 0, image);
colorData[REDROSE] = GetColorFromImage(12, 0, image);
colorData[GOLDBLOCK] = GetColorFromImage(7, 1, image);
colorData[IRONBLOCK] = GetColorFromImage(6, 1, image);
colorData[DOUBLESLAB_STONE] = GetColorFromImage(6, 0, image);
colorData[SLAB_STONE] = colorData[DOUBLESLAB_STONE];
colorData[BRICK] = GetColorFromImage(7, 0, image);
colorData[TNT] = GetColorFromImage(8, 0, image);
colorData[MOSSYCOBBLESTONE] = GetColorFromImage(4, 2, image);
colorData[OBSIDIAN] = GetColorFromImage(5, 2, image);
colorData[TORCH] = GetColorFromImage(0, 5, image);
colorData[FIRE] = Color(255,170,30,200);
colorData[WOODENSTAIRS] = GetColorFromImage(4, 0, image);
colorData[CHEST] = GetColorFromImage(9, 1, image);
colorData[DIAMONDORE] = GetColorFromImage(2, 3, image);
colorData[DIAMONDBLOCK] = GetColorFromImage(8, 1, image);
colorData[WORKBENCH] = GetColorFromImage(11, 2, image);
colorData[CROPS] = GetColorFromImage(15, 5, image);
colorData[SOIL] = GetColorFromImage(6, 5, image);
colorData[FURNACE] = GetColorFromImage(12, 2, image);
colorData[BURNINGFURNACE] = colorData[FURNACE];
colorData[SIGNPOST] = GetColorFromImage(4, 0, image);
colorData[WOODENDOOR] = GetColorFromImage(1, 6, image);
colorData[LADDER] = GetColorFromImage(3, 5, image);
colorData[MINECARTTRACK] = GetColorFromImage(0, 8, image);
colorData[COBBLESTONESTAIRS] = GetColorFromImage(0, 1, image);
colorData[IRONDOOR] = GetColorFromImage(2, 6, image);
colorData[REDSTONEORE] = GetColorFromImage(3, 3, image);
colorData[GLOWINGREDSTONEORE] = colorData[REDSTONEORE];
colorData[REDSTONETORCHON] = GetColorFromImage(3, 6, image);
colorData[REDSTONETORCHOFF] = GetColorFromImage(3, 7, image);
colorData[SNOW] = GetColorFromImage(2, 4, image);
colorData[ICE] = GetColorFromImage(3, 4, image);
colorData[SNOWBLOCK] = GetColorFromImage(2, 4, image);
colorData[CACTUS] = GetColorFromImage(5, 4, image);
colorData[CLAY] = GetColorFromImage(8, 4, image);
colorData[REED] = GetColorFromImage(9, 4, image);
colorData[JUKEBOX] = GetColorFromImage(11, 4, image);
colorData[PUMPKIN] = GetColorFromImage(6, 6, image);
colorData[BLOODSTONE] = GetColorFromImage(7, 6, image);
colorData[SLOWSAND] = GetColorFromImage(8, 6, image);
colorData[LIGHTSTONE] = GetColorFromImage(9, 6, image);
colorData[PORTAL] = GetColorFromImage(0, 14, image); // Temporarily
colorData[JACKOLANTERN] = colorData[PUMPKIN];
colorData[LAPIZLAZULIORE] = GetColorFromImage(0, 10, image);
colorData[LAPIZLAZULIBLOCK] = GetColorFromImage(0, 9, image);
colorData[DISPENSER] = colorData[FURNACE];
colorData[SANDSTONE] = GetColorFromImage(0, 11, image);
colorData[NOTEBLOCK] = GetColorFromImage(10, 4, image);
colorData[CAKE] = GetColorFromImage(9, 7, image);
colorData[BED] = GetColorFromImage(6, 8, image);
// colorData[NEWORE] = GetColorFromImage(6, 2, image);
// WOOLs color
colorData[WOOLWHITE] = colorData[WOOL];
colorData[WOOLORANGE] = GetColorFromImage(2, 13, image);
colorData[WOOLMAGENTA] = GetColorFromImage(2, 12, image);
colorData[WOOLLIGHTBLUE] = GetColorFromImage(2, 11, image);
colorData[WOOLYELLOW] = GetColorFromImage(2, 10, image);
colorData[WOOLLIMEGREEN] = GetColorFromImage(2, 9, image);
colorData[WOOLPINK] = GetColorFromImage(2, 8, image);
colorData[WOOLGRAY] = GetColorFromImage(2, 7, image);
colorData[WOOLLIGHTGRAY] = GetColorFromImage(1, 14, image);
//.........这里部分代码省略.........
示例2: while
void ProjectManager::_load_recent_projects() {
ProjectListFilter::FilterOption filter_option = project_filter->get_filter_option();
String search_term = project_filter->get_search_term();
while(scroll_childs->get_child_count()>0) {
memdelete( scroll_childs->get_child(0));
}
List<PropertyInfo> properties;
EditorSettings::get_singleton()->get_property_list(&properties);
Color font_color = get_color("font_color","Tree");
List<ProjectItem> projects;
List<ProjectItem> favorite_projects;
for(List<PropertyInfo>::Element *E=properties.front();E;E=E->next()) {
String _name = E->get().name;
if (!_name.begins_with("projects/") && !_name.begins_with("favorite_projects/"))
continue;
String path = EditorSettings::get_singleton()->get(_name);
if (filter_option == ProjectListFilter::FILTER_PATH && search_term!="" && path.findn(search_term)==-1)
continue;
String project = _name.get_slice("/",1);
String conf=path.plus_file("engine.cfg");
bool favorite = (_name.begins_with("favorite_projects/"))?true:false;
uint64_t last_modified = 0;
if (FileAccess::exists(conf))
last_modified = FileAccess::get_modified_time(conf);
String fscache = path.plus_file(".fscache");
if (FileAccess::exists(fscache)) {
uint64_t cache_modified = FileAccess::get_modified_time(fscache);
if ( cache_modified > last_modified )
last_modified = cache_modified;
}
ProjectItem item(project, path, conf, last_modified, favorite);
if (favorite)
favorite_projects.push_back(item);
else
projects.push_back(item);
}
projects.sort();
favorite_projects.sort();
for(List<ProjectItem>::Element *E=projects.front();E;) {
List<ProjectItem>::Element *next = E->next();
if (favorite_projects.find(E->get()) != NULL)
projects.erase(E->get());
E=next;
}
for(List<ProjectItem>::Element *E=favorite_projects.back();E;E=E->prev()) {
projects.push_front(E->get());
}
Ref<Texture> favorite_icon = get_icon("Favorites","EditorIcons");
for(List<ProjectItem>::Element *E=projects.front();E;E=E->next()) {
ProjectItem &item = E->get();
String project = item.project;
String path = item.path;
String conf = item.conf;
bool is_favorite = item.favorite;
Ref<ConfigFile> cf = memnew( ConfigFile );
Error err = cf->load(conf);
ERR_CONTINUE(err!=OK);
String project_name="Unnamed Project";
if (cf->has_section_key("application","name")) {
project_name = cf->get_value("application","name");
}
if (filter_option==ProjectListFilter::FILTER_NAME && search_term!="" && project_name.findn(search_term)==-1)
continue;
Ref<Texture> icon;
if (cf->has_section_key("application","icon")) {
String appicon = cf->get_value("application","icon");
if (appicon!="") {
Image img;
Error err = img.load(appicon.replace_first("res://",path+"/"));
if (err==OK) {
img.resize(64,64);
Ref<ImageTexture> it = memnew( ImageTexture );
it->create_from_image(img);
icon=it;
}
}
}
//.........这里部分代码省略.........
示例3: main
int main( int argc, char* argv[])
{
// On déclare notre pointeur sur SourceVideo
VideoSource *src;
CvVideoWriter *writer = 0;
int isColor = 1;
int fps = 30; // or 30
int frameW = 640; // 744 for firewire cameras
int frameH = 480; // 480 for firewire cameras
writer=cvCreateVideoWriter("out.avi",CV_FOURCC('P','I','M','1'),
fps,cvSize(frameW,frameH),isColor);
if( argc > 1 ) {
// Initialisation : fichier vidéo
string path(argv[1]);
src = new VideoFile( path, (argc > 2) );
}
else {
// Initialisation : webcam
src = new Camera( 0 );
}
// Initialisation du flux vidéo
try {
src->open();
}
catch( Exception &e ) {
// Si une exception se produit, on l'affiche et on quitte.
cout << e.what() << endl;
delete src;
return 10;
}
// Si tout va bien, on affiche les informations du flux vidéo.
cout << src->getInfos() << endl;
cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
Image img;
char key = 'a';
// Début de la mesure du frame rate
debut_mesure = getTimeMillis();
while( key != 'q' ) {
try {
src->getFrame( img );
}
catch(Exception &e) {
cout << "\n" << e.what() << endl;
break;
}
/*CvScalar scalaire;
scalaire.val[0] = 120;
scalaire.val[1] = scalaire.val[2] = 0;
img.colorFilter(scalaire);*/
img.colorPaint2(top_left,bottom_right);
if (bottom_right.x < 720) {
bottom_right.x++;
}
if (bottom_right.y < 576) {
bottom_right.y++;
}
if (top_left.x > 0) {
top_left.x--;
}
if (top_left.y > 0) {
top_left.y--;
}
//img.colorBlacknWhite();
cvShowImage( "video", img );
cvWriteFrame(writer,img);
key = cvWaitKey( 10 );
// Affichage du frame rate
cout << "\rFrame Rate : " << setw(5);
cout << left << setprecision(4);
cout << calculFrameRate() << " FPS" << flush;
}
cout << endl;
cvDestroyWindow( "video" );
delete src;
return 0;
}
示例4: new
// Scheduler callback run on the render thread: consumes one decoded image
// from the async-load queue, turns it into a Texture2D, caches it and
// notifies the caller. The `dt` parameter is required by the scheduler
// signature but is unused here.
void TextureCache::addImageAsyncCallBack(float dt)
{
// the image is generated in loading thread
std::deque<ImageInfo*> *imagesQueue = _imageInfoQueue;
// Only hold the mutex while touching the shared queue; it is released
// before any texture work so the loading thread is never blocked on it.
_imageInfoMutex.lock();
if (imagesQueue->empty())
{
_imageInfoMutex.unlock();
}
else
{
// Pop exactly one entry per callback invocation.
ImageInfo *imageInfo = imagesQueue->front();
imagesQueue->pop_front();
_imageInfoMutex.unlock();
AsyncStruct *asyncStruct = imageInfo->asyncStruct;
Image *image = imageInfo->image;
const std::string& filename = asyncStruct->filename;
Texture2D *texture = nullptr;
if (image)
{
// generate texture in render thread
texture = new (std::nothrow) Texture2D();
texture->initWithImage(image);
#if CC_ENABLE_CACHE_TEXTURE_DATA
// cache the texture file name
VolatileTextureMgr::addImageTexture(texture, filename);
#endif
// cache the texture. retain it, since it is added in the map
_textures.insert( std::make_pair(filename, texture) );
texture->retain();
texture->autorelease();
}
else
{
// The loader produced no image — presumably the texture was already
// cached by an earlier request, so look it up instead.
auto it = _textures.find(asyncStruct->filename);
if(it != _textures.end())
texture = it->second;
}
// Invoke the user callback with the resulting texture (may be nullptr
// if neither decode nor cache lookup succeeded).
if (asyncStruct->callback)
{
asyncStruct->callback(texture);
}
// Balance the retain taken by the loading thread on the decoded image.
if(image)
{
image->release();
}
delete asyncStruct;
delete imageInfo;
// When the last pending async request is done, stop scheduling this
// callback until a new request re-registers it.
--_asyncRefCount;
if (0 == _asyncRefCount)
{
Director::getInstance()->getScheduler()->unschedule(schedule_selector(TextureCache::addImageAsyncCallBack), this);
}
}
}
示例5: Make
// Composites the second source image (i2) over a copy of the first (i1)
// at the top-left corner and returns the combined result. Neither source
// member is modified.
Image sImageAdd::Make() const
{
    Image composited = i1;
    Over(composited, Point(0, 0), i2, i2.GetSize());
    return composited;
}
示例6: Image
/** Element-wise addition: returns a new image whose pixel buffer is the
 *  sum of this image's data and image2's data. Neither operand is
 *  modified.
 *  @param image2 the image to add to this one (must be compatible in
 *                size — presumably enforced by the data type's operator+;
 *                TODO confirm).
 *  @return a freshly constructed Image holding the summed data. */
Image operator+(const Image& image2) const   // BUGFIX: non-mutating operator is now const-qualified
{
    // Return directly instead of through a named temporary.
    return Image(*this, _data + image2.data());
}
示例7: GetContainer
void
ImageLayerD3D10::RenderLayer()
{
ImageContainer *container = GetContainer();
if (!container) {
return;
}
AutoLockImage autoLock(container);
Image *image = autoLock.GetImage();
if (!image) {
return;
}
gfxIntSize size = mScaleMode == SCALE_NONE ? image->GetSize() : mScaleToSize;
SetEffectTransformAndOpacity();
ID3D10EffectTechnique *technique;
if (image->GetFormat() == Image::CAIRO_SURFACE || image->GetFormat() == Image::REMOTE_IMAGE_BITMAP)
{
bool hasAlpha = false;
if (image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) {
RemoteBitmapImage *remoteImage =
static_cast<RemoteBitmapImage*>(image);
if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) {
nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData();
dat->mTexture = DataToTexture(device(), remoteImage->mData, remoteImage->mStride, remoteImage->mSize);
if (dat->mTexture) {
device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView));
image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget());
}
}
hasAlpha = remoteImage->mFormat == RemoteImageData::BGRA32;
} else {
CairoImage *cairoImage =
static_cast<CairoImage*>(image);
if (!cairoImage->mSurface) {
return;
}
if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) {
nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData();
dat->mTexture = SurfaceToTexture(device(), cairoImage->mSurface, cairoImage->mSize);
if (dat->mTexture) {
device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView));
image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget());
}
}
hasAlpha = cairoImage->mSurface->GetContentType() == gfxASurface::CONTENT_COLOR_ALPHA;
}
TextureD3D10BackendData *data =
static_cast<TextureD3D10BackendData*>(image->GetBackendData(LayerManager::LAYERS_D3D10));
if (!data) {
return;
}
nsRefPtr<ID3D10Device> dev;
data->mTexture->GetDevice(getter_AddRefs(dev));
if (dev != device()) {
return;
}
if (hasAlpha) {
if (mFilter == gfxPattern::FILTER_NEAREST) {
technique = effect()->GetTechniqueByName("RenderRGBALayerPremulPoint");
} else {
technique = effect()->GetTechniqueByName("RenderRGBALayerPremul");
}
} else {
if (mFilter == gfxPattern::FILTER_NEAREST) {
technique = effect()->GetTechniqueByName("RenderRGBLayerPremulPoint");
} else {
technique = effect()->GetTechniqueByName("RenderRGBLayerPremul");
}
}
effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(data->mSRView);
effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
ShaderConstantRectD3D10(
(float)0,
(float)0,
(float)size.width,
(float)size.height)
);
} else if (image->GetFormat() == Image::PLANAR_YCBCR) {
PlanarYCbCrImage *yuvImage =
static_cast<PlanarYCbCrImage*>(image);
//.........这里部分代码省略.........
示例8: display
// GLUT display callback for the KFusion demo. Each frame it:
//  1) raycasts a synthetic "ground truth" view and draws it on row 0,
//  2) feeds the synthetic depth into KFusion, tracks, and (if tracking
//     succeeded) integrates + raycasts the model (row 1),
//  3) raycasts the current model from the tracked pose (row 2),
// sampling per-stage timings into the global Stats collector throughout.
// NOTE(review): relies on file-scope globals (kfusion, vertex, normal,
// depth, rgb, Stats, counter, trans/rot/preTrans) declared outside this view.
void display(void) {
static bool integrate = true;
const uint2 imageSize = kfusion.configuration.inputSize;
const double start = Stats.start();
// Synthetic reference view: raycast the reference volume from the
// animated camera pose (trans * rot * preTrans).
renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), reference, toMatrix4( trans * rot * preTrans ) * getInverseCameraMatrix(kfusion.configuration.camera), kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(), 0.01 );
cudaDeviceSynchronize();
Stats.sample("ground raycast");
Stats.sample("ground copy");
// Row 0: draw the ground-truth vertex / normal / depth images side by side.
glRasterPos2i(0,0);
glDrawPixels(vertex);
glRasterPos2i(imageSize.x, 0);
glDrawPixels(normal);
glRasterPos2i(imageSize.x * 2, 0);
glDrawPixels(depth);
Stats.sample("ground draw");
// Feed the synthetic depth map into the fusion pipeline.
kfusion.setDepth( depth.getDeviceImage() );
cudaDeviceSynchronize();
const double track_start = Stats.sample("process depth");
// Skip tracking on the very first frame(s); there is no model yet.
if(counter > 1){
integrate = kfusion.Track();
cudaDeviceSynchronize();
Stats.sample("track");
}
renderTrackResult(rgb.getDeviceImage(), kfusion.reduction);
cudaDeviceSynchronize();
Stats.sample("track render");
Stats.sample("track copy");
// Only fuse the new depth into the volume when tracking converged.
if(integrate){
kfusion.Integrate();
cudaDeviceSynchronize();
Stats.sample("integration");
kfusion.Raycast();
cudaDeviceSynchronize();
Stats.sample("raycast");
vertex = kfusion.vertex;
normal = kfusion.normal;
Stats.sample("raycast get");
}
// Row 1: model raycast result plus the per-pixel tracking status image.
glRasterPos2i(0,imageSize.y * 1);
glDrawPixels(vertex);
glRasterPos2i(imageSize.x, imageSize.y * 1);
glDrawPixels(normal);
glRasterPos2i(2 * imageSize.x, imageSize.y * 1);
glDrawPixels(rgb);
Stats.sample("track draw");
Stats.sample("total track", Stats.get_time() - track_start, PerfStats::TIME);
// Independent view of the integrated volume from the tracked camera pose.
renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), kfusion.integration, kfusion.pose * getInverseCameraMatrix(kfusion.configuration.camera), kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(), 0.7 * kfusion.configuration.mu );
cudaDeviceSynchronize();
Stats.sample("view raycast");
Stats.sample("view copy");
// Row 2: draw that view.
glRasterPos2i(0,imageSize.y * 2);
glDrawPixels(vertex);
glRasterPos2i(imageSize.x, imageSize.y * 2);
glDrawPixels(normal);
glRasterPos2i(imageSize.x * 2, imageSize.y * 2);
glDrawPixels(depth);
Stats.sample("view draw");
Stats.sample("events");
Stats.sample("total all", Stats.get_time() - start, PerfStats::TIME);
// Dump and reset the timing statistics once every 30 frames.
if(counter % 30 == 0){
Stats.print();
Stats.reset();
cout << endl;
}
++counter;
printCUDAError();
glutSwapBuffers();
}
示例9: createRenderTexture
// Loads this texture's pixel data. Render targets are created directly;
// otherwise the image file(s) named by mName are resolved through the
// resource-group manager and uploaded via _loadImages(). Cube maps are
// loaded either from a single .dds file or from six per-face files
// (baseName + "_rt"/"_lf"/"_up"/"_dn"/"_fr"/"_bk" + ext).
void GLTexture::loadImpl()
{
if( mUsage & TU_RENDERTARGET )
{
createRenderTexture();
}
else
{
// Split "name.ext" so per-face filenames can be synthesized below;
// a missing extension is a hard error.
String baseName, ext;
size_t pos = mName.find_last_of(".");
if( pos == String::npos )
OGRE_EXCEPT(
Exception::ERR_INVALIDPARAMS,
"Unable to load image file '"+ mName + "' - invalid extension.",
"GLTexture::loadImpl" );
baseName = mName.substr(0, pos);
ext = mName.substr(pos+1);
if(mTextureType == TEX_TYPE_1D || mTextureType == TEX_TYPE_2D ||
mTextureType == TEX_TYPE_3D)
{
Image img;
// find & load resource data intro stream to allow resource
// group changes if required
DataStreamPtr dstream =
ResourceGroupManager::getSingleton().openResource(
mName, mGroup, true, this);
img.load(dstream, ext);
// If this is a cube map, set the texture type flag accordingly.
if (img.hasFlag(IF_CUBEMAP))
mTextureType = TEX_TYPE_CUBE_MAP;
// If this is a volumetric texture set the texture type flag accordingly.
if(img.getDepth() > 1)
mTextureType = TEX_TYPE_3D;
// Call internal _loadImages, not loadImage since that's external and
// will determine load status etc again
ConstImagePtrList imagePtrs;
imagePtrs.push_back(&img);
_loadImages( imagePtrs );
}
else if (mTextureType == TEX_TYPE_CUBE_MAP)
{
if(StringUtil::endsWith(getName(), ".dds"))
{
// XX HACK there should be a better way to specify whether
// all faces are in the same file or not
Image img;
// find & load resource data intro stream to allow resource
// group changes if required
DataStreamPtr dstream =
ResourceGroupManager::getSingleton().openResource(
mName, mGroup, true, this);
img.load(dstream, ext);
// Call internal _loadImages, not loadImage since that's external and
// will determine load status etc again
ConstImagePtrList imagePtrs;
imagePtrs.push_back(&img);
_loadImages( imagePtrs );
}
else
{
// Six separate face files. NOTE: imagePtrs holds pointers into
// the local `images` vector, so the vector must outlive
// _loadImages (it does — both are scoped to this branch).
std::vector<Image> images(6);
ConstImagePtrList imagePtrs;
static const String suffixes[6] = {"_rt", "_lf", "_up", "_dn", "_fr", "_bk"};
for(size_t i = 0; i < 6; i++)
{
String fullName = baseName + suffixes[i] + "." + ext;
// find & load resource data intro stream to allow resource
// group changes if required
DataStreamPtr dstream =
ResourceGroupManager::getSingleton().openResource(
fullName, mGroup, true, this);
images[i].load(dstream, ext);
imagePtrs.push_back(&images[i]);
}
_loadImages( imagePtrs );
}
}
else
OGRE_EXCEPT( Exception::ERR_NOT_IMPLEMENTED, "**** Unknown texture type ****", "GLTexture::load" );
}
}
示例10: maScreenSetFullscreen
void LoginScreen::initializeScreen(MAUtil::String &os)
{
maScreenSetFullscreen(1);
MAExtent ex = maGetScrSize();
int screenWidth = EXTENT_X(ex);
int screenHeight = EXTENT_Y(ex);
int centerH = screenWidth / 2;
int buttonWidth = (int)((float)screenWidth * 0.75);
if(screenHeight > 1000 && os.find("Android", 0) < 0)
{
buttonWidth = (int)((float)screenWidth * 0.4);
}
int buttonHeight = (int)((float)screenWidth * 0.15);
if(screenHeight > 1000 && os.find("Android", 0) < 0)
{
buttonHeight = (int)((float)screenWidth * 0.07);
}
int buttonSpacing = (int)((float)buttonHeight * 0.3);
if(os.find("Windows", 0) >= 0)
{
buttonSpacing = (int)((float)buttonHeight * 0.1);
}
int editBoxHeight = (int)((float)screenHeight * 0.07);
if(screenHeight > 1000 && os.find("Android", 0) < 0)
{
editBoxHeight = (int)((float)screenHeight * 0.02);
}
int logoWidth = (int)((float)screenWidth * 0.75);
int layoutTop = (int)((float)screenHeight * 0.3);
if(screenHeight > 1000 && os.find("Android", 0) < 0)
{
layoutTop = (int)((float)screenHeight * 0.25);
}
int labelHeight = (int)((float)screenHeight * 0.05);
if(screenHeight > 1000 && os.find("Android", 0) < 0)
{
labelHeight = (int)((float)screenHeight * 0.025);
}
int labelWidth = screenWidth;
if(os.find("Android", 0) >= 0)
{
labelWidth = buttonWidth;
}
int labelSpacing = (int)((float)screenHeight * 0.02);
if(screenHeight > 1000 && os.find("Android", 0) < 0)
{
labelSpacing = (int)((float)labelSpacing * 0.01);
}
int layoutHeight = (buttonHeight + buttonSpacing) * 2;
int ipBoxButtonSpacing = (int)((float)screenHeight * 0.03);
mLoginScreen = new Screen();
//The reload Logo
Image* logo = new Image();
logo->setImage(LOGO_IMAGE);
logo->wrapContentHorizontally();
logo->wrapContentVertically();
logo->setWidth(logoWidth);
logo->setScaleMode(IMAGE_SCALE_PRESERVE_ASPECT);
logo->setPosition(centerH - logoWidth/2, screenHeight / 12);
//The connect to server button
if(os == "iPhone OS") //Android image buttons do not support text
{
mServerConnectButton = new ImageButton();
((ImageButton*)mServerConnectButton)->addButtonListener(this);
((ImageButton*)mServerConnectButton)->setBackgroundImage(CONNECT_BG);
mServerConnectButton->setFontColor(0x000000);
}
else
{
mServerConnectButton = new Button();
((Button*)mServerConnectButton)->addButtonListener(this);
}
mServerConnectButton->setText("Connect");
mServerConnectButton->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
mServerConnectButton->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
mServerConnectButton->setWidth(buttonWidth);
mServerConnectButton->setHeight(buttonHeight);
mServerConnectButton->setPosition(centerH - buttonWidth/2, layoutHeight - buttonHeight);
//The edit box that receives the server IP
mServerIPBox = new EditBox();
mServerIPBox->setWidth(buttonWidth);
//mServerIPBox->setHeight(editBoxHeight);
mServerIPBox->addEditBoxListener(this);
mServerIPBox->setPosition(centerH - buttonWidth/2,layoutHeight - buttonHeight - editBoxHeight - ipBoxButtonSpacing);
//Label for the server IP edit box
Label *serverIPLabel = new Label();
serverIPLabel->setText("Server IP:");
serverIPLabel->setFontColor(0xFFFFFF);
serverIPLabel->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
serverIPLabel->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
serverIPLabel->setWidth(labelWidth);
serverIPLabel->setPosition(centerH - labelWidth/2, layoutHeight - buttonHeight - labelHeight - editBoxHeight - ipBoxButtonSpacing);
//.........这里部分代码省略.........
示例11: im
// Paints the button's current image, either at its natural size (centred),
// stretched to fill the button, or fitted while preserving aspect ratio,
// then hands the final drawing off to the LookAndFeel with the overlay
// colour/opacity matching the button state.
void ImageButton::paintButton (Graphics& g,
                               bool isMouseOverButton,
                               bool isButtonDown)
{
    // A disabled button is always rendered in its normal state.
    if (! isEnabled())
    {
        isMouseOverButton = false;
        isButtonDown = false;
    }

    Image im (getCurrentImage());

    if (! im.isValid())
        return;

    const int iw = im.getWidth();
    const int ih = im.getHeight();
    int w = getWidth();
    int h = getHeight();
    int x = (w - iw) / 2;
    int y = (h - ih) / 2;

    if (! scaleImageToFit)
    {
        // Natural size, centred within the button.
        w = iw;
        h = ih;
    }
    else if (preserveProportions)
    {
        // Fit inside the button while keeping the image's aspect ratio.
        const float imRatio   = ih / (float) iw;
        const float destRatio = h / (float) w;

        int fittedW, fittedH;

        if (imRatio > destRatio)
        {
            fittedW = roundToInt (h / imRatio);
            fittedH = h;
        }
        else
        {
            fittedW = w;
            fittedH = roundToInt (w * imRatio);
        }

        x = (w - fittedW) / 2;
        y = (h - fittedH) / 2;
        w = fittedW;
        h = fittedH;
    }
    else
    {
        // Stretch to cover the whole button area.
        x = 0;
        y = 0;
    }

    imageBounds.setBounds (x, y, w, h);

    const bool useDownImage = isButtonDown || getToggleState();

    getLookAndFeel().drawImageButton (g, &im, x, y, w, h,
                                      useDownImage ? downOverlay
                                                   : (isMouseOverButton ? overOverlay
                                                                        : normalOverlay),
                                      useDownImage ? downOpacity
                                                   : (isMouseOverButton ? overOpacity
                                                                        : normalOpacity),
                                      *this);
}
示例12: CC_SAFE_RELEASE_NULL
// (Re)creates the FPS / seconds-per-frame / draw-call stat labels from the
// built-in fps-image atlas. Safe to call repeatedly: existing labels and
// the cached "/cc_fps_images" texture are released first.
void Director::createStatsLabel()
{
    Texture2D *texture = nullptr;
    TextureCache *textureCache = TextureCache::getInstance();

    // Tear down any previously created labels and their cached texture.
    if (_FPSLabel && _SPFLabel)
    {
        CC_SAFE_RELEASE_NULL(_FPSLabel);
        CC_SAFE_RELEASE_NULL(_SPFLabel);
        CC_SAFE_RELEASE_NULL(_drawsLabel);
        textureCache->removeTextureForKey("/cc_fps_images");
        FileUtils::getInstance()->purgeCachedEntries();
    }

    // Temporarily force RGBA4444 so the atlas texture stays small; the
    // previous format is restored below on every exit path.
    Texture2D::PixelFormat currentFormat = Texture2D::getDefaultAlphaPixelFormat();
    Texture2D::setDefaultAlphaPixelFormat(Texture2D::PixelFormat::RGBA4444);

    unsigned char *data = nullptr;
    unsigned int dataLength = 0;
    getFPSImageData(&data, &dataLength);

    Image* image = new Image();
    bool isOK = image->initWithImageData(data, dataLength);
    if (! isOK) {
        CCLOGERROR("%s", "Fails: init fps_images");
        // BUGFIX: the original leaked `image` and left the global default
        // pixel format switched to RGBA4444 on this early return.
        CC_SAFE_RELEASE(image);
        Texture2D::setDefaultAlphaPixelFormat(currentFormat);
        return;
    }

    texture = textureCache->addImage(image, "/cc_fps_images");
    CC_SAFE_RELEASE(image);

    /*
     We want to use an image which is stored in the file named ccFPSImage.c
     for any design resolutions and all resource resolutions.
     To achieve this,
     Firstly, we need to ignore 'contentScaleFactor' in 'AtlasNode' and 'LabelAtlas'.
     So I added a new method called 'setIgnoreContentScaleFactor' for 'AtlasNode',
     this is not exposed to game developers, it's only used for displaying FPS now.
     Secondly, the size of this image is 480*320, to display the FPS label with correct size,
     a factor of design resolution ratio of 480x320 is also needed.
     */
    float factor = EGLView::getInstance()->getDesignResolutionSize().height / 320.0f;

    _FPSLabel = new LabelAtlas();
    _FPSLabel->setIgnoreContentScaleFactor(true);
    _FPSLabel->initWithString("00.0", texture, 12, 32 , '.');
    _FPSLabel->setScale(factor);

    _SPFLabel = new LabelAtlas();
    _SPFLabel->setIgnoreContentScaleFactor(true);
    _SPFLabel->initWithString("0.000", texture, 12, 32, '.');
    _SPFLabel->setScale(factor);

    _drawsLabel = new LabelAtlas();
    _drawsLabel->setIgnoreContentScaleFactor(true);
    _drawsLabel->initWithString("000", texture, 12, 32, '.');
    _drawsLabel->setScale(factor);

    // Restore the caller-visible default pixel format.
    Texture2D::setDefaultAlphaPixelFormat(currentFormat);

    _drawsLabel->setPosition(Point(0, 34*factor) + CC_DIRECTOR_STATS_POSITION);
    _SPFLabel->setPosition(Point(0, 17*factor) + CC_DIRECTOR_STATS_POSITION);
    _FPSLabel->setPosition(CC_DIRECTOR_STATS_POSITION);
}
示例13: LoadOpenGLExt
//.........这里部分代码省略.........
-526.3f, 13.4f, -605.75f,
0.0f, 1.0f, 0.0f );
glViewport(0, 0, m_width, m_height);
glClearColor(0.5f, 0.8f, 1.0f, 1.0f);
glEnable(GL_CULL_FACE);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glMatrixMode(GL_PROJECTION);
projMat.BuildProjection(50.0f, (float)m_width / (float)m_height, 0.1f, 50000.0f);
glLoadMatrixf(projMat);
glMatrixMode(GL_MODELVIEW);
glGenRenderbuffersEXT(1, &g_renderBuffer[0]);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_renderBuffer[0]);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width, m_height);
glGenRenderbuffersEXT(1, &g_renderBuffer[1]);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_renderBuffer[1]);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width, m_height);
glGenFramebuffersEXT(1, &g_frameBuffer);
glGenRenderbuffersEXT(1, &g_depthRenderBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_depthRenderBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width, m_height);
//glGenFramebuffersEXT(1, &g_frameBuffer2);//
glGenRenderbuffersEXT(1, &g_depthRenderBuffer2);//
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_depthRenderBuffer2);//
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, 512, 512);//
//glGenFramebuffersEXT(1, &g_frameBufferHDR);//
glGenRenderbuffersEXT(1, &g_depthRenderBufferHDR);//
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_depthRenderBufferHDR);//
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width / 1, m_height / 1);//
glGenRenderbuffersEXT(1, &g_renderBufferBlurdHDR);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_renderBufferBlurdHDR);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width / 4, m_height / 4);
GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
switch(status)
{
case GL_FRAMEBUFFER_COMPLETE_EXT:
//MessageBox(NULL,"GL_FRAMEBUFFER_COMPLETE_EXT!","SUCCESS",MB_OK|MB_ICONEXCLAMATION);
break;
case GL_FRAMEBUFFER_UNSUPPORTED_EXT:
MessageBox(NULL,"GL_FRAMEBUFFER_UNSUPPORTED_EXT!","ERROR",MB_OK|MB_ICONEXCLAMATION);
exit(0);
break;
default:
exit(0);
}
g_texFP16.CreateRenderTarget(m_width, m_height, 3, GL_RGB16F_ARB, true);
g_texFP162.CreateRenderTarget(m_width, m_height, 3, GL_RGB16F_ARB, true);
g_texHDR.CreateRenderTarget(m_width / 1, m_height / 1, 3, GL_RGB16F_ARB, true);
g_texHBluredHDR.CreateRenderTarget(m_width / 4, m_height / 4, 3, GL_RGB16F_ARB, true);
g_texVBluredHDR.CreateRenderTarget(m_width / 4, m_height / 4, 3, GL_RGB16F_ARB, true);
g_texWaterReflect.CreateRenderTarget(512, 512, 3, GL_RGB16F_ARB, true);
Image image;
image.Load("data\\textures\\water_nmap.png");
image.ToNormalMap(2);
water.Load2D(image, GL_REPEAT, GL_REPEAT, GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_LINEAR, true);
image.Load("data\\textures\\teste.bmp");
m_skyGradient.Load1D(image, GL_CLAMP, GL_LINEAR, GL_LINEAR);
image.Load("data\\textures\\sun.png");
m_texSun.Load2D(image, GL_REPEAT, GL_REPEAT, GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_LINEAR, true);
CubeMapFilePath cubeMapFilePath;
strcpy(cubeMapFilePath.posX, "data\\textures\\underwater_nmap.png");
strcpy(cubeMapFilePath.negX, "data\\textures\\underwater_nmap.png");
strcpy(cubeMapFilePath.posY, "data\\textures\\underwater_nmap.png");
strcpy(cubeMapFilePath.negY, "data\\textures\\underwater_nmap.png");
strcpy(cubeMapFilePath.posZ, "data\\textures\\underwater_nmap.png");
strcpy(cubeMapFilePath.negZ, "data\\textures\\underwater_nmap.png");
m_texCubeUnderwaterNormMap.LoadCubeMap(cubeMapFilePath, GL_REPEAT, GL_REPEAT, GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_LINEAR, 0);
m_texViewPort.CreateRenderTarget(m_width, m_height, 3, GL_RGB, true);
m_mouseLockedX = m_width * 0.5;
m_mouseLockedY = m_height * 0.5;
CenterMouse();
}
示例14: Problem_2_B_b
void Problem_2_B_b()
{
string strName("testOCR-bill2.raw");
Image imTrainingBill(strName,316,901,3), *pimGray, *pimBin;
int iThresh, iRows = 316, iCols = 901;
cout<<"\n\n ** ** ** ** ** ** ** ** ** ** ** ** **\n";
cout<<"\n Test IMage 2: \n";
imTrainingBill.AllocImageMem();
imTrainingBill.ReadImageData();
pimGray = imTrainingBill.Convert2Gray();
pimGray->WriteImageData();
pimGray->PlotHistogram();
cout<<"\n Enter Threshold for Binary Conversion: ";
cin>>iThresh;
pimBin = pimGray->Convert2Bin(iThresh);
pimBin->WriteImageData();
//Charachter Segmentation
int **iaLabels, iLabelCount;
list<int> plistLabels;
cout<<"\n* Segmenting Test Charachters..";
iaLabels = SegmentCharachters(pimBin,&plistLabels);
//Paint Labeled Image
for(int i = 0;i<iRows;i++)
{
for(int j = 0;j<iCols;j++)
{
Pixel pxP;
if(iaLabels[i][j] == 0)
{
pxP.iBW = (255);
}
else
{
pxP.iBW = (iaLabels[i][j]);
}
pimGray->SetPixel(i,j,pxP);
}
}
pimGray->WriteImageData();
//Charachter Seperation
cout<<"\n* Extracting Test Charachters..";
int i = 1;
Image *pimCharachter[60];
int *iaLabelArr = new int[60];
for(list<int>::iterator it = plistLabels.begin();it != plistLabels.end();++it)
{
int iLabel = *it;
pimCharachter[i-1] = ExtractCharachter(iaLabels, iRows, iCols, iLabel);
iaLabelArr[i-1] = iLabel;
string strName;
char szName[30];
sprintf(szName,"Test_2Char_%d.raw",i);
strName = string::basic_string(szName);
pimCharachter[i-1]->SetImageName(strName);
pimCharachter[i-1]->WriteImageData();
i++;
}
cout<<"\n*Total Number of Charachters in Test2 image: "<<i-1;
//Features
Feature *dfFeatures[60];
for(int j = 0;j<i-1;j++)
{
dfFeatures[j] = ComputeFeatures(pimCharachter[j]);
dfFeatures[j]->iLabel = iaLabelArr[j];
}
//CSV File
WriteFeatureFile(dfFeatures,i-1,"P2A_FeatureSet_test1.csv");
//Identify T and L
char cT;
bool fT = false;
bool fL = false;
bool fO = false;
Image *pimChar;
int iXmin,iXmax,iYmin,iYmax;
list<int> pLabelList;
bool bDone = false;
//.........这里部分代码省略.........
示例15: renderBirmingham
void Southfall::renderBirmingham()
{
Image *BirminghamIM = &imageLibrary->BirminghamIM;
if(BirminghamIM->getScale() < .1)
currentState = GAME;
BirminghamIM->setX(SCREEN_WIDTH/2-BirminghamIM->getWidth()*BirminghamIM->getScale()/2);
BirminghamIM->setY(SCREEN_HEIGHT/2-BirminghamIM->getHeight()*BirminghamIM->getScale()/2);
BirminghamIM->draw();
BirminghamIM->setScale(BirminghamIM->getScale()*.996);
BirminghamIM->setRadians(BirminghamIM->getRadians()+birminghamRot);
birminghamRot += .0001;
}