本文整理汇总了C++中VideoBuffer类的典型用法代码示例。如果您正苦于以下问题:C++ VideoBuffer类的具体用法?C++ VideoBuffer怎么用?C++ VideoBuffer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VideoBuffer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
void main()
{
    // --- Video buffer setup ---
    static VideoBuffer vid;                 // one video buffer, attached to one cube
    vid.initMode(BG0_SPR_BG1);              // mode with a BG0 backdrop, sprites, and a BG1 overlay
    vid.attach(0);                          // bind the buffer to the cube with ID 0

    // --- Background layer (BG0) ---
    // Fill BG0 with the backdrop image defined in assets.lua.
    vid.bg0.image(vec(0,0), MyBG0Image);

    // --- Sprite layer ---
    vid.sprites[0].setImage(MyRedSprite);   // first sprite...
    vid.sprites[0].move(15, 15);            // ...positioned where we want it
    vid.sprites[1].setImage(MyBlueSprite);
    vid.sprites[1].move(93, 60);

    // --- Foreground layer (BG1) ---
    // Unmask an 8x8-tile window at tile (4,4), then draw the overlay image
    // into exactly that window.
    vid.bg1.setMask(BG1Mask::filled(vec(4,4), vec(8,8)));
    vid.bg1.image(vec(4,4), MyBG1Image);

    // Main loop: nothing changes, just keep presenting frames.
    for (;;)
        System::paint();
}
示例2: main
void main()
{
CubeID cube(0);
VideoBuffer vid;
vid.initMode(BG0);
vid.attach(cube);
for (int x = -1; x < 17; x++) {
drawColumn(vid, x);
}
/*
* Scroll horizontally through our background based on the accelerometer's X axis.
*
* We can scroll with pixel accuracy within a column of tiles via bg0.setPanning().
* When we get to either edge of the currently plotted tiles, draw a new column
* of tiles from the source image.
*
* Because BG0 is 1 tile wider and taller than the viewport itself, we can pre-load
* the next column of tiles into the column at the edge before it comes into view.
*/
float x = 0;
int prev_xt = 0;
for (;;) {
// Scroll based on accelerometer tilt
Int2 accel = vid.physicalAccel().xy();
// Floating point pixels
x += accel.x * (40.0f / 128.0f);
// Integer pixels
int xi = x + 0.5f;
// Integer tiles
int xt = x / 8;
while (prev_xt < xt) {
// Fill in new tiles, just past the right edge of the screen
drawColumn(vid, prev_xt + 17);
prev_xt++;
}
while (prev_xt > xt) {
// Fill in new tiles, just past the left edge
drawColumn(vid, prev_xt - 2);
prev_xt--;
}
// pixel-level scrolling within the current column
vid.bg0.setPanning(vec(xi, 0));
System::paint();
}
}
示例3: glGetIntegerv
// Reads the current framebuffer into the delegate's VideoBuffer via
// glReadPixels, then (if the buffer's row stride includes padding) spreads
// the tightly-packed rows out to that stride in place, and finally notifies
// the delegate that a frame was captured.
void CaptureCallback::ContextData::readImage()
{
// Refresh cached dimensions from the graphics context on resize.
int width = _graphicsContext->getTraits()->width;
int height = _graphicsContext->getTraits()->height;
if (width!=_width || _height!=height)
{
_width = width;
_height = height;
}
if (_delegate)
{
VideoBuffer *buffer = _delegate->getVideoBuffer();
GLint internalFormat = GL_BGRA; // This is fine for iOS
#ifdef ANDROID // Depending on the platform / Android version, the readable pixel format differs
// Ask the GL implementation which format glReadPixels actually supports.
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &internalFormat);
if (internalFormat == GL_RGB)
internalFormat = GL_RGBA;
#endif
// Record the format actually read so consumers interpret the bytes correctly.
buffer->pixelFormat = internalFormat;
size_t bpp = buffer->bpp(); // bytes per pixel
size_t bpr = buffer->bpr(); // bytes per row (may exceed width*bpp due to padding)
GLubyte *pixelBufferData = (GLubyte *)buffer->data();
// NOTE(review): assumes the buffer holds at least height*bpr bytes — confirm.
glReadPixels(0, 0, width, height, internalFormat, GL_UNSIGNED_BYTE, pixelBufferData);
if (bpr != width*bpp){
// There is some padding expected in the buffer: glReadPixels wrote rows at
// a packed stride of width*bpp, but consumers expect a stride of bpr.
// Move rows to their padded positions, iterating from the highest row down
// so each memmove's source (still at the packed stride) has not been
// overwritten yet. Row 0 is already in place (offset 0 for both strides).
for (int y = height-1; y > 0; --y){
memmove(pixelBufferData + y * bpr, pixelBufferData + y * width * bpp, width * bpp);
}
}
_delegate->didCaptureImage();
}
}
示例4: VideoBuffer
VideoBuffer * SignTool::GetIcon(int toolID, int width, int height)
{
	// Build the sign tool's icon: a black fill with a 1px light-grey
	// border, then the two-part sign glyph blended into the centre.
	VideoBuffer * icon = new VideoBuffer(width, height);
	for (int row = 0; row < height; row++)
	{
		for (int col = 0; col < width; col++)
		{
			bool border = (col == 0 || col == width-1 || row == 0 || row == height-1);
			pixel colour = border ? PIXPACK(0xA0A0A0) : PIXPACK(0x000000);
			icon->SetPixel(col, row, PIXR(colour), PIXG(colour), PIXB(colour), 255);
		}
	}
	// Glyph 0xE021 is the coloured backdrop, 0xE020 the white foreground;
	// both are centred on the icon.
	int glyphX = (width/2)-5, glyphY = (height/2)-5;
	icon->AddCharacter(glyphX, glyphY, 0xE021, 32, 64, 128, 255);
	icon->BlendCharacter(glyphX, glyphY, 0xE020, 255, 255, 255, 255);
	return icon;
}
示例5: setup
void VideoHeader::setup(VideoBuffer & buffer){
    // Bind this header to the given buffer and reset playback state.
    //newFrameEvent.init("Playmodes.VideoHeader.newFrame");
    this->buffer = &buffer;

    // Frame timing comes from the buffer: one frame's duration in
    // microseconds at the buffer's frame rate.
    fps = buffer.getFps();
    oneFrame = (pmTimeDiff)round(1000000.0/(float)fps);

    // Start positioned at the end of the buffered frames.
    position = buffer.size();
    prevBufferPos = 0;

    // Default playback parameters: forward, unit speed, full in/out range.
    speed = 1;
    pct = 1;
    pctHasChanged = true;
    in = 0;
    out = 1;
    loopMode = 0;
    delay = 0;
}
示例6: GameSave
VideoBuffer * SaveRenderer::Render(unsigned char * saveData, int dataSize, bool decorations, bool fire)
{
	// Parse the raw save data and render it via the GameSave overload.
	// On a parse failure, return a 64x64 placeholder thumbnail (an 'x')
	// instead of propagating the exception. Caller owns the returned buffer.
	GameSave * tempSave;
	try {
		tempSave = new GameSave((char*)saveData, dataSize);
	} catch (const std::exception &) { // catch by const reference; the exception object itself is unused
		//Todo: make this look a little less shit
		VideoBuffer * buffer = new VideoBuffer(64, 64);
		buffer->BlendCharacter(32, 32, 'x', 255, 255, 255, 255);
		return buffer;
	}
	VideoBuffer * thumb = Render(tempSave, decorations, fire);
	delete tempSave;
	return thumb;
}
示例7: VideoBuffer
VideoBuffer *DecorationTool::GetIcon(int toolID, int width, int height)
{
	// Build the decoration tool's icon: a background that previews the
	// current decoration colour (or a gradient / neutral grey), plus a
	// glyph or cross overlay identifying the specific tool.
	VideoBuffer * icon = new VideoBuffer(width, height);
	for (int py = 0; py < height; py++)
	{
		for (int px = 0; px < width; px++)
		{
			if (toolID == DECO_SMUDGE)
			{
				// Horizontal green-to-blue gradient.
				icon->SetPixel(px, py, 0, 255-5*px, 5*px, 255);
			}
			else if (toolID == DECO_DRAW || toolID == DECO_CLEAR)
			{
				// Preview of the currently selected decoration colour.
				icon->SetPixel(px, py, Red, Green, Blue, Alpha);
			}
			else
			{
				// Neutral dark grey for the arithmetic blend tools.
				icon->SetPixel(px, py, 50, 50, 50, 255);
			}
		}
	}
	if (toolID == DECO_CLEAR)
	{
		// Overlay an 'X' in (roughly) the complementary colour so it stays
		// visible on top of the colour preview underneath.
		int invRed = (Red+127)%256;
		int invGreen = (Green+127)%256;
		int invBlue = (Blue+127)%256;
		for (int d = 4; d < 12; d++)
		{
			icon->SetPixel(d+5, d-1, invRed, invGreen, invBlue, 255);
			icon->SetPixel(d+6, d-1, invRed, invGreen, invBlue, 255);
			icon->SetPixel(20-d, d-1, invRed, invGreen, invBlue, 255);
			icon->SetPixel(21-d, d-1, invRed, invGreen, invBlue, 255);
		}
	}
	else if (toolID == DECO_ADD)
		icon->AddCharacter(11, 4, '+', Red, Green, Blue, 255);
	else if (toolID == DECO_SUBTRACT)
		icon->AddCharacter(11, 4, '-', Red, Green, Blue, 255);
	else if (toolID == DECO_MULTIPLY)
		icon->AddCharacter(11, 3, 'x', Red, Green, Blue, 255);
	else if (toolID == DECO_DIVIDE)
		icon->AddCharacter(11, 4, '/', Red, Green, Blue, 255);
	return icon;
}
示例8: Process
RequestBroker::ProcessResponse ThumbRenderRequest::Process(RequestBroker & rb)
{
	// Render the queued save into a thumbnail. The GameSave is consumed
	// (deleted) regardless of whether rendering succeeds.
	VideoBuffer * thumbnail = SaveRenderer::Ref().Render(Save, Decorations, Fire);
	delete Save;
	Save = NULL;

	// Guard clause on failure; this also removes the unreachable duplicate
	// `return RequestBroker::Failed;` that followed the original if/else.
	if (!thumbnail)
		return RequestBroker::Failed;

	// Scale to the requested dimensions and hand the result to the broker.
	thumbnail->Resize(Width, Height, true);
	ResultObject = (void*)thumbnail;
	rb.requestComplete((Request*)this);
	return RequestBroker::Finished;
}
示例9: main
//.........这里部分代码省略.........
}
}
else if (filename == "webcam" || startsWith(filename, WEBCAM_PREFIX))
{
int webcamnumber = 0;
// If they supplied "/dev/video[number]" parse the "number" here
if(startsWith(filename, WEBCAM_PREFIX) && filename.length() > WEBCAM_PREFIX.length())
{
webcamnumber = atoi(filename.substr(WEBCAM_PREFIX.length()).c_str());
}
int framenum = 0;
cv::VideoCapture cap(webcamnumber);
if (!cap.isOpened())
{
std::cerr << "Error opening webcam" << std::endl;
return 1;
}
while (cap.read(frame))
{
if (framenum == 0)
motiondetector.ResetMotionDetection(&frame);
detectandshow(&alpr, frame, "", outputJson);
sleep_ms(10);
framenum++;
}
}
else if (startsWith(filename, "http://") || startsWith(filename, "https://"))
{
int framenum = 0;
VideoBuffer videoBuffer;
videoBuffer.connect(filename, 5);
cv::Mat latestFrame;
while (program_active)
{
std::vector<cv::Rect> regionsOfInterest;
int response = videoBuffer.getLatestFrame(&latestFrame, regionsOfInterest);
if (response != -1)
{
if (framenum == 0)
motiondetector.ResetMotionDetection(&latestFrame);
detectandshow(&alpr, latestFrame, "", outputJson);
}
// Sleep 10ms
sleep_ms(10);
framenum++;
}
videoBuffer.disconnect();
std::cout << "Video processing ended" << std::endl;
}
else if (hasEndingInsensitive(filename, ".avi") || hasEndingInsensitive(filename, ".mp4") ||
hasEndingInsensitive(filename, ".webm") ||
hasEndingInsensitive(filename, ".flv") || hasEndingInsensitive(filename, ".mjpg") ||
hasEndingInsensitive(filename, ".mjpeg") ||
hasEndingInsensitive(filename, ".mkv")
)
示例10: typeid
RequestBroker::ProcessResponse ImageRequest::Process(RequestBroker & rb)
{
VideoBuffer * image = NULL;
//Have a look at the thumbnail cache
for(std::deque<std::pair<std::string, VideoBuffer*> >::iterator iter = rb.imageCache.begin(), end = rb.imageCache.end(); iter != end; ++iter)
{
if((*iter).first == URL)
{
image = (*iter).second;
#ifdef DEBUG
std::cout << typeid(*this).name() << " " << URL << " found in cache" << std::endl;
#endif
}
}
if(!image)
{
if(HTTPContext)
{
if(http_async_req_status(HTTPContext))
{
pixel * imageData;
char * data;
int status, data_size, imgw, imgh;
data = http_async_req_stop(HTTPContext, &status, &data_size);
if (status == 200 && data)
{
imageData = Graphics::ptif_unpack(data, data_size, &imgw, &imgh);
free(data);
if(imageData)
{
//Success!
image = new VideoBuffer(imageData, imgw, imgh);
free(imageData);
}
else
{
//Error thumbnail
image = new VideoBuffer(32, 32);
image->SetCharacter(14, 14, 'x', 255, 255, 255, 255);
}
if(rb.imageCache.size() >= THUMB_CACHE_SIZE)
{
//Remove unnecessary from thumbnail cache
delete rb.imageCache.front().second;
rb.imageCache.pop_front();
}
rb.imageCache.push_back(std::pair<std::string, VideoBuffer*>(URL, image));
}
else
{
#ifdef DEBUG
std::cout << typeid(*this).name() << " Request for " << URL << " failed with status " << status << std::endl;
#endif
free(data);
return RequestBroker::Failed;
}
}
}
else
{
//Check for ongoing requests
for(std::vector<Request*>::iterator iter = rb.activeRequests.begin(), end = rb.activeRequests.end(); iter != end; ++iter)
{
if((*iter)->Type != Request::Image)
continue;
ImageRequest * otherReq = (ImageRequest*)(*iter);
if(otherReq->URL == URL && otherReq != this)
{
#ifdef DEBUG
std::cout << typeid(*this).name() << " Request for " << URL << " found, appending." << std::endl;
#endif
//Add the current listener to the item already being requested
(*iter)->Children.push_back(this);
return RequestBroker::Duplicate;
}
}
//If it's not already being requested, request it
#ifdef DEBUG
std::cout << typeid(*this).name() << " Creating new request for " << URL << std::endl;
#endif
HTTPContext = http_async_req_start(NULL, (char *)URL.c_str(), NULL, 0, 0);
RequestTime = time(NULL);
}
}
if(image)
{
//Create a copy, to seperate from the cache
std::vector<Request *> children(Children.begin(), Children.end());
Children.clear();
VideoBuffer * myVB = new VideoBuffer(*image);
//.........这里部分代码省略.........
示例11: main
int main(int argc, char *argv[])
{
ui::Engine * engine;
std::string outputPrefix, inputFilename;
std::vector<char> inputFile;
std::string ppmFilename, ptiFilename, ptiSmallFilename, pngFilename, pngSmallFilename;
std::vector<char> ppmFile, ptiFile, ptiSmallFile, pngFile, pngSmallFile;
inputFilename = std::string(argv[1]);
outputPrefix = std::string(argv[2]);
ppmFilename = outputPrefix+".ppm";
ptiFilename = outputPrefix+".pti";
ptiSmallFilename = outputPrefix+"-small.pti";
pngFilename = outputPrefix+".png";
pngSmallFilename = outputPrefix+"-small.png";
readFile(inputFilename, inputFile);
ui::Engine::Ref().g = new Graphics();
engine = &ui::Engine::Ref();
engine->Begin(WINDOWW, WINDOWH);
GameSave * gameSave = NULL;
try
{
gameSave = new GameSave(inputFile);
}
catch (ParseException e)
{
//Render the save again later or something? I don't know
if (e.what() == "Save from newer version")
throw e;
}
Simulation * sim = new Simulation();
Renderer * ren = new Renderer(ui::Engine::Ref().g, sim);
if (gameSave)
{
sim->Load(gameSave);
//Render save
ren->decorations_enable = true;
ren->blackDecorations = true;
int frame = 15;
while(frame)
{
frame--;
ren->render_parts();
ren->render_fire();
ren->clearScreen(1.0f);
}
}
else
{
int w = Graphics::textwidth("Save file invalid")+16, x = (XRES-w)/2, y = (YRES-24)/2;
ren->drawrect(x, y, w, 24, 192, 192, 192, 255);
ren->drawtext(x+8, y+8, "Save file invalid", 192, 192, 240, 255);
}
ren->RenderBegin();
ren->RenderEnd();
VideoBuffer screenBuffer = ren->DumpFrame();
//ppmFile = format::VideoBufferToPPM(screenBuffer);
ptiFile = format::VideoBufferToPTI(screenBuffer);
pngFile = format::VideoBufferToPNG(screenBuffer);
screenBuffer.Resize(1.0f/3.0f, true);
ptiSmallFile = format::VideoBufferToPTI(screenBuffer);
pngSmallFile = format::VideoBufferToPNG(screenBuffer);
//writeFile(ppmFilename, ppmFile);
writeFile(ptiFilename, ptiFile);
writeFile(ptiSmallFilename, ptiSmallFile);
writeFile(pngFilename, pngFile);
writeFile(pngSmallFilename, pngSmallFile);
}
示例12: main
// Renders a Mandelbrot fractal in true color using the STAMP-mode colormap
// trick, then reports the elapsed render time at the bottom of the screen.
void main()
{
const CubeID cube(0);
static VideoBuffer vid;
vid.attach(cube);
/*
 * Blank the screen. This also blanks the one-pixel region between
 * the bottom of the fractal and the top of the elapsed time indicator
 * below.
 */
vid.initMode(SOLID_MODE);
vid.colormap[0] = RGB565::fromRGB(0xFFFFFF);
System::paint();
/*
 * We use STAMP mode in a special way here, to do (slow) true-color
 * rendering: The framebuffer is simply set up as an identity mapping
 * that shows each of the 16 colors in our colormap. Now we can put
 * a row of 16 pixels directly into the colormap, and render the screen
 * using 1024 of these little 16x1 pixel "frames".
 *
 * Clearly this is really slow, and this technique is unlikely to be
 * frequently useful, but it's a fun parlour trick :)
 */
SystemTime startTime = SystemTime::now();
vid.initMode(STAMP);
vid.stamp.disableKey();
// 16x1 framebuffer where pixel i shows colormap entry i (identity mapping).
auto &fb = vid.stamp.initFB<16,1>();
for (unsigned i = 0; i < 16; i++)
fb.plot(vec(i, 0U), i);
// Sweep the screen, leaving the bottom 9 rows for the time indicator.
for (unsigned y = 0; y < LCD_height - 9; y++)
for (unsigned x = 0; x < LCD_width; x += 16) {
/*
 * Render 16 pixels at a time, into a buffer in RAM.
 */
static RGB565 pixels[16];
for (unsigned i = 0; i < 16; i++)
pixels[i] = calculateMandelbrot(vec(x+i, y));
/*
 * Now copy to VRAM and start painting. By waiting until
 * now to call finish(), we're allowing the calculation above
 * to run concurrently with the cube's paint operation.
 *
 * Note that our "frames" are actually just tiny pieces of the
 * screen, so we need to avoid the default frame rate limits
 * in order to render at an at all reasonable rate. This is
 * where paintUnlimited() comes into play.
 */
System::finish();
vid.stamp.setBox(vec(x,y), vec(16,1));
vid.colormap.set(pixels);
System::paintUnlimited();
}
/*
 * Use BG0_ROM mode to draw the elapsed time at the bottom of the screen.
 */
TimeDelta elapsed = SystemTime::now() - startTime;
String<16> message;
// Format as seconds with three decimal places, e.g. "12.345 sec".
message << (elapsed.milliseconds() / 1000) << "."
<< Fixed(elapsed.milliseconds() % 1000, 3) << " sec";
LOG("Elapsed time: %s\n", message.c_str());
vid.initMode(BG0_ROM);
vid.bg0rom.text(vec(1,0), message);
// Restrict the visible window to the bottom 8 rows for the text.
vid.setWindow(LCD_height - 8, 8);
// Kill time (efficiently)
while (1)
System::paint();
}
示例13: main
//.........这里部分代码省略.........
if (fileExists(filename.c_str()))
{
frame = cv::imread( filename );
detectandshow( &alpr, frame, "", outputJson);
}
else
{
std::cerr << "Image file not found: " << filename << std::endl;
}
}
}
else if (filename == "webcam")
{
int framenum = 0;
cv::VideoCapture cap(0);
if (!cap.isOpened())
{
std::cout << "Error opening webcam" << std::endl;
return 1;
}
while (cap.read(frame))
{
detectandshow(&alpr, frame, "", outputJson);
usleep(1000);
framenum++;
}
}
else if (startsWith(filename, "http://") || startsWith(filename, "https://"))
{
int framenum = 0;
VideoBuffer videoBuffer;
videoBuffer.connect(filename, 5);
cv::Mat latestFrame;
while (program_active)
{
int response = videoBuffer.getLatestFrame(&latestFrame);
if (response != -1)
{
detectandshow( &alpr, latestFrame, "", outputJson);
}
// Sleep 10ms
usleep(10000);
}
videoBuffer.disconnect();
std::cout << "Video processing ended" << std::endl;
}
else if (hasEndingInsensitive(filename, ".avi") || hasEndingInsensitive(filename, ".mp4") || hasEndingInsensitive(filename, ".webm") ||
hasEndingInsensitive(filename, ".flv") || hasEndingInsensitive(filename, ".mjpg") || hasEndingInsensitive(filename, ".mjpeg"))
{
if (fileExists(filename.c_str()))
{
int framenum = 0;
cv::VideoCapture cap=cv::VideoCapture();
cap.open(filename);
cap.set(CV_CAP_PROP_POS_MSEC, seektoms);
示例14: main
//.........这里部分代码省略.........
long start[10];
if(ShowVideo) {
namedWindow("Vision",1);
//createTrackbar("Thresh Type", "Vision", &type, 4, NULL); // callback not needed
//createTrackbar("Min Value", "Vision", &defMin, 255, NULL); // callback not needed
//createTrackbar("Max Value", "Vision", &defMax, 255, NULL); // callback not needed
createTrackbar("Percent Tall", "Vision", &defTall, 20, NULL); // callback not needed
createTrackbar("Percent Narr", "Vision", &defNarr, 20, NULL); // callback not needed
} else {
table->PutNumber("Horizontal Percent Error", defTall);
table->PutNumber("Vertical Percent Error", defNarr);
}
for (int i = 0; i<10; i++)
start[i] = getmsofday(); // pre-load with 'now'
for (int i = 0; 1; i++) {
// Receive key-press updates, it is required if you want to output images,
// so the task takes a moment to update the display.
if (waitKey(1) > 0)
break;
string fileStream = "Mask"; // Default if no table present
if (table->IsConnected()) {
NetworkTable *StreamsTable = table->GetSubTable("File Streams");
if (StreamsTable && StreamsTable->ContainsKey("selected")) {
fileStream = StreamsTable->GetString("selected");
}
}
ShowMask = (fileStream == "Mask");
// Grab a frame from the vision API
VideoBuffer buffer = v.grabFrame();
// Put the frame into an OpenCV image matrix with a single color (gray scale)
Mat image(height, width, CV_8UC1, buffer.data(), false); // AKA 'Y'
Mat dst; // this will be a RGB version of the source image
#if defined(YOU_WANT_RGB_COLOR_INSTEAD_OF_GREYSCALE)
// There is more data after the gray scale (Y) that contains U&V
Mat cb(height/2, width/2, CV_8UC1, buffer.data()+(height*width), false); // 'U'
Mat cr(height/2, width/2, CV_8UC1, buffer.data()+(height*width)*5/4, false); // 'V'
// size up cb and cr to be same as y
Mat CB;
resize(cb,CB,cvSize(width,height));
Mat CR;
resize(cr,CR,cvSize(width,height));
// empty image same as full (gray scale) image, but 3 channels:
Mat ycbcr(height,width, CV_8UC3);
Mat in[] = {image, CB, CR};
int fromto[] = {0,0, 1,1, 2,2}; // YUV
// mash 3 channels from 2 matrix into a single 3 channel matrix:
mixChannels(in,3, &ycbcr,1, fromto,3);
// convert that 3 channel YUV matrix into 3 channel RGB (displayable)
cvtColor(ycbcr,image,CV_YCrCb2RGB);
if (ShowMask) {
dst = image.clone(); // make a copy, as we want dst to have the same RGB version
}
#else
示例15: main
void main()
{
    // Colour indices for a small marker square drawn in the BG0_ROM charset.
    unsigned marker = BG0ROMDrawable::SOLID_FG ^ BG0ROMDrawable::BLUE;
    unsigned backdrop = BG0ROMDrawable::SOLID_FG ^ BG0ROMDrawable::BLACK;

    vid.initMode(BG0_ROM);
    vid.attach(cube);
    vid.bg0rom.erase(backdrop);
    vid.bg0rom.fill(vec(0,0), vec(3,3), marker);

    synthInit();

    float hz = 0;
    for (;;) {
        // Accelerometer reading scaled to [-1, 1].
        auto tilt = cube.accel() / 128.f;

        // Target pitch: a quantized number of half-steps above or below
        // middle C, chosen by Y tilt; glide 40% of the way there each frame.
        float target = 261.6f * pow(1.05946f, 8 + round(tilt.y * 24.f));
        hz += (target - hz) * 0.4f;

        // X tilt shapes the synth's other two parameters.
        synthesize(hz, tilt.x - 0.2f,
                   clamp(tilt.x + 0.5f, 0.f, 1.f));

        // Pan so the marker square sits at the screen centre, offset by tilt.
        const Int2 center = LCD_center - vec(24,24)/2;
        vid.bg0rom.setPanning(-(center + tilt.xy() * 60.f));
        System::paint();
    }
}