This article collects typical usage examples of the C++ method QMatrix4x4::ortho. If you are wondering what exactly QMatrix4x4::ortho does, how to use it, or would like to see it in context, the hand-picked code examples below may help. You can also explore further usage examples of its containing class, QMatrix4x4.
Eight code examples of QMatrix4x4::ortho are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
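As a quick orientation before the project examples, here is a minimal, self-contained sketch of the usual call pattern. It is not taken from any of the examples below; the parameters w and h and the uniform name "u_projection" are illustrative assumptions. QMatrix4x4::ortho(left, right, bottom, top, nearPlane, farPlane) multiplies the matrix by an orthographic projection for the given clipping planes, which is then typically uploaded to a shader as the projection uniform.

#include <QMatrix4x4>
#include <QOpenGLShaderProgram>

// Minimal sketch, assuming a bound QOpenGLShaderProgram with a mat4 uniform
// named "u_projection" (hypothetical name) and a viewport of w x h pixels.
void setPixelOrtho(QOpenGLShaderProgram *program, int w, int h)
{
    QMatrix4x4 projection;
    projection.setToIdentity();
    // left, right, bottom, top, near, far: top-left origin with +y pointing down,
    // as is common for the 2D/UI style rendering seen in several examples below.
    projection.ortho(0.0f, float(w), float(h), 0.0f, -1.0f, 1.0f);
    program->setUniformValue("u_projection", projection);
}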
Example 1: paintGL
void Scene::paintGL(){
setStates(); // enable the depth buffer, lighting, etc. (could probably be moved into initGL)
//glClear(GL_COLOR_BUFFER_BIT); // if the depth buffer is enabled, we will not see anything without clearing it as well
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//glMatrixMode(GL_PROJECTION);
//qgluPerspective(60.0, width / height, 0.01, 15.0);
//glMatrixMode(GL_MODELVIEW);
// set up the transformation matrices
QMatrix4x4 LMM; // Local model matrix (applies transformations in the object's local coordinates; one object may have several of these for its different parts)
QMatrix4x4 MM; // Model matrix (maps the object's coordinates into scene-space coordinates;
// applied in the order: scale, rotate, translate)
// TranslationMatrix * RotationMatrix * ScaleMatrix * OriginalVector; (in code the calls appear in reverse order)
QMatrix4x4 MVM; // ModelView matrix (view matrix): "scales, rotates and moves the whole world"
QMatrix4x4 CameraView; // same as MVM, but built with the lookAt function
QMatrix4x4 PM; // Projection matrix
QMatrix4x4 MVPM; // ModelViewProjection matrix (projection * view * model)
if (perspective) {
// set up the 3D viewing volume (perspective projection) for drawing (clipping planes)
// field-of-view angle, aspect ratio, distances to the near and far clipping planes
PM.perspective(cameraFocusAngle, ratio, 0.1f, 100.0f); // cf. glFrustum(xmin, xmax, ymin, ymax, near, far) / gluPerspective(fovy, aspect, near, far)
}
else {
// set up the 3D viewing volume (orthographic projection) for drawing (clipping planes)
PM.ortho(-2.0f, 2.0f, -2.0f, 2.0f, -8.0f, 8.0f); // glOrtho(left, right, bottom, top, near, far) // larger values make the shapes in the scene smaller (use a larger Z range so the shapes do not get clipped)
// translating further along z is mandatory for the perspective projection // on the z axis, 0 is the "eye": negative moves the camera backwards, positive moves it forwards.
}
///MVM.translate(0.0f, 0.0f, -6.0f); // move along z away from the "eye", i.e. shift the camera in the negative direction, backwards.
// does not work in the orthographic projection if moved too far, beyond the clipping cube
// oops, now we can see the near face of the clipping frustum where everything gets cut off (and that is the end of the shader spheres)
// change the scale of the shape (enlarge it)
///MVM.scale(10.0f); // a negative value flips the projection // only affects the orthographic projection // breaks the spheres
// specify the rotation angle and the rotation axis pointing from the coordinate origin towards the point x,y,z
MVM.rotate(m_angle, 0.0f, 1.0f, 0.0f); // rotation about an axis through the coordinate origin
CameraView.lookAt(cameraEye, cameraCenter, cameraUp); // set up the camera (view transformation matrix)
MVM = CameraView * MVM; // obtain the final view transformation matrix
// compute the inverse of the projection matrix
bool inverted;
QMatrix4x4 PMi = PM.inverted(&inverted);
if (!inverted)
qDebug() << "PMi could not be inverted";
MVPM = PM * MVM;
QMatrix4x4 MVPMi = MVPM.inverted(&inverted);
if (!inverted)
qDebug() << "MVPMi could not be inverted";
// DRAW THE TRIANGLE
// initialize the program data, matrices and attributes
m_triangle->init();
// pass the transformation matrix into its shader program
m_triangle->m_program.setUniformValue(m_triangle->m_matrixUniform, MVPM);
// call the draw function for the object (or for several objects in a for loop)
m_triangle->draw();
// reset the initialization state
m_triangle->drop();//*/
// DRAW THE SPHERES
m_shphere->init();
m_shphere->m_program->setUniformValue(m_shphere->m_matrixUniform, MVPM);
m_shphere->m_program->setUniformValue("PMi", PMi);
m_shphere->m_program->setUniformValue("MVM", MVM);
m_shphere->m_program->setUniformValue("MVPMi", MVPMi);
m_shphere->m_program->setUniformValue("viewport",viewport);
m_shphere->draw();
m_shphere->drop();//*/
}
Example 2: render
void QtViewRenderer::render()
{
QOpenGLFunctions* f = QOpenGLContext::currentContext()->functions();
OpenGLStateSaver state(f);
f->glEnable(GL_BLEND);
f->glBlendEquation(GL_FUNC_ADD);
f->glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
f->glDisable(GL_CULL_FACE);
f->glDisable(GL_DEPTH_TEST);
f->glActiveTexture(GL_TEXTURE0);
glClearColor(m_clearColor[0], m_clearColor[1], m_clearColor[2], 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, m_width, m_height);
QMatrix4x4 ortho;
ortho.ortho(m_viewBounds[0], m_viewBounds[2], m_viewBounds[3], m_viewBounds[1], -1, 1);
m_shader->bind();
m_shader->setUniformValue(m_locationTex, 0);
m_shader->setUniformValue(m_locationProjMtx, ortho);
m_VAO->bind();
int vboSize = 0, eboSize = 0;
for (const auto cmd_list : m_drawLists)
{
if (cmd_list->vtxBuffer().empty() || cmd_list->idxBuffer().empty())
continue;
const DrawList::DrawIdx* idx_buffer_offset = nullptr;
m_VBO->bind();
int vtxSize = cmd_list->vtxBuffer().size() * sizeof(DrawList::DrawVert);
if (vtxSize > vboSize)
{
m_VBO->allocate(vtxSize);
vboSize = vtxSize;
}
m_VBO->write(0, &cmd_list->vtxBuffer().front(), vtxSize);
m_EBO->bind();
int idxSize = cmd_list->idxBuffer().size() * sizeof(DrawList::DrawIdx);
if (idxSize > eboSize)
{
m_EBO->allocate(idxSize);
eboSize = idxSize;
}
m_EBO->write(0, &cmd_list->idxBuffer().front(), idxSize);
for (const auto& pcmd : cmd_list->cmdBuffer())
{
glBindTexture(GL_TEXTURE_2D, pcmd.textureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(pcmd.elemCount), GL_UNSIGNED_INT, idx_buffer_offset);
idx_buffer_offset += pcmd.elemCount;
}
}
m_shader->release();
m_VAO->release();
}
Example 3: inputsUpdated
//......... part of the code omitted here .........
State->stateBegin();
}
//-------------------------------------------------------------------------
if( true )
{
glViewport( 0, 0, W, H );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
QList<InterfaceGeometry*> GeometryList;
for( QList< QSharedPointer<InterfacePin> >::iterator it = InpPinLst.begin() ; it != InpPinLst.end() ; it++ )
{
QSharedPointer<InterfacePin> InpPin = *it;
if( !InpPin->isConnectedToActiveNode() )
{
continue;
}
QSharedPointer<InterfacePinControl> GeometryControl = InpPin->connectedPin()->control();
InterfaceGeometry *Geometry = ( GeometryControl.isNull() ? 0 : qobject_cast<InterfaceGeometry *>( GeometryControl->object() ) );
if( Geometry != 0 )
{
GeometryList.append( Geometry );
}
}
if( GeometryList.isEmpty() )
{
QMatrix4x4 pmvMatrix;
pmvMatrix.setToIdentity();
pmvMatrix.ortho( QRect( QPoint( 0, 0 ), QSize( W, H ) ) );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glLoadMatrixf( pmvMatrix.constData() );
//Initialize Modelview Matrix
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glColor4f( 1.0, 1.0, 1.0, 1.0 );
//glEnable( GL_BLEND );
//glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
//glActiveTexture( GL_TEXTURE0 );
OPENGL_PLUGIN_DEBUG;
GLint x0 = 0;
GLint x1 = W;
GLint y0 = 0;
GLint y1 = H;
glBegin( GL_QUADS );
glMultiTexCoord2i( GL_TEXTURE0, x0, y1 ); glMultiTexCoord2f( GL_TEXTURE1, 0, 1 ); glVertex2i( x0, y0 );
glMultiTexCoord2i( GL_TEXTURE0, x1, y1 ); glMultiTexCoord2f( GL_TEXTURE1, 1, 1 ); glVertex2i( x1, y0 );
glMultiTexCoord2i( GL_TEXTURE0, x1, y0 ); glMultiTexCoord2f( GL_TEXTURE1, 1, 0 ); glVertex2i( x1, y1 );
glMultiTexCoord2i( GL_TEXTURE0, x0, y0 ); glMultiTexCoord2f( GL_TEXTURE1, 0, 0 ); glVertex2i( x0, y1 );
glEnd();
}
else
{
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
//Initialize Modelview Matrix
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glColor4f( 1.0, 1.0, 1.0, 1.0 );
//glEnable( GL_BLEND );
//glBlendFunc( GL_ONE, GL_ONE );
//glActiveTexture( GL_TEXTURE0 );
OPENGL_PLUGIN_DEBUG;
//glTranslatef( 0.0f, 0.0f, -5.0f );
foreach( InterfaceGeometry *Geometry, GeometryList )
{
Geometry->drawGeometry();
}
}
OPENGL_PLUGIN_DEBUG;
}
Example 4: init
void GLSLBlurShader::init()
{
QVector<float> kernel = gaussianKernel();
const int size = kernel.size();
const int center = size / 2;
QByteArray vertexSource;
QByteArray fragmentSource;
// Vertex shader
// ===================================================================
QTextStream stream(&vertexSource);
stream << "uniform mat4 u_modelViewProjectionMatrix;\n";
stream << "uniform mat4 u_textureMatrix;\n";
stream << "uniform vec2 pixelSize;\n\n";
stream << "attribute vec4 vertex;\n";
stream << "attribute vec4 texCoord;\n\n";
stream << "varying vec4 samplePos[" << std::ceil(size / 2.0) << "];\n";
stream << "\n";
stream << "void main(void)\n";
stream << "{\n";
stream << " vec4 center = vec4(u_textureMatrix * texCoord).stst;\n";
stream << " vec4 ps = pixelSize.stst;\n\n";
for (int i = 0; i < size; i += 2) {
float offset1, offset2;
if (i < center) {
offset1 = -(1.5 + (center - i - 1) * 2.0);
offset2 = (i + 1) == center ? 0 : offset1 + 2;
} else if (i > center) {
offset1 = 1.5 + (i - center - 1) * 2.0;
offset2 = (i + 1) == size ? 0 : offset1 + 2;
} else {
offset1 = 0;
offset2 = 1.5;
}
stream << " samplePos[" << i / 2 << "] = center + ps * vec4("
<< offset1 << ", " << offset1 << ", " << offset2 << ", " << offset2 << ");\n";
}
stream << "\n";
stream << " gl_Position = u_modelViewProjectionMatrix * vertex;\n";
stream << "}\n";
stream.flush();
// Fragment shader
// ===================================================================
QTextStream stream2(&fragmentSource);
stream2 << "uniform sampler2D texUnit;\n";
stream2 << "varying vec4 samplePos[" << std::ceil(size / 2.0) << "];\n\n";
for (int i = 0; i <= center; i++)
stream2 << "const vec4 kernel" << i << " = vec4(" << kernel[i] << ");\n";
stream2 << "\n";
stream2 << "void main(void)\n";
stream2 << "{\n";
stream2 << " vec4 sum = texture2D(texUnit, samplePos[0].st) * kernel0;\n";
for (int i = 1; i < size; i++)
stream2 << " sum = sum + texture2D(texUnit, samplePos[" << i / 2 << ((i % 2) ? "].pq)" : "].st)")
<< " * kernel" << (i > center ? size - i - 1 : i) << ";\n";
stream2 << " gl_FragColor = sum;\n";
stream2 << "}\n";
stream2.flush();
shader = ShaderManager::instance()->loadShaderFromCode(vertexSource, fragmentSource);
if (shader->isValid()) {
QMatrix4x4 modelViewProjection;
modelViewProjection.ortho(0, displayWidth(), displayHeight(), 0, 0, 65535);
ShaderManager::instance()->pushShader(shader);
shader->setUniform("texUnit", 0);
shader->setUniform("u_textureMatrix", QMatrix4x4());
shader->setUniform("u_modelViewProjectionMatrix", modelViewProjection);
ShaderManager::instance()->popShader();
}
setIsValid(shader->isValid());
}
Example 5: doCachedBlur
void BlurEffect::doCachedBlur(EffectWindow *w, const QRegion& region, const float opacity)
{
const QRect screen = effects->virtualScreenGeometry();
const QRegion blurredRegion = blurRegion(w).translated(w->pos()) & screen;
const QRegion expanded = expand(blurredRegion) & screen;
const QRect r = expanded.boundingRect();
// The background texture we get is only partially valid.
CacheEntry it = windows.find(w);
if (it == windows.end()) {
BlurWindowInfo bwi;
bwi.blurredBackground = GLTexture(r.width(),r.height());
bwi.damagedRegion = expanded;
bwi.dropCache = false;
bwi.windowPos = w->pos();
it = windows.insert(w, bwi);
} else if (it->blurredBackground.size() != r.size()) {
it->blurredBackground = GLTexture(r.width(),r.height());
it->dropCache = false;
it->windowPos = w->pos();
} else if (it->windowPos != w->pos()) {
it->dropCache = false;
it->windowPos = w->pos();
}
GLTexture targetTexture = it->blurredBackground;
targetTexture.setFilter(GL_LINEAR);
targetTexture.setWrapMode(GL_CLAMP_TO_EDGE);
shader->bind();
QMatrix4x4 textureMatrix;
QMatrix4x4 modelViewProjectionMatrix;
/**
* Which part of the background texture can be updated ?
*
* Well this is a rather difficult question. We kind of rely on the fact, that
* we need a bigger background region being painted before, more precisely if we want to
* blur region A we need the background region expand(A). This business logic is basically
* done in prePaintWindow:
* data.paint |= expand(damagedArea);
*
* Now "data.paint" gets clipped and becomes what we receive as the "region" variable
* in this function. In theory there is now only one function that does this clipping
* and this is paintSimpleScreen. The clipping has the effect that "damagedRegion"
* is no longer a subset of "region" and we cannot fully validate the cache within one
* rendering pass. If we would now update the "damageRegion & region" part of the cache
* we would wrongly update the part of the cache that is next to the "region" border and
* which lies within "damagedRegion", just because we cannot assume that the framebuffer
* outside of "region" is valid. Therefore the maximal damaged region of the cache that can
* be repainted is given by:
* validUpdate = damagedRegion - expand(damagedRegion - region);
*
* Now you may ask what is with the rest of "damagedRegion & region" that is not part
* of "validUpdate" but also might end up on the screen. Well under the assumption
* that only the occlusion culling can shrink "data.paint", we can control this by reducing
* the opaque area of every window by a margin of the blurring radius (c.f. prePaintWindow).
* This way we are sure that this area is overpainted by a higher opaque window.
*
* Apparently paintSimpleScreen is not the only function that can influence "region".
* In fact every effect's paintWindow that is called before Blur::paintWindow
* can do so (e.g. SlidingPopups). Hence we have to make the compromise that we update
* "damagedRegion & region" of the cache but only mark "validUpdate" as valid.
**/
const QRegion damagedRegion = it->damagedRegion;
const QRegion updateBackground = damagedRegion & region;
const QRegion validUpdate = damagedRegion - expand(damagedRegion - region);
const QRegion horizontal = validUpdate.isEmpty() ? QRegion() : (updateBackground & screen);
const QRegion vertical = blurredRegion & region;
const int horizontalOffset = 0;
const int horizontalCount = horizontal.rectCount() * 6;
const int verticalOffset = horizontalCount;
const int verticalCount = vertical.rectCount() * 6;
GLVertexBuffer *vbo = GLVertexBuffer::streamingBuffer();
uploadGeometry(vbo, horizontal, vertical);
vbo->bindArrays();
if (!validUpdate.isEmpty()) {
const QRect updateRect = (expand(updateBackground) & expanded).boundingRect();
// First we have to copy the background from the frontbuffer
// into a scratch texture (in this case "tex").
tex.bind();
glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, updateRect.x(), displayHeight() - updateRect.y() - updateRect.height(),
updateRect.width(), updateRect.height());
// Draw the texture on the offscreen framebuffer object, while blurring it horizontally
target->attachTexture(targetTexture);
GLRenderTarget::pushRenderTarget(target);
shader->setDirection(Qt::Horizontal);
shader->setPixelDistance(1.0 / tex.width());
modelViewProjectionMatrix.ortho(0, r.width(), r.height(), 0 , 0, 65535);
modelViewProjectionMatrix.translate(-r.x(), -r.y(), 0);
//......... part of the code omitted here .........
Example 6: modelMatrix
void KisOpenGLCanvas2::drawImage()
{
if (!d->displayShader) {
return;
}
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
KisCoordinatesConverter *converter = coordinatesConverter();
d->displayShader->bind();
QMatrix4x4 projectionMatrix;
projectionMatrix.setToIdentity();
projectionMatrix.ortho(0, width(), height(), 0, NEAR_VAL, FAR_VAL);
// Set view/projection matrices
QMatrix4x4 modelMatrix(coordinatesConverter()->imageToWidgetTransform());
modelMatrix.optimize();
modelMatrix = projectionMatrix * modelMatrix;
d->displayShader->setUniformValue(d->displayShader->location(Uniform::ModelViewProjection), modelMatrix);
QMatrix4x4 textureMatrix;
textureMatrix.setToIdentity();
d->displayShader->setUniformValue(d->displayShader->location(Uniform::TextureMatrix), textureMatrix);
QRectF widgetRect(0,0, width(), height());
QRectF widgetRectInImagePixels = converter->documentToImage(converter->widgetToDocument(widgetRect));
qreal scaleX, scaleY;
converter->imageScale(&scaleX, &scaleY);
d->displayShader->setUniformValue(d->displayShader->location(Uniform::ViewportScale), (GLfloat) scaleX);
d->displayShader->setUniformValue(d->displayShader->location(Uniform::TexelSize), (GLfloat) d->openGLImageTextures->texelSize());
QRect ir = d->openGLImageTextures->storedImageBounds();
QRect wr = widgetRectInImagePixels.toAlignedRect();
if (!d->wrapAroundMode) {
// if we don't want to paint wrapping images, just limit the
// processing area, and the code will handle all the rest
wr &= ir;
}
int firstColumn = d->xToColWithWrapCompensation(wr.left(), ir);
int lastColumn = d->xToColWithWrapCompensation(wr.right(), ir);
int firstRow = d->yToRowWithWrapCompensation(wr.top(), ir);
int lastRow = d->yToRowWithWrapCompensation(wr.bottom(), ir);
int minColumn = d->openGLImageTextures->xToCol(ir.left());
int maxColumn = d->openGLImageTextures->xToCol(ir.right());
int minRow = d->openGLImageTextures->yToRow(ir.top());
int maxRow = d->openGLImageTextures->yToRow(ir.bottom());
int imageColumns = maxColumn - minColumn + 1;
int imageRows = maxRow - minRow + 1;
for (int col = firstColumn; col <= lastColumn; col++) {
for (int row = firstRow; row <= lastRow; row++) {
int effectiveCol = col;
int effectiveRow = row;
QPointF tileWrappingTranslation;
if (effectiveCol > maxColumn || effectiveCol < minColumn) {
int translationStep = floor(qreal(col) / imageColumns);
int originCol = translationStep * imageColumns;
effectiveCol = col - originCol;
tileWrappingTranslation.rx() = translationStep * ir.width();
}
if (effectiveRow > maxRow || effectiveRow < minRow) {
int translationStep = floor(qreal(row) / imageRows);
int originRow = translationStep * imageRows;
effectiveRow = row - originRow;
tileWrappingTranslation.ry() = translationStep * ir.height();
}
KisTextureTile *tile =
d->openGLImageTextures->getTextureTileCR(effectiveCol, effectiveRow);
if (!tile) {
warnUI << "OpenGL: Trying to paint texture tile but it has not been created yet.";
continue;
}
/*
* We create a float rect here to workaround Qt's
* "history reasons" in calculation of right()
* and bottom() coordinates of integer rects.
*/
QRectF textureRect(tile->tileRectInTexturePixels());
QRectF modelRect(tile->tileRectInImagePixels().translated(tileWrappingTranslation.x(), tileWrappingTranslation.y()));
//Setup the geometry for rendering
if (KisOpenGL::hasOpenGL3()) {
rectToVertices(d->vertices, modelRect);
d->quadBuffers[0].bind();
d->quadBuffers[0].write(0, d->vertices, 3 * 6 * sizeof(float));
//......... part of the code omitted here .........
Example 7: render
void QOpenGLRenderer::render(render::Mesh mesh)
{
//const qreal retinaScale = devicePixelRatio();
glViewport(0, 0, viewportWidth * retinaScale, viewportHeight * retinaScale);
//glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
// Enable depth buffer
glEnable(GL_DEPTH_TEST); // to init
// Enable back face culling
glEnable(GL_CULL_FACE); // to init
glEnable(GL_TEXTURE_2D);
// Note: OpenGL: CCW triangles = front-facing.
// Coord-axis: right = +x, up = +y, to back = -z, to front = +z
m_program->bind();
// Store the 3DMM vertices in GLfloat's
// Todo: Measure time. Possible improvements: 1) See if Vec3f is in contiguous storage, if yes, maybe change my Vertex/Mesh structure.
// 2) Maybe change the datatypes (see http://www.opengl.org/wiki/Vertex_Specification_Best_Practices 'Attribute sizes')
vector<GLfloat> mmVertices;
//render::Mesh mesh = morphableModel.getMean();
mmVertices.clear();
int numTriangles = mesh.tvi.size();
for (int i = 0; i < numTriangles; ++i)
{
// First vertex x, y, z of the triangle
const auto& triangle = mesh.tvi[i];
mmVertices.push_back(mesh.vertex[triangle[0]].position[0]);
mmVertices.push_back(mesh.vertex[triangle[0]].position[1]);
mmVertices.push_back(mesh.vertex[triangle[0]].position[2]);
// Second vertex x, y, z
mmVertices.push_back(mesh.vertex[triangle[1]].position[0]);
mmVertices.push_back(mesh.vertex[triangle[1]].position[1]);
mmVertices.push_back(mesh.vertex[triangle[1]].position[2]);
// Third vertex x, y, z
mmVertices.push_back(mesh.vertex[triangle[2]].position[0]);
mmVertices.push_back(mesh.vertex[triangle[2]].position[1]);
mmVertices.push_back(mesh.vertex[triangle[2]].position[2]);
}
vector<GLfloat> mmColors;
for (int i = 0; i < numTriangles; ++i)
{
// First vertex x, y, z of the triangle
const auto& triangle = mesh.tci[i];
mmColors.push_back(mesh.vertex[triangle[0]].color[0]);
mmColors.push_back(mesh.vertex[triangle[0]].color[1]);
mmColors.push_back(mesh.vertex[triangle[0]].color[2]);
// Second vertex x, y, z
mmColors.push_back(mesh.vertex[triangle[1]].color[0]);
mmColors.push_back(mesh.vertex[triangle[1]].color[1]);
mmColors.push_back(mesh.vertex[triangle[1]].color[2]);
// Third vertex x, y, z
mmColors.push_back(mesh.vertex[triangle[2]].color[0]);
mmColors.push_back(mesh.vertex[triangle[2]].color[1]);
mmColors.push_back(mesh.vertex[triangle[2]].color[2]);
}
vector<GLfloat> mmTex;
for (int i = 0; i < numTriangles; ++i)
{
// First vertex u, v of the triangle
const auto& triangle = mesh.tci[i]; // use tti?
mmTex.push_back(mesh.vertex[triangle[0]].texcrd[0]);
mmTex.push_back(mesh.vertex[triangle[0]].texcrd[1]);
// Second vertex u, v
mmTex.push_back(mesh.vertex[triangle[1]].texcrd[0]);
mmTex.push_back(mesh.vertex[triangle[1]].texcrd[1]);
// Third vertex u, v
mmTex.push_back(mesh.vertex[triangle[2]].texcrd[0]);
mmTex.push_back(mesh.vertex[triangle[2]].texcrd[1]);
}
float aspect = static_cast<float>(viewportWidth) / static_cast<float>(viewportHeight);
QMatrix4x4 matrix;
matrix.ortho(-1.0f*aspect, 1.0f*aspect, -1.0f, 1.0f, 0.1f, 100.0f); // l r b t n f
//matrix.ortho(-70.0f, 70.0f, -70.0f, 70.0f, 0.1f, 1000.0f);
//matrix.perspective(60, aspect, 0.1, 100.0);
matrix.translate(0, 0, -2);
matrix.rotate(15.0f, 1.0f, 0.0f, 0.0f);
matrix.rotate(30.0f, 0.0f, 1.0f, 0.0f);
//matrix.scale(0.009f);
matrix.scale(0.008f);
m_program->setUniformValue(m_matrixUniform, matrix);
glVertexAttribPointer(m_posAttr, 3, GL_FLOAT, GL_FALSE, 0, &mmVertices[0]); // vertices
glVertexAttribPointer(m_colAttr, 3, GL_FLOAT, GL_FALSE, 0, &mmColors[0]); // colors
glVertexAttribPointer(m_texAttr, 2, GL_FLOAT, GL_FALSE, 0, &mmTex[0]); // texCoords
m_program->setAttributeValue(m_texWeightAttr, 1.0f);
m_program->setUniformValue("texture", texture);
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, texture);
glEnableVertexAttribArray(m_posAttr);
glEnableVertexAttribArray(m_colAttr);
glEnableVertexAttribArray(m_texAttr);
//......... part of the code omitted here .........
Example 8: paintGL
void GLWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, this->width(), this->height());
int current_width = width();
int current_height = height();
// Set orthographic Matrix
QMatrix4x4 orthoMatrix;
orthoMatrix.ortho(0.0 + _scrollOffset.x(),
(float)current_width + _scrollOffset.x(),
(float)current_height + _scrollOffset.y(),
0.0 + _scrollOffset.y(),
-100, 100);
// Translate the view to the middle
QMatrix4x4 transformMatrix;
transformMatrix.setToIdentity();
transformMatrix.scale(_zoomFactor);
_shaderProgram->setUniformValue(_mvpMatrixLocation, orthoMatrix * transformMatrix);
_tilePainter->DrawTiles();
_shaderProgram->setUniformValue(_use_color_location, (GLfloat)1.0);
if(_dotsVao.isCreated() && SystemParams::show_grid)
{
int verticesPerDot = 4 + 2;
_dotsVao.bind();
uint nDots = _gridSize.width() * _gridSize.height();
nDots += (_gridSize.width() + 1) * (_gridSize.height() + 1);
for(uint a = 0; a < nDots; a++)
{ glDrawArrays(GL_TRIANGLE_FAN, a * verticesPerDot, verticesPerDot); }
_dotsVao.release();
}
if(_drawBreakVao.isCreated() && _isMouseDown)
{
_shaderProgram->setUniformValue(_use_color_location, (GLfloat)1.0);
glLineWidth(2.0f);
_drawBreakVao.bind();
glDrawArrays(GL_LINES, 0, 2);
_drawBreakVao.release();
}
if(_breakLinesVao.isCreated() && SystemParams::show_grid)
{
_shaderProgram->setUniformValue(_use_color_location, (GLfloat)1.0);
glLineWidth(2.0f);
_breakLinesVao.bind();
glDrawArrays(GL_LINES, 0, _breakLines.size() * 2);
_breakLinesVao.release();
}
if(_cellLinesVao.isCreated() && SystemParams::show_grid)
{
_shaderProgram->setUniformValue(_use_color_location, (GLfloat)1.0);
glLineWidth(0.5f);
_cellLinesVao.bind();
glDrawArrays(GL_LINES, 0, _cellLines.size() * 2);
_cellLinesVao.release();
}
}