This article collects typical usage examples of the C++ function ralloc_free. If you have been wondering what ralloc_free does, how to call it, or what real uses of it look like, the hand-picked samples below should help. The following shows 15 code examples of ralloc_free, all taken from open-source projects (in this case, the Mesa 3D graphics stack), sorted by popularity by default.
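For orientation: ralloc is Mesa's hierarchical memory allocator. Every allocation can be parented to a context, and ralloc_free(ptr) releases ptr together with everything parented beneath it. A minimal sketch of the core pattern, using only the public ralloc.h API:

#include "ralloc.h"

void
compile_something(void)
{
   /* A throwaway root context for everything this compile allocates. */
   void *mem_ctx = ralloc_context(NULL);

   /* Children are parented to mem_ctx rather than malloc'd individually. */
   char *log = ralloc_strdup(mem_ctx, "compile log:\n");
   int *scratch = rzalloc_array(mem_ctx, int, 64);

   (void)log;
   (void)scratch;

   /* One call frees mem_ctx, log, and scratch. */
   ralloc_free(mem_ctx);
}

Most of the examples below are variations of this shape: allocate a root (often named mem_ctx), parent compile-time scratch data to it, and free it exactly once on every exit path.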
Example 1: vc4_screen_destroy

static void
vc4_screen_destroy(struct pipe_screen *pscreen)
{
   vc4_bufmgr_destroy(pscreen);
   ralloc_free(pscreen);
}
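Example 1 only shows the teardown half; the screen it frees was created as a ralloc root. A sketch of what the matching create side looks like (simplified and partly hypothetical; the real vc4_screen_create also sets up the winsys, caps, and fence handling):

struct pipe_screen *
vc4_screen_create(int fd)
{
   /* The screen is a ralloc root, so vc4_screen_destroy() can tear
    * everything parented to it down with one ralloc_free(pscreen). */
   struct vc4_screen *screen = rzalloc(NULL, struct vc4_screen);
   if (!screen)
      return NULL;

   screen->fd = fd;   /* assumed field; illustration only */
   return &screen->base;
}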
Example 2: brwCreateContext

bool
brwCreateContext(int api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 unsigned *error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *screen = sPriv->driverPrivate;
   struct dd_function_table functions;

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   /* brwInitVtbl needs to know the chipset generation so that it can set the
    * right pointers.
    */
   brw->gen = screen->gen;
   brwInitVtbl( brw );

   brwInitDriverFunctions(screen, &functions);

   struct gl_context *ctx = &brw->ctx;

   if (!intelInitContext( brw, api, major_version, minor_version,
                          mesaVis, driContextPriv,
                          sharedContextPrivate, &functions,
                          error)) {
      ralloc_free(brw);
      return false;
   }

   brw_initialize_context_constants(brw);

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         ralloc_free(brw);
         return false;
      }
   }

   brw_init_surface_formats(brw);

   /* Initialize swrast, tnl driver tables: */
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   if (tnl)
      tnl->Driver.RunPipeline = _tnl_run_pipeline;

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;

   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
      brw->has_surface_tile_offset = true;
      if (brw->gen < 6)
         brw->has_compr4 = true;
      brw->has_aa_line_parameters = true;
      brw->has_pln = true;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   /* WM maximum threads is number of EUs times number of threads per EU. */
   assert(brw->gen <= 7);

   if (brw->is_haswell) {
      if (brw->gt == 1) {
         brw->max_wm_threads = 102;
         brw->max_vs_threads = 70;
         brw->urb.size = 128;
         brw->urb.max_vs_entries = 640;
         brw->urb.max_gs_entries = 256;
      } else if (brw->gt == 2) {
         brw->max_wm_threads = 204;
         brw->max_vs_threads = 280;
         brw->urb.size = 256;
//......... some code omitted here .........
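Note the cleanup idiom in this (truncated) example: because brw is the root of a ralloc hierarchy, both failure paths (intelInitContext and hardware-context creation) unwind with a single ralloc_free(brw), and nothing allocated against brw needs individual cleanup. A condensed sketch of that error-path shape, with hypothetical step_one/step_two standing in for the real init calls:

struct brw_context *brw = rzalloc(NULL, struct brw_context);
if (!brw)
   return false;

if (!step_one(brw) || !step_two(brw)) {   /* hypothetical init steps */
   ralloc_free(brw);   /* frees brw and everything parented to it */
   return false;
}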
Example 3: brw_codegen_gs_prog

static bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct brw_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_compiler *compiler = brw->screen->compiler;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_data prog_data;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   assign_gs_binding_table_offsets(devinfo, &gp->program, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   int param_count = gp->program.nir->num_uniforms / 4;

   prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    gp->program.info.num_images);
   prog_data.base.base.nr_params = param_count;
   prog_data.base.base.nr_image_params = gp->program.info.num_images;

   brw_nir_setup_glsl_uniforms(gp->program.nir, &gp->program,
                               &prog_data.base.base,
                               compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   brw_nir_analyze_ubo_ranges(compiler, gp->program.nir,
                              prog_data.base.base.ubo_ranges);

   uint64_t outputs_written = gp->program.info.outputs_written;

   brw_compute_vue_map(devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       gp->program.info.separate_shader);

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);

   if (unlikely(brw->perf_debug)) {
      start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }

   void *mem_ctx = ralloc_context(NULL);

   unsigned program_size;
   char *error_str;
   const unsigned *program =
      brw_compile_gs(brw->screen->compiler, brw, mem_ctx, key,
                     &prog_data, gp->program.nir, &gp->program,
                     st_index, &program_size, &error_str);
   if (program == NULL) {
      ralloc_strcat(&gp->program.sh.data->InfoLog, error_str);
      _mesa_problem(NULL, "Failed to compile geometry shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (gp->compiled_once) {
         brw_gs_debug_recompile(brw, &gp->program, key);
      }
      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      gp->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   brw_alloc_stage_scratch(brw, stage_state,
                           prog_data.base.base.total_scratch,
                           devinfo->max_gs_threads);

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &stage_state->prog_offset, &brw->gs.base.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
Example 4: hash_table_dtor

~has_recursion_visitor()
{
   hash_table_dtor(this->function_hash);
   ralloc_free(this->mem_ctx);
}
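This destructor pairs with a constructor that creates the ralloc context and the hash table it tears down. A sketch of that constructor, assuming the classic Mesa hash_table_ctor helpers (a reconstruction, not the verbatim upstream code):

has_recursion_visitor()
{
   /* All per-pass allocations hang off this context, so the destructor
    * can release them with a single ralloc_free(). */
   this->mem_ctx = ralloc_context(NULL);
   this->function_hash = hash_table_ctor(0, hash_table_pointer_hash,
                                         hash_table_pointer_compare);
}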
Example 5: do_wm_prog

/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct brw_wm_compile *c;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   c = rzalloc(NULL, struct brw_wm_compile);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (fs) {
      param_count = fs->num_uniform_components;
   } else {
      param_count = fp->program.Base.Parameters->NumParameters * 4;
   }
   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * BRW_MAX_TEX_UNIT;

   c->prog_data.param = rzalloc_array(NULL, const float *, param_count);
   c->prog_data.pull_param = rzalloc_array(NULL, const float *, param_count);

   memcpy(&c->key, key, sizeof(*key));

   c->prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, c->key.flat_shade,
                                           &fp->program);

   program = brw_wm_fs_emit(brw, c, &fp->program, prog, &program_size);
   if (program == NULL)
      return false;

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      perf_debug("Fragment shader triggered register spilling. "
                 "Try reducing the number of live scalar values to "
                 "improve performance.\n");

      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      brw_get_scratch_bo(brw, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    &c->key, sizeof(c->key),
                    program, program_size,
                    &c->prog_data, sizeof(c->prog_data),
                    &brw->wm.prog_offset, &brw->wm.prog_data);

   ralloc_free(c);

   return true;
}
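One detail worth flagging in this example: c itself is freed at the end, but the param and pull_param arrays are allocated against a NULL ralloc parent because, per the comment, the state cache takes ownership and frees them later. When such a hand-off needs to be explicit, ralloc can reparent an allocation; a minimal sketch using ralloc_steal:

void *cache_ctx = ralloc_context(NULL);
const float **param = rzalloc_array(NULL, const float *, 16);

/* Reparent param under cache_ctx; from here on, freeing cache_ctx
 * frees param as well, and no separate ralloc_free(param) is needed. */
ralloc_steal(cache_ctx, param);

ralloc_free(cache_ctx);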
Example 6: constant_fold_alu_instr

static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* In the case that any outputs/inputs have unsized types, then we need to
    * guess the bit-size. In this case, the validator ensures that all
    * bit-sizes match so we can just take the bit-size from first
    * output/input with an unsized type. If all the outputs/inputs are sized
    * then we don't need to guess the bit-size at all because the code we
    * generate for constant opcodes in this case already knows the sizes of
    * the types involved and does not need the provided bit-size for anything
    * (although it still requires to receive a valid bit-size).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_sizes[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         switch (load_const->def.bit_size) {
         case 64:
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
            break;
         case 32:
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
            break;
         case 16:
            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
            break;
         case 8:
            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
            break;
         default:
            unreachable("Invalid bit size");
         }
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
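This helper only folds a single ALU instruction; the surrounding pass (not shown on this page) walks every block and calls it on each ALU instruction it finds. A minimal sketch of that driver loop, assuming the usual NIR iteration macros:

static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   /* The _safe variant is required because constant_fold_alu_instr
    * removes instructions while we iterate. */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_alu)
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr),
                                             mem_ctx);
   }

   return progress;
}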
Example 7: ralloc_free

brw_blorp_const_color_program::~brw_blorp_const_color_program()
{
   ralloc_free(mem_ctx);
}
Example 8: ralloc_free

~get_sampler_name()
{
   ralloc_free(this->mem_ctx);
}
Example 9: ralloc_free

~string_buffer()
{
   ralloc_free(m_Ptr);
}
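Examples 7 through 9 are all the same idiom: a C++ destructor releasing a ralloc context, or a single ralloc'd pointer as with m_Ptr here. A sketch of what the allocation side of a string_buffer like Example 9's might look like (member and method names are hypothetical, modeled on the destructor above):

string_buffer(void *parent)
{
   /* Start from an empty ralloc'd string; ralloc_strcat grows it in
    * place under the same ralloc parent, so the destructor's single
    * ralloc_free(m_Ptr) releases the whole buffer. */
   m_Ptr = ralloc_strdup(parent, "");
}

void append(const char *s)
{
   ralloc_strcat(&m_Ptr, s);
}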
Example 10: do_gs_prog

//......... some code omitted here .........
    *   512 bytes for varyings (a varying component is 4 bytes and
    *       gl_MaxGeometryOutputComponents = 128)
    *   16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *       bytes)
    *   16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *       even if it's not used)
    *   32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *       whenever clip planes are enabled, even if the shader doesn't
    *       write to gl_ClipDistance)
    *   16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *       (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = c.prog_data.base.vue_map.num_slots * 16;
   assert(output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   c.prog_data.output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *   64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *       gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *       bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *       even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *       whenever clip planes are enabled, even if the shader doesn't
    *       write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *       bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices.  So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    */
   unsigned output_size_bytes =
      c.prog_data.output_vertex_size_hwords * 32 * gp->program.VerticesOut;
   output_size_bytes += 32 * c.prog_data.control_data_header_size_hwords;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES)
      return false;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;

   c.prog_data.output_topology = prim_to_hw_prim[gp->program.OutputType];

   brw_compute_vue_map(brw, &c.input_vue_map, c.key.input_varyings);

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   c.prog_data.base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   const unsigned *program =
      brw_gs_emit(brw, prog, &c, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Geometry shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch * REG_SIZE);

      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
                         c.prog_data.base.total_scratch * brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, sizeof(c.prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
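The URB sizing in this example is plain alignment arithmetic. A worked example with assumed inputs (num_slots = 8, gp->program.VerticesOut = 4, one hword of control-data header), where ALIGN(x, n) rounds x up to the next multiple of n:

/* output_vertex_size_bytes  = 8 slots * 16 bytes              = 128
 * output_vertex_size_hwords = ALIGN(128, 32) / 32             = 4
 * output_size_bytes         = 4 * 32 * 4 vertices + 32 * 1    = 544
 * urb_entry_size            = ALIGN(544, 64) / 64             = 9   (64-byte units)
 */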
Example 11: brw_wm_clear_compile

void
brw_wm_clear_compile(struct brw_context *brw,
                     struct brw_wm_compile *c)
{
   ralloc_free(c);
}
Example 12: do_wm_prog

/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   memset(&prog_data, 0, sizeof(prog_data));

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (fs) {
      param_count = fs->num_uniform_components;
   } else {
      param_count = fp->program.Base.Parameters->NumParameters * 4;
   }
   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;

   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.nr_params = param_count;

   prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, key->flat_shade,
                                           key->persample_shading,
                                           &fp->program);

   program = brw_wm_fs_emit(brw, mem_ctx, key, &prog_data,
                            &fp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   if (prog_data.total_scratch) {
      brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo,
                         prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
Example 13: compile_gs_prog

static void compile_gs_prog( struct brw_context *brw,
                             struct brw_gs_prog_key *key )
{
   struct brw_gs_compile c;
   const GLuint *program;
   void *mem_ctx;
   GLuint program_size;

   memset(&c, 0, sizeof(c));

   c.key = *key;
   c.vue_map = brw->vs.prog_data->base.vue_map;
   c.nr_regs = (c.vue_map.num_slots + 1) / 2;

   mem_ctx = ralloc_context(NULL);

   /* Begin the compilation:
    */
   brw_init_compile(brw, &c.func, mem_ctx);

   c.func.single_program_flow = 1;

   /* For some reason the thread is spawned with only 4 channels
    * unmasked.
    */
   brw_set_mask_control(&c.func, BRW_MASK_DISABLE);

   if (brw->gen >= 6) {
      unsigned num_verts;
      bool check_edge_flag;
      /* On Sandybridge, we use the GS for implementing transform feedback
       * (called "Stream Out" in the PRM).
       */
      switch (key->primitive) {
      case _3DPRIM_POINTLIST:
         num_verts = 1;
         check_edge_flag = false;
         break;
      case _3DPRIM_LINELIST:
      case _3DPRIM_LINESTRIP:
      case _3DPRIM_LINELOOP:
         num_verts = 2;
         check_edge_flag = false;
         break;
      case _3DPRIM_TRILIST:
      case _3DPRIM_TRIFAN:
      case _3DPRIM_TRISTRIP:
      case _3DPRIM_RECTLIST:
         num_verts = 3;
         check_edge_flag = false;
         break;
      case _3DPRIM_QUADLIST:
      case _3DPRIM_QUADSTRIP:
      case _3DPRIM_POLYGON:
         num_verts = 3;
         check_edge_flag = true;
         break;
      default:
         assert(!"Unexpected primitive type in Gen6 SOL program.");
         return;
      }
      gen6_sol_program(&c, key, num_verts, check_edge_flag);
   } else {
      /* On Gen4-5, we use the GS to decompose certain types of primitives.
       * Note that primitives which don't require a GS program have already
       * been weeded out by now.
       */
      switch (key->primitive) {
      case _3DPRIM_QUADLIST:
         brw_gs_quads( &c, key );
         break;
      case _3DPRIM_QUADSTRIP:
         brw_gs_quad_strip( &c, key );
         break;
      case _3DPRIM_LINELOOP:
         brw_gs_lines( &c );
         break;
      default:
         ralloc_free(mem_ctx);
         return;
      }
   }

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
      int i;

      printf("gs:\n");
      for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
         brw_disasm(stdout, &((struct brw_instruction *)program)[i],
                    brw->gen);
      printf("\n");
   }

   brw_upload_cache(&brw->cache, BRW_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
//......... the rest of this example is omitted .........
Example 14: setup_glsl_msaa_blit_shader

//......... some code omitted here .........
      if (dst_is_msaa) {
         arb_sample_shading_extension_string = "#extension GL_ARB_sample_shading : enable";
         sample_resolve = ralloc_asprintf(mem_ctx, " out_color = texelFetch(texSampler, i%s(texCoords), gl_SampleID);", texcoord_type);
         merge_function = "";
      } else {
         int i;
         int step;

         if (src_datatype == GL_INT || src_datatype == GL_UNSIGNED_INT) {
            merge_function =
               "gvec4 merge(gvec4 a, gvec4 b) { return (a >> gvec4(1)) + (b >> gvec4(1)) + (a & b & gvec4(1)); }\n";
         } else {
            /* The divide will happen at the end for floats. */
            merge_function =
               "vec4 merge(vec4 a, vec4 b) { return (a + b); }\n";
         }

         arb_sample_shading_extension_string = "";

         /* We're assuming power of two samples for this resolution procedure.
          *
          * To avoid losing any floating point precision if the samples all
          * happen to have the same value, we merge pairs of values at a time
          * (so the floating point exponent just gets increased), rather than
          * doing a naive sum and dividing.
          */
         assert(_mesa_is_pow_two(samples));

         /* Fetch each individual sample. */
         sample_resolve = rzalloc_size(mem_ctx, 1);
         for (i = 0; i < samples; i++) {
            ralloc_asprintf_append(&sample_resolve,
                                   " gvec4 sample_1_%d = texelFetch(texSampler, i%s(texCoords), %d);\n",
                                   i, texcoord_type, i);
         }

         /* Now, merge each pair of samples, then merge each pair of those,
          * etc.
          */
         for (step = 2; step <= samples; step *= 2) {
            for (i = 0; i < samples; i += step) {
               ralloc_asprintf_append(&sample_resolve,
                                      " gvec4 sample_%d_%d = merge(sample_%d_%d, sample_%d_%d);\n",
                                      step, i,
                                      step / 2, i,
                                      step / 2, i + step / 2);
            }
         }

         /* Scale the final result. */
         if (src_datatype == GL_UNSIGNED_INT || src_datatype == GL_INT) {
            ralloc_asprintf_append(&sample_resolve,
                                   " out_color = sample_%d_0;\n",
                                   samples);
         } else {
            ralloc_asprintf_append(&sample_resolve,
                                   " gl_FragColor = sample_%d_0 / %f;\n",
                                   samples, (float)samples);
         }
      }

      vs_source = ralloc_asprintf(mem_ctx,
                                  "#version 130\n"
                                  "in vec2 position;\n"
                                  "in %s textureCoords;\n"
                                  "out %s texCoords;\n"
                                  "void main()\n"
                                  "{\n"
                                  " texCoords = textureCoords;\n"
                                  " gl_Position = vec4(position, 0.0, 1.0);\n"
                                  "}\n",
                                  texcoord_type,
                                  texcoord_type);
      fs_source = ralloc_asprintf(mem_ctx,
                                  "#version 130\n"
                                  "#extension GL_ARB_texture_multisample : enable\n"
                                  "%s\n"
                                  "#define gvec4 %svec4\n"
                                  "uniform %ssampler2DMS%s texSampler;\n"
                                  "in %s texCoords;\n"
                                  "out gvec4 out_color;\n"
                                  "\n"
                                  "%s" /* merge_function */
                                  "void main()\n"
                                  "{\n"
                                  "%s\n" /* sample_resolve */
                                  "}\n",
                                  arb_sample_shading_extension_string,
                                  vec4_prefix,
                                  vec4_prefix,
                                  sampler_array_suffix,
                                  texcoord_type,
                                  merge_function,
                                  sample_resolve);
   }

   _mesa_meta_compile_and_link_program(ctx, vs_source, fs_source, name,
                                       &blit->msaa_shaders[shader_index]);

   ralloc_free(mem_ctx);
}
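To make the pairwise merge concrete: with samples = 4, texcoord_type = "vec2", and a float source, the sample_resolve string built above would read roughly as follows (reconstructed by hand from the printf formats; treat it as illustrative):

gvec4 sample_1_0 = texelFetch(texSampler, ivec2(texCoords), 0);
gvec4 sample_1_1 = texelFetch(texSampler, ivec2(texCoords), 1);
gvec4 sample_1_2 = texelFetch(texSampler, ivec2(texCoords), 2);
gvec4 sample_1_3 = texelFetch(texSampler, ivec2(texCoords), 3);
gvec4 sample_2_0 = merge(sample_1_0, sample_1_1);
gvec4 sample_2_2 = merge(sample_1_2, sample_1_3);
gvec4 sample_4_0 = merge(sample_2_0, sample_2_2);
gl_FragColor = sample_4_0 / 4.000000;

For integer sources the merge function instead computes a pairwise floor-average, (a >> 1) + (b >> 1) + (a & b & 1), which avoids overflow that a naive sum of all samples could produce.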
Example 15: assert

void
program_resource_visitor::process(ir_variable *var)
{
   const glsl_type *t = var->type;
   const bool row_major =
      var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;

   /* false is always passed for the row_major parameter to the other
    * processing functions because no information is available to do
    * otherwise.  See the warning in linker.h.
    */

   /* Only strdup the name if we actually will need to modify it. */
   if (var->data.from_named_ifc_block_array) {
      /* lower_named_interface_blocks created this variable by lowering an
       * interface block array to an array variable.  For example if the
       * original source code was:
       *
       *     out Blk { vec4 bar } foo[3];
       *
       * Then the variable is now:
       *
       *     out vec4 bar[3];
       *
       * We need to visit each array element using the names constructed like
       * so:
       *
       *     Blk[0].bar
       *     Blk[1].bar
       *     Blk[2].bar
       */
      assert(t->is_array());
      const glsl_type *ifc_type = var->get_interface_type();
      char *name = ralloc_strdup(NULL, ifc_type->name);
      size_t name_length = strlen(name);
      for (unsigned i = 0; i < t->length; i++) {
         size_t new_length = name_length;
         ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i,
                                      var->name);
         /* Note: row_major is only meaningful for uniform blocks, and
          * lowering is only applied to non-uniform interface blocks, so we
          * can safely pass false for row_major.
          */
         recursion(var->type, &name, new_length, row_major, NULL, false);
      }
      ralloc_free(name);
   } else if (var->data.from_named_ifc_block_nonarray) {
      /* lower_named_interface_blocks created this variable by lowering a
       * named interface block (non-array) to an ordinary variable.  For
       * example if the original source code was:
       *
       *     out Blk { vec4 bar } foo;
       *
       * Then the variable is now:
       *
       *     out vec4 bar;
       *
       * We need to visit this variable using the name:
       *
       *     Blk.bar
       */
      const glsl_type *ifc_type = var->get_interface_type();
      char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name);
      /* Note: row_major is only meaningful for uniform blocks, and lowering
       * is only applied to non-uniform interface blocks, so we can safely
       * pass false for row_major.
       */
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->without_array()->is_record()) {
      char *name = ralloc_strdup(NULL, var->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->is_interface()) {
      char *name = ralloc_strdup(NULL, var->type->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->is_array() && t->fields.array->is_interface()) {
      char *name = ralloc_strdup(NULL, var->type->fields.array->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else {
      this->visit_field(t, var->name, row_major, NULL, false);
   }
}
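The array branch above reuses one ralloc'd buffer for every element name via ralloc_asprintf_rewrite_tail, which reformats from a saved offset instead of building each name from scratch. A minimal standalone sketch of that trick, assuming three elements and a member named bar:

char *name = ralloc_strdup(NULL, "Blk");
size_t base_len = strlen(name);   /* the tail starts right after "Blk" */

for (unsigned i = 0; i < 3; i++) {
   size_t len = base_len;
   /* Rewrites the tail in place: "Blk[0].bar", "Blk[1].bar", "Blk[2].bar" */
   ralloc_asprintf_rewrite_tail(&name, &len, "[%u].%s", i, "bar");
   printf("%s\n", name);
}

ralloc_free(name);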