This article collects typical usage examples of the TYPE_PRECISION function in C++. If you are wondering what exactly TYPE_PRECISION does, how to call it, or what real uses of it look like, the hand-picked code examples below may help.
The text below presents 15 code examples of the TYPE_PRECISION function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
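As a quick orientation before the examples: TYPE_PRECISION is a macro from GCC's internal tree.h, and for an integral, boolean, enumeral, fixed-point, or real type node it yields the number of value bits of that type (which may be smaller than the storage size given by TYPE_SIZE). A minimal usage sketch, assuming it is compiled inside GCC where tree.h and the standard type nodes are in scope; the helper name is purely illustrative:
/* Hypothetical helper, not taken from the examples below: print the
   precision of a few well-known type nodes.  On a typical ILP32/LP64
   target, integer_type_node has 32 bits of precision and
   long_long_integer_type_node has 64, while boolean_type_node usually
   has just 1 even though it occupies a full byte.  */
static void
show_precisions (void)
{
  fprintf (stderr, "int=%d long long=%d bool=%d\n",
           (int) TYPE_PRECISION (integer_type_node),
           (int) TYPE_PRECISION (long_long_integer_type_node),
           (int) TYPE_PRECISION (boolean_type_node));
}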
Example 1: print_node
//......... part of the code omitted here .........
if (TYPE_LANG_FLAG_3 (node))
fputs (" type_3", file);
if (TYPE_LANG_FLAG_4 (node))
fputs (" type_4", file);
if (TYPE_LANG_FLAG_5 (node))
fputs (" type_5", file);
if (TYPE_LANG_FLAG_6 (node))
fputs (" type_6", file);
mode = TYPE_MODE (node);
fprintf (file, " %s", GET_MODE_NAME (mode));
print_node (file, "size", TYPE_SIZE (node), indent + 4);
print_node (file, "unit size", TYPE_SIZE_UNIT (node), indent + 4);
indent_to (file, indent + 3);
if (TYPE_USER_ALIGN (node))
fprintf (file, " user");
fprintf (file, " align %d symtab %d alias set " HOST_WIDE_INT_PRINT_DEC,
TYPE_ALIGN (node), TYPE_SYMTAB_ADDRESS (node),
(HOST_WIDE_INT) TYPE_ALIAS_SET (node));
if (TYPE_STRUCTURAL_EQUALITY_P (node))
fprintf (file, " structural equality");
else
dump_addr (file, " canonical type ", TYPE_CANONICAL (node));
print_node (file, "attributes", TYPE_ATTRIBUTES (node), indent + 4);
if (INTEGRAL_TYPE_P (node) || code == REAL_TYPE
|| code == FIXED_POINT_TYPE)
{
fprintf (file, " precision %d", TYPE_PRECISION (node));
print_node_brief (file, "min", TYPE_MIN_VALUE (node), indent + 4);
print_node_brief (file, "max", TYPE_MAX_VALUE (node), indent + 4);
}
if (code == ENUMERAL_TYPE)
print_node (file, "values", TYPE_VALUES (node), indent + 4);
else if (code == ARRAY_TYPE)
print_node (file, "domain", TYPE_DOMAIN (node), indent + 4);
else if (code == VECTOR_TYPE)
fprintf (file, " nunits %d", (int) TYPE_VECTOR_SUBPARTS (node));
else if (code == RECORD_TYPE
|| code == UNION_TYPE
|| code == QUAL_UNION_TYPE)
print_node (file, "fields", TYPE_FIELDS (node), indent + 4);
else if (code == FUNCTION_TYPE
|| code == METHOD_TYPE)
{
if (TYPE_METHOD_BASETYPE (node))
print_node_brief (file, "method basetype",
TYPE_METHOD_BASETYPE (node), indent + 4);
print_node (file, "arg-types", TYPE_ARG_TYPES (node), indent + 4);
}
else if (code == OFFSET_TYPE)
print_node_brief (file, "basetype", TYPE_OFFSET_BASETYPE (node),
indent + 4);
if (TYPE_CONTEXT (node))
print_node_brief (file, "context", TYPE_CONTEXT (node), indent + 4);
lang_hooks.print_type (file, node, indent);
if (TYPE_POINTER_TO (node) || TREE_CHAIN (node))
Example 2: plain_type_1
static int
plain_type_1 (tree type, int level)
{
if (type == 0)
type = void_type_node;
else if (type == error_mark_node)
type = integer_type_node;
else
type = TYPE_MAIN_VARIANT (type);
switch (TREE_CODE (type))
{
case VOID_TYPE:
return T_VOID;
case BOOLEAN_TYPE:
case INTEGER_TYPE:
{
int size = int_size_in_bytes (type) * BITS_PER_UNIT;
/* Carefully distinguish all the standard types of C,
without messing up if the language is not C.
Note that we check only for the names that contain spaces;
other names might occur by coincidence in other languages. */
if (TYPE_NAME (type) != 0
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
&& DECL_NAME (TYPE_NAME (type)) != 0
&& TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
{
const char *const name
= IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
if (!strcmp (name, "char"))
return T_CHAR;
if (!strcmp (name, "unsigned char"))
return T_UCHAR;
if (!strcmp (name, "signed char"))
return T_CHAR;
if (!strcmp (name, "int"))
return T_INT;
if (!strcmp (name, "unsigned int"))
return T_UINT;
if (!strcmp (name, "short int"))
return T_SHORT;
if (!strcmp (name, "short unsigned int"))
return T_USHORT;
if (!strcmp (name, "long int"))
return T_LONG;
if (!strcmp (name, "long unsigned int"))
return T_ULONG;
}
if (size == INT_TYPE_SIZE)
return (TYPE_UNSIGNED (type) ? T_UINT : T_INT);
if (size == CHAR_TYPE_SIZE)
return (TYPE_UNSIGNED (type) ? T_UCHAR : T_CHAR);
if (size == SHORT_TYPE_SIZE)
return (TYPE_UNSIGNED (type) ? T_USHORT : T_SHORT);
if (size == LONG_TYPE_SIZE)
return (TYPE_UNSIGNED (type) ? T_ULONG : T_LONG);
if (size == LONG_LONG_TYPE_SIZE) /* better than nothing */
return (TYPE_UNSIGNED (type) ? T_ULONG : T_LONG);
return 0;
}
case REAL_TYPE:
{
int precision = TYPE_PRECISION (type);
if (precision == FLOAT_TYPE_SIZE)
return T_FLOAT;
if (precision == DOUBLE_TYPE_SIZE)
return T_DOUBLE;
#ifdef EXTENDED_SDB_BASIC_TYPES
if (precision == LONG_DOUBLE_TYPE_SIZE)
return T_LNGDBL;
#else
if (precision == LONG_DOUBLE_TYPE_SIZE)
return T_DOUBLE; /* better than nothing */
#endif
return 0;
}
case ARRAY_TYPE:
{
int m;
if (level >= 6)
return T_VOID;
else
m = plain_type_1 (TREE_TYPE (type), level+1);
if (sdb_n_dims < SDB_MAX_DIM)
sdb_dims[sdb_n_dims++]
= (TYPE_DOMAIN (type)
&& TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != 0
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != 0
&& host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
&& host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0)
? (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
- tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1)
: 0);
return PUSH_DERIVED_LEVEL (DT_ARY, m);
//......... part of the code omitted here .........
Example 3: arm_output_c_attributes
static void
arm_output_c_attributes (void)
{
int wchar_size = (int)(TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT);
arm_emit_eabi_attribute ("Tag_ABI_PCS_wchar_t", 18, wchar_size);
}
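The division by BITS_PER_UNIT converts the bit precision of wchar_t into a byte count before it is emitted as the Tag_ABI_PCS_wchar_t EABI attribute. A standalone illustration of that arithmetic in plain C (not GCC code), assuming a 32-bit wchar_t and 8-bit units as on typical ARM EABI targets:
#include <stdio.h>

int main (void)
{
  int bits_per_unit = 8;      /* stands in for GCC's BITS_PER_UNIT */
  int wchar_precision = 32;   /* stands in for TYPE_PRECISION (wchar_type_node) */
  /* The attribute value ends up as the size of wchar_t in bytes.  */
  printf ("Tag_ABI_PCS_wchar_t = %d\n", wchar_precision / bits_per_unit);
  return 0;
}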
Example 4: fold_const_call_1
static tree
fold_const_call_1 (built_in_function fn, tree type, tree arg)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));
if (integer_cst_p (arg))
{
if (SCALAR_INT_MODE_P (mode))
{
wide_int result;
if (fold_const_call_ss (&result, fn, arg, TYPE_PRECISION (type),
TREE_TYPE (arg)))
return wide_int_to_tree (type, result);
}
return NULL_TREE;
}
if (real_cst_p (arg))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
if (mode == arg_mode)
{
/* real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_ss (&result, fn, TREE_REAL_CST_PTR (arg),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
else if (COMPLEX_MODE_P (mode)
&& GET_MODE_INNER (mode) == arg_mode)
{
/* real -> complex real. */
REAL_VALUE_TYPE result_real, result_imag;
if (fold_const_call_cs (&result_real, &result_imag, fn,
TREE_REAL_CST_PTR (arg),
REAL_MODE_FORMAT (arg_mode)))
return build_complex (type,
build_real (TREE_TYPE (type), result_real),
build_real (TREE_TYPE (type), result_imag));
}
else if (INTEGRAL_TYPE_P (type))
{
/* real -> int. */
wide_int result;
if (fold_const_call_ss (&result, fn,
TREE_REAL_CST_PTR (arg),
TYPE_PRECISION (type),
REAL_MODE_FORMAT (arg_mode)))
return wide_int_to_tree (type, result);
}
return NULL_TREE;
}
if (complex_cst_p (arg))
{
gcc_checking_assert (COMPLEX_MODE_P (arg_mode));
machine_mode inner_mode = GET_MODE_INNER (arg_mode);
tree argr = TREE_REALPART (arg);
tree argi = TREE_IMAGPART (arg);
if (mode == arg_mode
&& real_cst_p (argr)
&& real_cst_p (argi))
{
/* complex real -> complex real. */
REAL_VALUE_TYPE result_real, result_imag;
if (fold_const_call_cc (&result_real, &result_imag, fn,
TREE_REAL_CST_PTR (argr),
TREE_REAL_CST_PTR (argi),
REAL_MODE_FORMAT (inner_mode)))
return build_complex (type,
build_real (TREE_TYPE (type), result_real),
build_real (TREE_TYPE (type), result_imag));
}
if (mode == inner_mode
&& real_cst_p (argr)
&& real_cst_p (argi))
{
/* complex real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sc (&result, fn,
TREE_REAL_CST_PTR (argr),
TREE_REAL_CST_PTR (argi),
REAL_MODE_FORMAT (inner_mode)))
return build_real (type, result);
}
return NULL_TREE;
}
return NULL_TREE;
}
Example 5: gen_conditions_for_pow_int_base
static void
gen_conditions_for_pow_int_base (tree base, tree expn,
vec<gimple> conds,
unsigned *nconds)
{
gimple base_def;
tree base_val0;
tree int_type;
tree temp, tempn;
tree cst0;
gimple stmt1, stmt2;
int bit_sz, max_exp;
inp_domain exp_domain;
base_def = SSA_NAME_DEF_STMT (base);
base_val0 = gimple_assign_rhs1 (base_def);
int_type = TREE_TYPE (base_val0);
bit_sz = TYPE_PRECISION (int_type);
gcc_assert (bit_sz > 0
&& bit_sz <= MAX_BASE_INT_BIT_SIZE);
/* Determine the max exp argument value according to
the size of the base integer. The max exp value
is conservatively estimated assuming IEEE754 double
precision format. */
if (bit_sz == 8)
max_exp = 128;
else if (bit_sz == 16)
max_exp = 64;
else
{
gcc_assert (bit_sz == MAX_BASE_INT_BIT_SIZE);
max_exp = 32;
}
/* For pow ((double)x, y), generate the following conditions:
cond 1:
temp1 = x;
if (temp1 <= 0)
cond 2:
temp2 = y;
if (temp2 > max_exp_real_cst) */
/* Generate condition in reverse order -- first
the condition for the exp argument. */
exp_domain = get_domain (0, false, false,
max_exp, true, true);
gen_conditions_for_domain (expn, exp_domain,
conds, nconds);
/* Now generate condition for the base argument.
Note it does not use the helper function
gen_conditions_for_domain because the base
type is integer. */
/* Push a separator. */
conds.quick_push (NULL);
temp = create_tmp_var (int_type, "DCE_COND1");
cst0 = build_int_cst (int_type, 0);
stmt1 = gimple_build_assign (temp, base_val0);
tempn = make_ssa_name (temp, stmt1);
gimple_assign_set_lhs (stmt1, tempn);
stmt2 = gimple_build_cond (LE_EXPR, tempn, cst0, NULL_TREE, NULL_TREE);
conds.quick_push (stmt1);
conds.quick_push (stmt2);
(*nconds)++;
}
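The max_exp table above can be checked against the range of IEEE754 double: the largest finite double is a little under 2^1024, so an n-bit base (at most 2^n - 1) raised to an exponent of roughly 1024 / n still fits, which gives the conservative limits 128, 64 and 32 for 8-, 16- and 32-bit bases. A standalone check of the 8-bit case in plain C (not GCC code; link with -lm):
#include <math.h>
#include <stdio.h>

int main (void)
{
  /* 255^128 is about 1.1e308 and still finite; 256^128 is exactly 2^1024
     and overflows to infinity, so 128 is the right cutoff for 8-bit bases.  */
  printf ("255^128 finite: %s\n", isfinite (pow (255.0, 128.0)) ? "yes" : "no");
  printf ("256^128 finite: %s\n", isfinite (pow (256.0, 128.0)) ? "yes" : "no");
  return 0;
}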
Example 6: ubsan_instrument_float_cast
tree
ubsan_instrument_float_cast (location_t loc, tree type, tree expr)
{
tree expr_type = TREE_TYPE (expr);
tree t, tt, fn, min, max;
enum machine_mode mode = TYPE_MODE (expr_type);
int prec = TYPE_PRECISION (type);
bool uns_p = TYPE_UNSIGNED (type);
/* Float to integer conversion first truncates toward zero, so
even signed char c = 127.875f; is not problematic.
Therefore, we should complain only if EXPR is unordered or smaller
or equal than TYPE_MIN_VALUE - 1.0 or greater or equal than
TYPE_MAX_VALUE + 1.0. */
if (REAL_MODE_FORMAT (mode)->b == 2)
{
/* For maximum, TYPE_MAX_VALUE might not be representable
in EXPR_TYPE, e.g. if TYPE is 64-bit long long and
EXPR_TYPE is IEEE single float, but TYPE_MAX_VALUE + 1.0 is
either representable or infinity. */
REAL_VALUE_TYPE maxval = dconst1;
SET_REAL_EXP (&maxval, REAL_EXP (&maxval) + prec - !uns_p);
real_convert (&maxval, mode, &maxval);
max = build_real (expr_type, maxval);
/* For unsigned, assume -1.0 is always representable. */
if (uns_p)
min = build_minus_one_cst (expr_type);
else
{
/* TYPE_MIN_VALUE is generally representable (or -inf),
but TYPE_MIN_VALUE - 1.0 might not be. */
REAL_VALUE_TYPE minval = dconstm1, minval2;
SET_REAL_EXP (&minval, REAL_EXP (&minval) + prec - 1);
real_convert (&minval, mode, &minval);
real_arithmetic (&minval2, MINUS_EXPR, &minval, &dconst1);
real_convert (&minval2, mode, &minval2);
if (real_compare (EQ_EXPR, &minval, &minval2)
&& !real_isinf (&minval))
{
/* If TYPE_MIN_VALUE - 1.0 is not representable and
rounds to TYPE_MIN_VALUE, we need to subtract
more. As REAL_MODE_FORMAT (mode)->p is the number
of base digits, we want to subtract a number that
will be 1 << (REAL_MODE_FORMAT (mode)->p - 1)
times smaller than minval. */
minval2 = dconst1;
gcc_assert (prec > REAL_MODE_FORMAT (mode)->p);
SET_REAL_EXP (&minval2,
REAL_EXP (&minval2) + prec - 1
- REAL_MODE_FORMAT (mode)->p + 1);
real_arithmetic (&minval2, MINUS_EXPR, &minval, &minval2);
real_convert (&minval2, mode, &minval2);
}
min = build_real (expr_type, minval2);
}
}
else if (REAL_MODE_FORMAT (mode)->b == 10)
{
/* For _Decimal128 up to 34 decimal digits, - sign,
dot, e, exponent. */
char buf[64];
mpfr_t m;
int p = REAL_MODE_FORMAT (mode)->p;
REAL_VALUE_TYPE maxval, minval;
/* Use mpfr_snprintf rounding to compute the smallest
representable decimal number greater or equal than
1 << (prec - !uns_p). */
mpfr_init2 (m, prec + 2);
mpfr_set_ui_2exp (m, 1, prec - !uns_p, GMP_RNDN);
mpfr_snprintf (buf, sizeof buf, "%.*RUe", p - 1, m);
decimal_real_from_string (&maxval, buf);
max = build_real (expr_type, maxval);
/* For unsigned, assume -1.0 is always representable. */
if (uns_p)
min = build_minus_one_cst (expr_type);
else
{
/* Use mpfr_snprintf rounding to compute the largest
representable decimal number less or equal than
(-1 << (prec - 1)) - 1. */
mpfr_set_si_2exp (m, -1, prec - 1, GMP_RNDN);
mpfr_sub_ui (m, m, 1, GMP_RNDN);
mpfr_snprintf (buf, sizeof buf, "%.*RDe", p - 1, m);
decimal_real_from_string (&minval, buf);
min = build_real (expr_type, minval);
}
mpfr_clear (m);
}
else
return NULL_TREE;
if (flag_sanitize_undefined_trap_on_error)
fn = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
else
{
/* Create the __ubsan_handle_float_cast_overflow fn call. */
tree data = ubsan_create_data ("__ubsan_float_cast_overflow_data", NULL,
//......... part of the code omitted here .........
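The omitted tail of this example only builds the handler call; the bounds computed above amount to requiring that the floating-point value lies strictly between TYPE_MIN_VALUE - 1.0 and TYPE_MAX_VALUE + 1.0 and is not unordered. A standalone sketch of the binary-format case for a signed 32-bit target type, in plain C rather than the GCC implementation:
#include <math.h>
#include <stdio.h>

/* With prec = 32 and a signed target type, maxval ends up as
   2^31 = 2147483648.0, and minval - 1.0 = -2147483649.0 is still exactly
   representable in double, so no extra adjustment is needed.  */
static int float_to_int32_is_safe (double value)
{
  double max = 2147483648.0;    /* TYPE_MAX_VALUE + 1 */
  double min = -2147483649.0;   /* TYPE_MIN_VALUE - 1 */
  return value > min && value < max;   /* ordered compare also rejects NaN */
}

int main (void)
{
  printf ("%d %d %d\n",
          float_to_int32_is_safe (2147483647.9),   /* 1: truncates to INT_MAX */
          float_to_int32_is_safe (2147483648.0),   /* 0: would overflow */
          float_to_int32_is_safe (nan ("")));      /* 0: unordered */
  return 0;
}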
Example 7: instrument_bool_enum_load
static void
instrument_bool_enum_load (gimple_stmt_iterator *gsi)
{
gimple stmt = gsi_stmt (*gsi);
tree rhs = gimple_assign_rhs1 (stmt);
tree type = TREE_TYPE (rhs);
tree minv = NULL_TREE, maxv = NULL_TREE;
if (TREE_CODE (type) == BOOLEAN_TYPE && (flag_sanitize & SANITIZE_BOOL))
{
minv = boolean_false_node;
maxv = boolean_true_node;
}
else if (TREE_CODE (type) == ENUMERAL_TYPE
&& (flag_sanitize & SANITIZE_ENUM)
&& TREE_TYPE (type) != NULL_TREE
&& TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
&& (TYPE_PRECISION (TREE_TYPE (type))
< GET_MODE_PRECISION (TYPE_MODE (type))))
{
minv = TYPE_MIN_VALUE (TREE_TYPE (type));
maxv = TYPE_MAX_VALUE (TREE_TYPE (type));
}
else
return;
int modebitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
HOST_WIDE_INT bitsize, bitpos;
tree offset;
enum machine_mode mode;
int volatilep = 0, unsignedp = 0;
tree base = get_inner_reference (rhs, &bitsize, &bitpos, &offset, &mode,
&unsignedp, &volatilep, false);
tree utype = build_nonstandard_integer_type (modebitsize, 1);
if ((TREE_CODE (base) == VAR_DECL && DECL_HARD_REGISTER (base))
|| (bitpos % modebitsize) != 0
|| bitsize != modebitsize
|| GET_MODE_BITSIZE (TYPE_MODE (utype)) != modebitsize
|| TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
return;
location_t loc = gimple_location (stmt);
tree ptype = build_pointer_type (TREE_TYPE (rhs));
tree atype = reference_alias_ptr_type (rhs);
gimple g = gimple_build_assign (make_ssa_name (ptype, NULL),
build_fold_addr_expr (rhs));
gimple_set_location (g, loc);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
tree mem = build2 (MEM_REF, utype, gimple_assign_lhs (g),
build_int_cst (atype, 0));
tree urhs = make_ssa_name (utype, NULL);
g = gimple_build_assign (urhs, mem);
gimple_set_location (g, loc);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
minv = fold_convert (utype, minv);
maxv = fold_convert (utype, maxv);
if (!integer_zerop (minv))
{
g = gimple_build_assign_with_ops (MINUS_EXPR,
make_ssa_name (utype, NULL),
urhs, minv);
gimple_set_location (g, loc);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
}
gimple_stmt_iterator gsi2 = *gsi;
basic_block then_bb, fallthru_bb;
*gsi = create_cond_insert_point (gsi, true, false, true,
&then_bb, &fallthru_bb);
g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
int_const_binop (MINUS_EXPR, maxv, minv),
NULL_TREE, NULL_TREE);
gimple_set_location (g, loc);
gsi_insert_after (gsi, g, GSI_NEW_STMT);
gimple_assign_set_rhs_with_ops (&gsi2, NOP_EXPR, urhs, NULL_TREE);
update_stmt (stmt);
gsi2 = gsi_after_labels (then_bb);
if (flag_sanitize_undefined_trap_on_error)
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
else
{
tree data = ubsan_create_data ("__ubsan_invalid_value_data", &loc, NULL,
ubsan_type_descriptor (type), NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
enum built_in_function bcode
= flag_sanitize_recover
? BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE
: BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT;
tree fn = builtin_decl_explicit (bcode);
tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
true, NULL_TREE, true,
GSI_SAME_STMT);
g = gimple_build_call (fn, 2, data, val);
}
gimple_set_location (g, loc);
gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
//......... part of the code omitted here .........
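The omitted tail only emits the call built above; the core of the instrumentation is that the value is re-loaded through an unsigned type of the same mode, minv is subtracted, and a single unsigned comparison against maxv - minv catches everything outside [minv, maxv]. A standalone sketch of that range trick in plain C (not GCC code), assuming an enum whose valid values are 3..7:
#include <stdio.h>

/* One unsigned comparison replaces "v < 3 || v > 7": after subtracting
   the minimum, any value below it wraps around to a huge unsigned number.  */
static int enum_value_is_valid (unsigned int v)
{
  unsigned int minv = 3, maxv = 7;
  return v - minv <= maxv - minv;
}

int main (void)
{
  printf ("%d %d %d\n",
          enum_value_is_valid (5),    /* 1 */
          enum_value_is_valid (2),    /* 0 */
          enum_value_is_valid (8));   /* 0 */
  return 0;
}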
Example 8: merge_types
//......... part of the code omitted here .........
int depth1, depth2;
tree tt1, tt2;
/* ptr_type_node is only used for a null reference,
which is compatible with any reference type. */
if (type1 == ptr_type_node || type2 == object_ptr_type_node)
return type2;
if (type2 == ptr_type_node || type1 == object_ptr_type_node)
return type1;
tt1 = TREE_TYPE (type1);
tt2 = TREE_TYPE (type2);
/* If tt{1,2} haven't been properly loaded, now is a good time
to do it. */
if (!TYPE_SIZE (tt1))
{
load_class (tt1, 1);
safe_layout_class (tt1);
}
if (!TYPE_SIZE (tt2))
{
load_class (tt2, 1);
safe_layout_class (tt2);
}
if (TYPE_ARRAY_P (tt1) || TYPE_ARRAY_P (tt2))
{
if (TYPE_ARRAY_P (tt1) == TYPE_ARRAY_P (tt2))
{
tree el_type1 = TYPE_ARRAY_ELEMENT (tt1);
tree el_type2 = TYPE_ARRAY_ELEMENT (tt2);
tree el_type = NULL_TREE;
if (el_type1 == el_type2)
el_type = el_type1;
else if (TREE_CODE (el_type1) == POINTER_TYPE
&& TREE_CODE (el_type2) == POINTER_TYPE)
el_type = merge_types (el_type1, el_type2);
if (el_type != NULL_TREE)
{
HOST_WIDE_INT len1 = java_array_type_length (tt1);
HOST_WIDE_INT len2 = java_array_type_length (tt2);
if (len1 != len2)
len1 = -1;
else if (el_type1 == el_type2)
return type1;
return promote_type (build_java_array_type (el_type, len1));
}
}
return object_ptr_type_node;
}
if (CLASS_INTERFACE (TYPE_NAME (tt1)))
{
/* FIXME: should see if two interfaces have a common
superinterface. */
if (CLASS_INTERFACE (TYPE_NAME (tt2)))
{
/* This is a kludge, but matches what Sun's verifier does.
It can be tricked, but is safe as long as type errors
(i.e. interface method calls) are caught at run-time. */
return object_ptr_type_node;
}
else
{
if (can_widen_reference_to (tt2, tt1))
return type1;
else
return object_ptr_type_node;
}
}
else if (CLASS_INTERFACE (TYPE_NAME (tt2)))
{
if (can_widen_reference_to (tt1, tt2))
return type2;
else
return object_ptr_type_node;
}
type1 = tt1;
type2 = tt2;
depth1 = class_depth (type1);
depth2 = class_depth (type2);
for ( ; depth1 > depth2; depth1--)
type1 = BINFO_TYPE (BINFO_BASE_BINFO (TYPE_BINFO (type1), 0));
for ( ; depth2 > depth1; depth2--)
type2 = BINFO_TYPE (BINFO_BASE_BINFO (TYPE_BINFO (type2), 0));
while (type1 != type2)
{
type1 = BINFO_TYPE (BINFO_BASE_BINFO (TYPE_BINFO (type1), 0));
type2 = BINFO_TYPE (BINFO_BASE_BINFO (TYPE_BINFO (type2), 0));
}
return promote_type (type1);
}
if (INTEGRAL_TYPE_P (type1) && INTEGRAL_TYPE_P (type2)
&& TYPE_PRECISION (type1) <= 32 && TYPE_PRECISION (type2) <= 32)
return int_type_node;
return TYPE_UNKNOWN;
}
Example 9: verify_jvm_instructions
//......... part of the code omitted here .........
case OPCODE_iload_3: type = int_type_node; index = 3; goto load;
case OPCODE_lload_0: type = long_type_node; index = 0; goto load;
case OPCODE_lload_1: type = long_type_node; index = 1; goto load;
case OPCODE_lload_2: type = long_type_node; index = 2; goto load;
case OPCODE_lload_3: type = long_type_node; index = 3; goto load;
case OPCODE_fload_0: type = float_type_node; index = 0; goto load;
case OPCODE_fload_1: type = float_type_node; index = 1; goto load;
case OPCODE_fload_2: type = float_type_node; index = 2; goto load;
case OPCODE_fload_3: type = float_type_node; index = 3; goto load;
case OPCODE_dload_0: type = double_type_node; index = 0; goto load;
case OPCODE_dload_1: type = double_type_node; index = 1; goto load;
case OPCODE_dload_2: type = double_type_node; index = 2; goto load;
case OPCODE_dload_3: type = double_type_node; index = 3; goto load;
case OPCODE_aload_0: type = ptr_type_node; index = 0; goto load;
case OPCODE_aload_1: type = ptr_type_node; index = 1; goto load;
case OPCODE_aload_2: type = ptr_type_node; index = 2; goto load;
case OPCODE_aload_3: type = ptr_type_node; index = 3; goto load;
load:
if (index < 0
|| (index + TYPE_IS_WIDE (type)
>= DECL_MAX_LOCALS (current_function_decl)))
VERIFICATION_ERROR_WITH_INDEX
("invalid local variable index %d in load");
tmp = type_map[index];
if (tmp == TYPE_UNKNOWN)
VERIFICATION_ERROR_WITH_INDEX
("loading local variable %d which has unknown type");
else if (tmp == TYPE_SECOND
|| (TYPE_IS_WIDE (type)
&& type_map[index+1] != void_type_node)
|| (type == ptr_type_node
? TREE_CODE (tmp) != POINTER_TYPE
: type == int_type_node
? (! INTEGRAL_TYPE_P (tmp) || TYPE_PRECISION (tmp) > 32)
: type != tmp))
VERIFICATION_ERROR_WITH_INDEX
("loading local variable %d which has invalid type");
PUSH_TYPE (tmp);
goto note_used;
case OPCODE_istore: type = int_type_node; goto general_store;
case OPCODE_lstore: type = long_type_node; goto general_store;
case OPCODE_fstore: type = float_type_node; goto general_store;
case OPCODE_dstore: type = double_type_node; goto general_store;
case OPCODE_astore: type = object_ptr_type_node; goto general_store;
general_store:
index = wide ? IMMEDIATE_u2 : IMMEDIATE_u1;
wide = 0;
goto store;
case OPCODE_istore_0: type = int_type_node; index = 0; goto store;
case OPCODE_istore_1: type = int_type_node; index = 1; goto store;
case OPCODE_istore_2: type = int_type_node; index = 2; goto store;
case OPCODE_istore_3: type = int_type_node; index = 3; goto store;
case OPCODE_lstore_0: type = long_type_node; index=0; goto store;
case OPCODE_lstore_1: type = long_type_node; index=1; goto store;
case OPCODE_lstore_2: type = long_type_node; index=2; goto store;
case OPCODE_lstore_3: type = long_type_node; index=3; goto store;
case OPCODE_fstore_0: type=float_type_node; index=0; goto store;
case OPCODE_fstore_1: type=float_type_node; index=1; goto store;
case OPCODE_fstore_2: type=float_type_node; index=2; goto store;
case OPCODE_fstore_3: type=float_type_node; index=3; goto store;
case OPCODE_dstore_0: type=double_type_node; index=0; goto store;
case OPCODE_dstore_1: type=double_type_node; index=1; goto store;
case OPCODE_dstore_2: type=double_type_node; index=2; goto store;
case OPCODE_dstore_3: type=double_type_node; index=3; goto store;
case OPCODE_astore_0: type = ptr_type_node; index = 0; goto store;
case OPCODE_astore_1: type = ptr_type_node; index = 1; goto store;
Example 10: c_cpp_builtins
//......... part of the code omitted here .........
TARGET_DEC_EVAL_METHOD);
builtin_define_float_constants ("FLT", "F", "%s", float_type_node);
/* Cast the double precision constants when single precision constants are
specified. The correct result is computed by the compiler when using
macros that include a cast. This has the side-effect of making the value
unusable in const expressions. */
if (flag_single_precision_constant)
builtin_define_float_constants ("DBL", "L", "((double)%s)", double_type_node);
else
builtin_define_float_constants ("DBL", "", "%s", double_type_node);
builtin_define_float_constants ("LDBL", "L", "%s", long_double_type_node);
/* For decfloat.h. */
builtin_define_decimal_float_constants ("DEC32", "DF", dfloat32_type_node);
builtin_define_decimal_float_constants ("DEC64", "DD", dfloat64_type_node);
builtin_define_decimal_float_constants ("DEC128", "DL", dfloat128_type_node);
/* For use in assembly language. */
builtin_define_with_value ("__REGISTER_PREFIX__", REGISTER_PREFIX, 0);
builtin_define_with_value ("__USER_LABEL_PREFIX__", user_label_prefix, 0);
/* Misc. */
builtin_define_with_value ("__VERSION__", version_string, 1);
/* APPLE LOCAL begin mainline */
if (flag_gnu89_inline)
cpp_define (pfile, "__GNUC_GNU_INLINE__");
else
cpp_define (pfile, "__GNUC_STDC_INLINE__");
/* APPLE LOCAL end mainline */
/* Definitions for LP64 model. */
if (TYPE_PRECISION (long_integer_type_node) == 64
&& POINTER_SIZE == 64
&& TYPE_PRECISION (integer_type_node) == 32)
{
cpp_define (pfile, "_LP64");
cpp_define (pfile, "__LP64__");
}
/* Other target-independent built-ins determined by command-line
options. */
/* APPLE LOCAL begin blocks */
/* APPLE LOCAL radar 5868913 */
if (flag_blocks)
cpp_define (pfile, "__BLOCKS__=1");
/* APPLE LOCAL end blocks */
if (optimize_size)
cpp_define (pfile, "__OPTIMIZE_SIZE__");
if (optimize)
cpp_define (pfile, "__OPTIMIZE__");
if (fast_math_flags_set_p ())
cpp_define (pfile, "__FAST_MATH__");
if (flag_really_no_inline)
cpp_define (pfile, "__NO_INLINE__");
if (flag_signaling_nans)
cpp_define (pfile, "__SUPPORT_SNAN__");
if (flag_finite_math_only)
cpp_define (pfile, "__FINITE_MATH_ONLY__=1");
else
cpp_define (pfile, "__FINITE_MATH_ONLY__=0");
if (flag_pic)
{
builtin_define_with_int_value ("__pic__", flag_pic);
Example 11: builtin_define_type_precision
/* Define NAME with value TYPE precision. */
static void
builtin_define_type_precision (const char *name, tree type)
{
builtin_define_with_int_value (name, TYPE_PRECISION (type));
}
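This small helper is how the C-family front end turns a type's bit precision into a predefined preprocessor macro. A hedged usage sketch, assuming it is called from inside GCC's c-cppbuiltin code where the helper and the type nodes are visible; the macro name is purely illustrative:
/* Hypothetical call site: on an ILP32/LP64 target this would predefine
   __EXAMPLE_INT_WIDTH__=32, in the same way c_cpp_builtins predefines
   other target characteristics.  */
builtin_define_type_precision ("__EXAMPLE_INT_WIDTH__", integer_type_node);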
Example 12: UI_To_gnu
tree
UI_To_gnu (Uint Input, tree type)
{
tree gnu_ret;
/* We might have a TYPE with biased representation and be passed an
unbiased value that doesn't fit. We always use an unbiased type able
to hold any such possible value for intermediate computations, and
then rely on a conversion back to TYPE to perform the bias adjustment
when need be. */
int biased_type_p
= (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_BIASED_REPRESENTATION_P (type));
tree comp_type = biased_type_p ? get_base_type (type) : type;
if (Input <= Uint_Direct_Last)
gnu_ret = build_cst_from_int (comp_type, Input - Uint_Direct_Bias);
else
{
Int Idx = Uints_Ptr[Input].Loc;
Pos Length = Uints_Ptr[Input].Length;
Int First = Udigits_Ptr[Idx];
tree gnu_base;
gcc_assert (Length > 0);
/* The computations we perform below always require a type at least as
large as an integer not to overflow. REAL types are always fine, but
INTEGER or ENUMERAL types we are handed may be too short. We use a
base integer type node for the computations in this case and will
convert the final result back to the incoming type later on.
The base integer precision must be greater than 16. */
if (TREE_CODE (comp_type) != REAL_TYPE
&& TYPE_PRECISION (comp_type)
< TYPE_PRECISION (long_integer_type_node))
{
comp_type = long_integer_type_node;
gcc_assert (TYPE_PRECISION (comp_type) > 16);
}
gnu_base = build_cst_from_int (comp_type, Base);
gnu_ret = build_cst_from_int (comp_type, First);
if (First < 0)
for (Idx++, Length--; Length; Idx++, Length--)
gnu_ret = fold_build2 (MINUS_EXPR, comp_type,
fold_build2 (MULT_EXPR, comp_type,
gnu_ret, gnu_base),
build_cst_from_int (comp_type,
Udigits_Ptr[Idx]));
else
for (Idx++, Length--; Length; Idx++, Length--)
gnu_ret = fold_build2 (PLUS_EXPR, comp_type,
fold_build2 (MULT_EXPR, comp_type,
gnu_ret, gnu_base),
build_cst_from_int (comp_type,
Udigits_Ptr[Idx]));
}
gnu_ret = convert (type, gnu_ret);
/* We don't need any NOP_EXPR or NON_LVALUE_EXPR on GNU_RET. */
while ((TREE_CODE (gnu_ret) == NOP_EXPR
|| TREE_CODE (gnu_ret) == NON_LVALUE_EXPR)
&& TREE_TYPE (TREE_OPERAND (gnu_ret, 0)) == TREE_TYPE (gnu_ret))
gnu_ret = TREE_OPERAND (gnu_ret, 0);
return gnu_ret;
}
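For multi-digit Uints, the two loops above are a Horner evaluation: starting from the first digit, each step multiplies the accumulated value by Base and then adds (or, for negative numbers, subtracts) the next digit. A standalone illustration of the same scheme with ordinary integers, in plain C rather than the GNAT-to-GNU code:
#include <stdio.h>

/* Evaluate digits[0..n-1] in the given base with Horner's scheme,
   mirroring the nested MULT_EXPR/PLUS_EXPR folds in UI_To_gnu.  */
static long horner (const int *digits, int n, int base)
{
  long value = digits[0];
  for (int i = 1; i < n; i++)
    value = value * base + digits[i];
  return value;
}

int main (void)
{
  int digits[] = { 1, 2, 3, 4 };
  printf ("%ld\n", horner (digits, 4, 10));   /* prints 1234 */
  return 0;
}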
Example 13: hashable_expr_equal_p
static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
const struct hashable_expr *expr1)
{
tree type0 = expr0->type;
tree type1 = expr1->type;
/* If either type is NULL, there is nothing to check. */
if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
return false;
/* If both types don't have the same signedness, precision, and mode,
then we can't consider them equal. */
if (type0 != type1
&& (TREE_CODE (type0) == ERROR_MARK
|| TREE_CODE (type1) == ERROR_MARK
|| TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
|| TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
|| TYPE_MODE (type0) != TYPE_MODE (type1)))
return false;
if (expr0->kind != expr1->kind)
return false;
switch (expr0->kind)
{
case EXPR_SINGLE:
return equal_mem_array_ref_p (expr0->ops.single.rhs,
expr1->ops.single.rhs)
|| operand_equal_p (expr0->ops.single.rhs,
expr1->ops.single.rhs, 0);
case EXPR_UNARY:
if (expr0->ops.unary.op != expr1->ops.unary.op)
return false;
if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
|| expr0->ops.unary.op == NON_LVALUE_EXPR)
&& TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
return false;
return operand_equal_p (expr0->ops.unary.opnd,
expr1->ops.unary.opnd, 0);
case EXPR_BINARY:
if (expr0->ops.binary.op != expr1->ops.binary.op)
return false;
if (operand_equal_p (expr0->ops.binary.opnd0,
expr1->ops.binary.opnd0, 0)
&& operand_equal_p (expr0->ops.binary.opnd1,
expr1->ops.binary.opnd1, 0))
return true;
/* For commutative ops, allow the other order. */
return (commutative_tree_code (expr0->ops.binary.op)
&& operand_equal_p (expr0->ops.binary.opnd0,
expr1->ops.binary.opnd1, 0)
&& operand_equal_p (expr0->ops.binary.opnd1,
expr1->ops.binary.opnd0, 0));
case EXPR_TERNARY:
if (expr0->ops.ternary.op != expr1->ops.ternary.op
|| !operand_equal_p (expr0->ops.ternary.opnd2,
expr1->ops.ternary.opnd2, 0))
return false;
if (operand_equal_p (expr0->ops.ternary.opnd0,
expr1->ops.ternary.opnd0, 0)
&& operand_equal_p (expr0->ops.ternary.opnd1,
expr1->ops.ternary.opnd1, 0))
return true;
/* For commutative ops, allow the other order. */
return (commutative_ternary_tree_code (expr0->ops.ternary.op)
&& operand_equal_p (expr0->ops.ternary.opnd0,
expr1->ops.ternary.opnd1, 0)
&& operand_equal_p (expr0->ops.ternary.opnd1,
expr1->ops.ternary.opnd0, 0));
case EXPR_CALL:
{
size_t i;
/* If the calls are to different functions, then they
clearly cannot be equal. */
if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
expr1->ops.call.fn_from))
return false;
if (! expr0->ops.call.pure)
return false;
if (expr0->ops.call.nargs != expr1->ops.call.nargs)
return false;
for (i = 0; i < expr0->ops.call.nargs; i++)
if (! operand_equal_p (expr0->ops.call.args[i],
expr1->ops.call.args[i], 0))
return false;
//......... part of the code omitted here .........
Example 14: arm_output_c_attributes
static void arm_output_c_attributes(void)
{
/* Tag_ABI_PCS_wchar_t. */
asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
(int)(TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT));
}
Example 15: ubsan_instrument_shift
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
tree op0, tree op1)
{
tree t, tt = NULL_TREE;
tree type0 = TREE_TYPE (op0);
tree type1 = TREE_TYPE (op1);
tree op1_utype = unsigned_type_for (type1);
HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);
op0 = unshare_expr (op0);
op1 = unshare_expr (op1);
t = fold_convert_loc (loc, op1_utype, op1);
t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);
/* If this is not a signed operation, don't perform overflow checks.
Also punt on bit-fields. */
if (!INTEGRAL_TYPE_P (type0)
|| TYPE_OVERFLOW_WRAPS (type0)
|| GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0))
;
/* For signed x << y, in C99/C11, the following:
(unsigned) x >> (uprecm1 - y)
if non-zero, is undefined. */
else if (code == LSHIFT_EXPR && flag_isoc99 && cxx_dialect < cxx11)
{
tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
fold_convert (op1_utype, unshare_expr (op1)));
tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
build_int_cst (TREE_TYPE (tt), 0));
}
/* For signed x << y, in C++11 and later, the following:
x < 0 || ((unsigned) x >> (uprecm1 - y))
if > 1, is undefined. */
else if (code == LSHIFT_EXPR && cxx_dialect >= cxx11)
{
tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
fold_convert (op1_utype, unshare_expr (op1)));
tt = fold_convert_loc (loc, unsigned_type_for (type0),
unshare_expr (op0));
tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
build_int_cst (TREE_TYPE (tt), 1));
x = fold_build2 (LT_EXPR, boolean_type_node, unshare_expr (op0),
build_int_cst (type0, 0));
tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
}
/* If the condition was folded to 0, no need to instrument
this expression. */
if (integer_zerop (t) && (tt == NULL_TREE || integer_zerop (tt)))
return NULL_TREE;
/* In case we have a SAVE_EXPR in a conditional context, we need to
make sure it gets evaluated before the condition. */
t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op0), t);
t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t,
tt ? tt : integer_zero_node);
if (flag_sanitize_undefined_trap_on_error)
tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
else
{
tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
ubsan_type_descriptor (type0),
ubsan_type_descriptor (type1), NULL_TREE,
NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
enum built_in_function bcode
= (flag_sanitize_recover & SANITIZE_SHIFT)
? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
: BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
tt = builtin_decl_explicit (bcode);
op0 = unshare_expr (op0);
op1 = unshare_expr (op1);
tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
ubsan_encode_value (op1));
}
t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);
return t;
}
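Concretely, for a 32-bit op0 the instrumented test flags any shift count greater than uprecm1 = 31 and, for signed left shifts, additionally checks the bits that reach or cross the sign position (with slightly different rules on the C99 and C++11 paths, as the comments above describe). A standalone sketch of the C++11-style condition for a 32-bit int, in plain C rather than the tree-building GCC code:
#include <stdio.h>

/* Mirrors the instrumented condition: undefined if the count is out of
   range, the left operand is negative, or more than the sign bit's worth
   of value would be shifted out.  */
static int lshift_is_undefined (int x, int y)
{
  unsigned int uprecm1 = 31;                    /* TYPE_PRECISION (int) - 1 */
  if ((unsigned int) y > uprecm1)
    return 1;                                   /* count too large or negative */
  if (x < 0)
    return 1;                                   /* negative left operand */
  return ((unsigned int) x >> (uprecm1 - (unsigned int) y)) > 1;
}

int main (void)
{
  printf ("%d %d %d %d\n",
          lshift_is_undefined (2, 31),    /* 1: nonzero bit pushed past the sign position */
          lshift_is_undefined (1, 31),    /* 0: only the sign bit is produced, allowed here */
          lshift_is_undefined (-1, 1),    /* 1: negative left operand */
          lshift_is_undefined (1, 32));   /* 1: shift count out of range */
  return 0;
}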