This article collects typical usage examples of the sqlalchemy.types.Text method in Python. If you have been wondering what exactly types.Text does and how to use it, the curated code examples below may help. You can also explore the other members of the sqlalchemy.types module.
The following presents 15 code examples of types.Text, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
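Before the examples, here is a minimal, self-contained sketch of the most common way types.Text appears in practice: as the type of a table column. The table and column names below are made up for illustration and do not come from the examples that follow.

from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.types import Text

metadata = MetaData()
# "notes" and "body" are illustrative names only.
notes = Table(
    "notes", metadata,
    Column("id", Integer, primary_key=True),
    Column("body", Text),  # unbounded text; rendered as TEXT/CLOB depending on the dialect
)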
Example 1: fields_map
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def fields_map(self, field_type):
    if field_type == "primary":
        return ID(stored=True, unique=True)
    type_map = {
        'date': types.Date,
        'datetime': types.DateTime,
        'boolean': types.Boolean,
        'integer': types.Integer,
        'float': types.Float
    }
    if isinstance(field_type, str):
        field_type = type_map.get(field_type, types.Text)
    if not isinstance(field_type, type):
        field_type = field_type.__class__
    if issubclass(field_type, (types.DateTime, types.Date)):
        return DATETIME(stored=True, sortable=True)
    elif issubclass(field_type, types.Integer):
        return NUMERIC(stored=True, numtype=int)
    elif issubclass(field_type, types.Float):
        return NUMERIC(stored=True, numtype=float)
    elif issubclass(field_type, types.Boolean):
        return BOOLEAN(stored=True)
    return TEXT(stored=True, analyzer=self.analyzer, sortable=False)
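The objects returned above are whoosh.fields field types, so a caller can assemble them directly into a Whoosh Schema. A hedged usage sketch, where `indexer` stands for an instance of the (unshown) class that owns fields_map and the field names are made up:

from whoosh.fields import Schema

schema = Schema(
    id=indexer.fields_map("primary"),           # ID(stored=True, unique=True)
    created_at=indexer.fields_map("datetime"),  # DATETIME(stored=True, sortable=True)
    body=indexer.fields_map("text"),            # unknown names fall back to types.Text -> TEXT(...)
)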
Example 2: test_should_composite_convert
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def test_should_composite_convert():
    registry = Registry()

    class CompositeClass:
        def __init__(self, col1, col2):
            self.col1 = col1
            self.col2 = col2

    @convert_sqlalchemy_composite.register(CompositeClass, registry)
    def convert_composite_class(composite, registry):
        return graphene.String(description=composite.doc)

    field = convert_sqlalchemy_composite(
        composite(CompositeClass, (Column(types.Unicode(50)), Column(types.Unicode(50))), doc="Custom Help Text"),
        registry,
        mock_resolver,
    )
    assert isinstance(field, graphene.String)
Example 3: test_max_ident_in_varchar_not_present
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def test_max_ident_in_varchar_not_present(self):
    """test [ticket:3504].

    Here we are testing not just that the "max" token comes back
    as None, but also that these types accept "max" as the value
    of "length" on construction, which isn't a directly documented
    pattern, however it is likely in common use.
    """
    metadata = self.metadata
    Table(
        "t",
        metadata,
        Column("t1", types.String),
        Column("t2", types.Text("max")),
        Column("t3", types.Text("max")),
        Column("t4", types.LargeBinary("max")),
        Column("t5", types.VARBINARY("max")),
    )
    metadata.create_all()
    for col in inspect(testing.db).get_columns("t"):
        is_(col["type"].length, None)
        in_("max", str(col["type"].compile(dialect=testing.db.dialect)))
Example 4: _convert_type
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def _convert_type(self, dj_field, sa_type):
    kwargs = {}
    if sa_type is SA_ARRAY:
        internal_type = dj_field.base_field.get_internal_type()
        kwargs['item_type'] = self._types.get(internal_type)
        if kwargs['item_type'] is None:
            raise ConversionError(
                'Unable to convert array: '
                'item type "%s" not found' % internal_type
            )
    elif sa_type is Geometry:
        kwargs['geometry_type'] = 'POINT'
        kwargs['srid'] = dj_field.srid
    elif sa_type is sa_types.Numeric:
        kwargs['scale'] = dj_field.decimal_places
        kwargs['precision'] = dj_field.max_digits
    elif sa_type in (sa_types.String, sa_types.Text):
        kwargs['length'] = dj_field.max_length
    elif sa_type is SA_UUID:
        kwargs['as_uuid'] = True
    return sa_type(**kwargs)
Example 5: fields_map
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def fields_map(self, field_type):
    if field_type == "primary":
        return {'type': 'keyword'}
    type_map = {
        'date': types.Date,
        'datetime': types.DateTime,
        'boolean': types.Boolean,
        'integer': types.Integer,
        'float': types.Float,
        'binary': types.Binary
    }
    if isinstance(field_type, str):
        field_type = type_map.get(field_type, types.Text)
    if not isinstance(field_type, type):
        field_type = field_type.__class__
    if issubclass(field_type, (types.DateTime, types.Date)):
        return {'type': 'date'}
    elif issubclass(field_type, types.Integer):
        return {'type': 'long'}
    elif issubclass(field_type, types.Float):
        return {'type': 'float'}
    elif issubclass(field_type, types.Boolean):
        return {'type': 'boolean'}
    elif issubclass(field_type, types.Binary):
        return {'type': 'binary'}
    return {'type': 'string'}
    # https://medium.com/@federicopanini/elasticsearch-6-0-removal-of-mapping-types-526a67ff772
Example 6: native_type
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def native_type(self, registry):
    if self.encrypt_key:
        return types.Text
    return self.sqlalchemy_type
Example 7: get_field
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def get_field(sqlalchemy_type, **column_kwargs):
    class Model(declarative_base()):
        __tablename__ = 'model'
        id_ = Column(types.Integer, primary_key=True)
        column = Column(sqlalchemy_type, doc="Custom Help Text", **column_kwargs)

    column_prop = inspect(Model).column_attrs['column']
    return convert_sqlalchemy_column(column_prop, get_global_registry(), mock_resolver)
Example 8: test_should_text_convert_string
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def test_should_text_convert_string():
    assert get_field(types.Text()).type == graphene.String
Example 9: load_dialect_impl
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def load_dialect_impl(self, dialect):
    if dialect.name == "mysql":
        return dialect.type_descriptor(
            LONGTEXT(charset="utf8mb4", collation="utf8mb4_unicode_ci")
        )
    else:
        return dialect.type_descriptor(Text())
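load_dialect_impl is a hook defined on sqlalchemy.types.TypeDecorator, so the method above is assumed to live on a TypeDecorator subclass. A minimal sketch of such a wrapper (the class name is hypothetical, not taken from the original source):

from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.types import Text, TypeDecorator

class UnicodeLongText(TypeDecorator):  # hypothetical name, for illustration only
    impl = Text
    cache_ok = True  # opts the type into statement caching on SQLAlchemy 1.4+

    def load_dialect_impl(self, dialect):
        if dialect.name == "mysql":
            return dialect.type_descriptor(
                LONGTEXT(charset="utf8mb4", collation="utf8mb4_unicode_ci")
            )
        return dialect.type_descriptor(Text())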
Example 10: add_false_positive
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def add_false_positive(context, issue):
    """Add a finding into the database as a new finding

    :param context: The Behave context
    :param issue: An issue data structure (see steps.py)
    """
    dbconn = open_database(context)
    if dbconn is None:
        # There is no false positive db in use, and we cannot store the data,
        # so we will assert a failure.
        assert False, "Issues were found in scan, but no false positive database is in use."

    # Add the finding into the database
    db_insert = context.headlessscanner_issues.insert().values(
        new_issue=True,  # Boolean
        # The result from Burp Extender does not include a timestamp,
        # so we add the current time
        timestamp=datetime.datetime.utcnow(),  # DateTime
        test_runner_host=socket.gethostbyname(socket.getfqdn()),  # Text
        scenario_id=issue['scenario_id'],  # Text
        url=issue['url'],  # Text
        severity=issue['severity'],  # Text
        issuetype=issue['issuetype'],  # Text
        issuename=issue['issuename'],  # Text
        issuedetail=issue['issuedetail'],  # Text
        confidence=issue['confidence'],  # Text
        host=issue['host'],  # Text
        port=issue['port'],  # Text
        protocol=issue['protocol'],  # Text
        messages=json.dumps(issue['messages']))  # Blob
    dbconn.execute(db_insert)
    dbconn.close()
Example 11: __init__
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def __init__(self, mysql_as_long=False, mysql_as_medium=False):
    """Initialize JSON-encoding type."""
    super(JsonEncodedType, self).__init__()
    if mysql_as_long and mysql_as_medium:
        raise TypeError("mysql_as_long and mysql_as_medium are mutually "
                        "exclusive")
    if mysql_as_long:
        self.impl = Text().with_variant(mysql.LONGTEXT(), 'mysql')
    elif mysql_as_medium:
        self.impl = Text().with_variant(mysql.MEDIUMTEXT(), 'mysql')
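The with_variant() calls above keep plain Text as the default storage type and swap in a MySQL-specific type only when the active dialect is MySQL. The same pattern can be used directly on a column; a small standalone sketch with an illustrative column name:

from sqlalchemy import Column
from sqlalchemy.dialects import mysql
from sqlalchemy.types import Text

# Text on every backend, LONGTEXT when the dialect is MySQL.
payload = Column("payload", Text().with_variant(mysql.LONGTEXT(), "mysql"))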
Example 12: test_load_dialect_impl
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def test_load_dialect_impl(self):
    dialect = mysql_base.MySQLDialect()
    impl = self.sqltype.load_dialect_impl(dialect)
    self.assertNotEqual(types.Text, type(impl))

    dialect = sqlite_base.SQLiteDialect()
    impl = self.sqltype.load_dialect_impl(dialect)
    self.assertEqual(types.Text, type(impl))
Example 13: __init__
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def __init__(self):
    self._types = {
        # Django internal type => SQLAlchemy type
        'ArrayField': SA_ARRAY,
        'AutoField': sa_types.Integer,
        'BigAutoField': sa_types.BigInteger,
        'BigIntegerField': sa_types.BigInteger,
        'BooleanField': sa_types.Boolean,
        'CharField': sa_types.String,
        'DateField': sa_types.Date,
        'DateTimeField': sa_types.DateTime,
        'DecimalField': sa_types.Numeric,
        'DurationField': sa_types.Interval,
        'FileField': sa_types.String,
        'FilePathField': sa_types.String,
        'FloatField': sa_types.Float,
        'GenericIPAddressField': sa_types.String,
        'IntegerField': sa_types.Integer,
        'JSONField': SA_JSONB,
        'NullBooleanField': sa_types.Boolean,
        'PointField': Geometry,
        'PositiveIntegerField': sa_types.Integer,
        'PositiveSmallIntegerField': sa_types.SmallInteger,
        'SlugField': sa_types.String,
        'SmallIntegerField': sa_types.SmallInteger,
        'TextField': sa_types.Text,
        'TimeField': sa_types.Time,
        'UUIDField': SA_UUID,
        # TODO: Add missing GIS fields
    }
Example 14: merge_stock_info
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def merge_stock_info():
    """
    Merge the wind and ifind data into the table with the corresponding name.
    :return:
    """
    table_name = 'stock_info'
    logging.info("Updating %s: start", table_name)
    has_table = engine_md.has_table(table_name)
    ifind_table_name = 'ifind_{table_name}'.format(table_name=table_name)
    wind_table_name = 'wind_{table_name}'.format(table_name=table_name)
    # ifind_model = TABLE_MODEL_DIC[ifind_table_name]
    # wind_model = TABLE_MODEL_DIC[wind_table_name]
    # with with_db_session(engine_md) as session:
    #     session.query(ifind_model, wind_model).filter(ifind_model.c.ths_code == wind_model.c.wind_code)
    ifind_sql_str = "select * from {table_name}".format(table_name=ifind_table_name)
    wind_sql_str = "select * from {table_name}".format(table_name=wind_table_name)
    ifind_df = pd.read_sql(ifind_sql_str, engine_md)  # , index_col='ths_code'
    wind_df = pd.read_sql(wind_sql_str, engine_md)  # , index_col='wind_code'
    joined_df = pd.merge(ifind_df, wind_df, how='outer',
                         left_on='ths_code', right_on='wind_code', indicator='indicator_column')
    col_merge_dic = {
        'unique_code': (String(20), prefer_left, {'left_key': 'ths_code', 'right_key': 'wind_code'}),
        'sec_name': (String(20), prefer_left, {'left_key': 'ths_stock_short_name_stock', 'right_key': 'sec_name'}),
        'cn_name': (String(100), get_value, {'key': 'ths_corp_cn_name_stock'}),
        'en_name': (String(100), get_value, {'key': 'ths_corp_name_en_stock'}),
        'delist_date': (Date, prefer_left, {'left_key': 'ths_delist_date_stock', 'right_key': 'delist_date'}),
        'ipo_date': (Date, prefer_left, {'left_key': 'ths_ipo_date_stock', 'right_key': 'ipo_date'}),
        'pre_name': (Text, prefer_left, {'left_key': 'ths_corp_name_en_stock', 'right_key': 'prename'}),
        'established_date': (Date, get_value, {'key': 'ths_established_date_stock'}),
        'exch_city': (String(20), get_value, {'key': 'exch_city'}),
        'exch_cn': (String(20), get_value, {'key': 'ths_listing_exchange_stock'}),
        'exch_eng': (String(20), get_value, {'key': 'exch_eng'}),
        'stock_code': (String(20), prefer_left, {'left_key': 'ths_stock_code_stock', 'right_key': 'trade_code'}),
        'mkt': (String(20), get_value, {'key': 'mkt'}),
    }
    col_merge_rule_dic = {
        key: (val[1], val[2]) for key, val in col_merge_dic.items()
    }
    dtype = {
        key: val[0] for key, val in col_merge_dic.items()
    }
    data_df = merge_data(joined_df, col_merge_rule_dic)
    data_count = bunch_insert_on_duplicate_update(data_df, table_name, engine_md, dtype)
    logger.info('%s: %d records inserted or updated', table_name, data_count)
    if not has_table and engine_md.has_table(table_name):
        alter_table_2_myisam(engine_md, [table_name])
        build_primary_key([table_name])
    return data_df
Example 15: open_database
# Required import: from sqlalchemy import types [as alias]
# Or: from sqlalchemy.types import Text [as alias]
def open_database(context):
    """Opens the database specified in the feature file and creates
    tables if not already created

    :param context: The Behave context
    :return: A database handle, or None if no database in use
    """
    if hasattr(context, 'dburl') is False:
        return None  # No false positives database is in use
    dbconn = None

    # Try to connect to the database
    try:
        db_engine = create_engine(context.dburl)
        dbconn = db_engine.connect()
    except (IOError, exc.OperationalError):
        assert False, "Cannot connect to database '%s'" % context.dburl

    # Set up the database table to store new findings and false positives.
    # We use LargeBinary to store those fields that could contain somehow
    # bad Unicode, just in case some component downstream tries to parse
    # a string provided as Unicode.
    db_metadata = MetaData()
    db_metadata.bind = db_engine
    context.httpfuzzer_issues = Table('httpfuzzer_issues', db_metadata,
                                      Column('new_issue', types.Boolean),
                                      Column('issue_no', types.Integer, primary_key=True, nullable=False),
                                      Column('timestamp', types.DateTime(timezone=True)),
                                      Column('test_runner_host', types.Text),
                                      Column('scenario_id', types.Text),
                                      Column('url', types.Text),
                                      Column('server_protocol_error', types.Text),
                                      Column('server_timeout', types.Boolean),
                                      Column('server_error_text_detected', types.Boolean),
                                      Column('server_error_text_matched', types.Text),
                                      Column('req_method', types.Text),
                                      Column('req_headers', types.LargeBinary),
                                      Column('req_body', types.LargeBinary),
                                      Column('resp_statuscode', types.Text),
                                      Column('resp_headers', types.LargeBinary),
                                      Column('resp_body', types.LargeBinary),
                                      Column('resp_history', types.LargeBinary))

    # Create the table if it doesn't exist; otherwise this has no effect
    db_metadata.create_all(db_engine)
    return dbconn