本文整理汇总了Python中dataclasses.is_dataclass方法的典型用法代码示例。如果您正苦于以下问题:Python dataclasses.is_dataclass方法的具体用法?Python dataclasses.is_dataclass怎么用?Python dataclasses.is_dataclass使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类dataclasses
的用法示例。
在下文中一共展示了dataclasses.is_dataclass方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: coerce_response
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def coerce_response(response_data):
    """Coerce response data to a JSON-serializable object with camelCase keys.

    Recursively walks the response object so that nested dataclasses,
    dicts and lists are all reformatted the same way.

    :param response_data: data returned from the API request
    :returns: the same data but with keys in camelCase
    """
    if is_dataclass(response_data):
        # Dataclasses are flattened to a dict first, then treated like one.
        dataclass_items = asdict(response_data).items()
        return {snake_to_camel(k): coerce_response(v) for k, v in dataclass_items}
    if isinstance(response_data, dict):
        return {snake_to_camel(k): coerce_response(v) for k, v in response_data.items()}
    if isinstance(response_data, list):
        return [coerce_response(element) for element in response_data]
    # Scalars (str, int, None, ...) pass through untouched.
    return response_data
示例2: export_value
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def export_value(obj, key, value):
# export and _asdict are not classmethods
if hasattr(value, "ENTRY_POINT_ORIG_LABEL") and hasattr(value, "config"):
obj[key] = {"plugin": value.ENTRY_POINT_ORIG_LABEL}
export_value(obj[key], "config", value.config)
elif inspect.isclass(value):
obj[key] = value.__qualname__
elif isinstance(value, (pathlib.Path, uuid.UUID)):
obj[key] = str(value)
elif hasattr(value, "export"):
obj[key] = value.export()
elif hasattr(value, "_asdict"):
obj[key] = value._asdict()
elif getattr(type(value), "__module__", None) == "numpy" and isinstance(
getattr(value, "flatten", None), collections.Callable
):
obj[key] = tuple(value.flatten())
elif dataclasses.is_dataclass(value):
obj[key] = export_dict(**dataclasses.asdict(value))
elif getattr(value, "__module__", None) == "typing":
obj[key] = STANDARD_TYPES.get(
str(value).replace("typing.", ""), "generic"
)
示例3: gen_variants
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def gen_variants(self, obj: object, gen, path):
    """Yield ``(variants, path)`` pairs for *obj*.

    For a non-dataclass, yields ``gen(obj)`` at the current path. For a
    dataclass, recurses into each field (via ``self(...)``, which is
    expected to be callable — presumably dispatching by type; confirm
    against the enclosing class) and rebuilds whole-object variants with
    that single field substituted.
    """
    if not is_dataclass(obj):
        yield (gen(obj), path)
        return

    field_names = list(obj.__dataclass_fields__.keys())
    for name in field_names:
        current = getattr(obj, name)
        for candidates, sub_path in self(current, gen, path + (name,)):
            rebuilt = [
                type(obj)(**{
                    other: (candidate if other == name else getattr(obj, other))
                    for other in field_names
                })
                for candidate in candidates
            ]
            yield (rebuilt, sub_path)
示例4: _contains_non_default_init_vars
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def _contains_non_default_init_vars(cls, previous_classes=None):
"""Check whether this dataclass contains non-default init-only vars.
Performs a recursive check through all fields that are declared as
dataclasses to ensure that no nested dataclasses contain init-only
variables. The ``previous_classes`` argument is a set of previously
checked classes to prevent infinite recursion on recursive structures.
:param previous_classes: The set of previously checked classes.
"""
try:
previous_classes.add(cls)
except AttributeError: # NoneType
previous_classes = {cls}
# The identify check (.. is MISSING) is fine, MISSING is a singleton
has_init_vars = any(field.type == InitVar and field.default is MISSING
for field in cls.__dataclass_fields__.values())
children_have_init_vars = any(
child.type._contains_non_default_init_vars(previous_classes)
for child in fields(cls)
if (is_dataclass(child.type)
and child.type not in previous_classes))
return has_init_vars or children_have_init_vars
示例5: _serialize
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def _serialize(self, value, attr, obj, **kwargs):
    """Serialize *value* for a Union-typed field.

    Tries each candidate ``type -> schema`` pair in ``self.desc``:
    dataclass matches are tagged with a ``'__type'`` discriminator so
    they can be round-tripped; non-dataclass subclass matches fall
    through to the parent field's serializer. Warns when no candidate
    matches. (Looks like marshmallow field internals — presumably
    ``self.desc``/``self.cls``/``self.field`` are set by the enclosing
    union-field class; confirm against its constructor.)
    """
    if self.allow_none and value is None:
        return None
    for type_, schema_ in self.desc.items():
        if _issubclass_safe(type(value), type_):
            if is_dataclass(value):
                res = schema_._serialize(value, attr, obj, **kwargs)
                # Embed a discriminator so _deserialize can pick the
                # right dataclass back out of the union.
                res['__type'] = str(type_.__name__)
                return res
            # Subclass match but not a dataclass: stop searching and let
            # the parent serializer handle it (skips the for/else warn).
            break
        elif isinstance(value, _get_type_origin(type_)):
            # Generic container match (e.g. List[...]): delegate directly.
            return schema_._serialize(value, attr, obj, **kwargs)
    else:
        # Loop exhausted without a break: value fits none of the
        # declared union members.
        warnings.warn(
            f'The type "{type(value).__name__}" (value: "{value}") '
            f'is not in the list of possible types of typing.Union '
            f'(dataclass: {self.cls.__name__}, field: {self.field.name}). '
            f'Value cannot be serialized properly.')
    return super()._serialize(value, attr, obj, **kwargs)
示例6: _deserialize
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def _deserialize(self, value, attr, data, **kwargs):
    """Deserialize *value* for a Union-typed field.

    If the payload carries a ``'__type'`` discriminator (added by
    ``_serialize``), dispatch to the matching dataclass schema;
    otherwise fall back to matching by runtime container type. Warns
    when no candidate matches.
    """
    # Work on a copy: the '__type' key is deleted before dispatch and
    # the caller's dict must not be mutated.
    tmp_value = deepcopy(value)
    if isinstance(tmp_value, dict) and '__type' in tmp_value:
        dc_name = tmp_value['__type']
        for type_, schema_ in self.desc.items():
            if is_dataclass(type_) and type_.__name__ == dc_name:
                del tmp_value['__type']
                return schema_._deserialize(tmp_value, attr, data, **kwargs)
    for type_, schema_ in self.desc.items():
        if isinstance(tmp_value, _get_type_origin(type_)):
            return schema_._deserialize(tmp_value, attr, data, **kwargs)
    else:
        # Loop completed without returning: value fits none of the
        # declared union members.
        warnings.warn(
            f'The type "{type(tmp_value).__name__}" (value: "{tmp_value}") '
            f'is not in the list of possible types of typing.Union '
            f'(dataclass: {self.cls.__name__}, field: {self.field.name}). '
            f'Value cannot be deserialized properly.')
    return super()._deserialize(tmp_value, attr, data, **kwargs)
示例7: pydantic_encoder
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def pydantic_encoder(obj: Any) -> Any:
    """JSON-encoding fallback for pydantic objects.

    Handles ``BaseModel`` instances and dataclasses directly, then walks
    the object's MRO looking for a type registered in ``ENCODERS_BY_TYPE``;
    raises ``TypeError`` when nothing matches.
    """
    from dataclasses import asdict, is_dataclass

    from .main import BaseModel

    if isinstance(obj, BaseModel):
        return obj.dict()
    if is_dataclass(obj):
        return asdict(obj)

    # Walk the MRO most-derived first, skipping ``object`` (the final
    # entry), and use the first registered encoder found.
    for base in obj.__class__.__mro__[:-1]:
        if base in ENCODERS_BY_TYPE:
            return ENCODERS_BY_TYPE[base](obj)

    # No encoder matched anywhere in the MRO.
    raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
示例8: validate
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def validate(self, *, prefix=''):
    """Type-check every field of this hyper-parameter dataclass.

    Recurses into nested ``HParams`` values so the whole tree is
    validated; ``prefix`` is the dotted path used in error reporting.
    """
    assert is_dataclass(self), f"You forgot to annotate {type(self)} with @dataclass"
    for spec in fields(self):
        value = getattr(self, spec.name)
        check_type(prefix + spec.name, value, spec.type)
        # Nested hyper-parameter containers validate their own subtree.
        if isinstance(value, HParams):
            value.validate(prefix=prefix + spec.name + '.')
示例9: is_hparam_type
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def is_hparam_type(ty):
    """Return True when *ty* is an ``HParams`` subclass, False otherwise.

    Asserts that any matching class is also a dataclass, since HParams
    subclasses are expected to carry the ``@dataclass`` decorator.
    """
    if not (isinstance(ty, type) and issubclass(ty, HParams)):
        return False
    assert is_dataclass(ty)
    return True
示例10: force_type
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def force_type(cls, rv, environ=None):
    """Coerce dicts, lists and dataclasses returned from view functions
    into JSON responses with camelCase keys.

    :param rv: the return value from the view function
    :param environ: optional WSGI environ, passed through to the base class
    :returns: a SeleneResponse wrapping the (possibly JSON-ified) value
    """
    # Idiom fix: one isinstance() call with a tuple instead of two chained.
    if isinstance(rv, (dict, list)) or is_dataclass(rv):
        rv = jsonify(coerce_response(rv))
    return super(SeleneResponse, cls).force_type(rv, environ)
示例11: default
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def default(self, obj):
    """Serialise dataclasses that expose a ``json()`` method; defer to
    the base JSON encoder for everything else."""
    is_json_dataclass = dataclasses.is_dataclass(obj) and hasattr(obj, "json")
    if is_json_dataclass:
        return obj.json()
    return super().default(obj)
示例12: __init_subclass__
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def __init_subclass__(
    cls,
    discriminator: Optional[Union[str, bool]] = None,
    allow_additional_props: bool = True,
    serialise_properties: Union[Tuple[str, ...], bool] = False,
):
    """Configure per-subclass schema state for JsonSchemaMixin subclasses.

    :param discriminator: a discriminator property name (str), or a truthy
        non-str value to auto-name it ``<ClassName>Type``; None means
        "inherit from a dataclass base if one declares a discriminator".
    :param allow_additional_props: whether the generated schema permits
        properties not declared on the dataclass.
    :param serialise_properties: True to serialise all properties, or a
        tuple of property names to include.
    """
    # Initialise caches (name-mangled, so each subclass gets fresh ones
    # rather than sharing the mixin's).
    cls.__schema = {}
    cls.__compiled_schema = {}
    cls.__definitions = {}
    cls.__encode_cache = {}
    cls.__decode_cache = {}
    cls.__mapped_fields = []
    cls.__discriminator_inherited = False
    cls.__serialise_properties = serialise_properties
    if discriminator is not None:
        # Explicit discriminator: use the given name, or synthesise one
        # from the class name when a non-str truthy value was passed.
        cls.__discriminator_name = discriminator if isinstance(discriminator, str) else f"{cls.__name__}Type"
    else:
        dataclass_bases = [
            klass for klass in cls.__bases__ if is_dataclass(klass) and issubclass(klass, JsonSchemaMixin)
        ]
        if len(dataclass_bases) > 0:
            if not allow_additional_props:
                raise TypeError("Dataclass inheritance and additional_props_false=False not currently supported")
            base_discriminators = [
                base._discriminator() for base in dataclass_bases if base._discriminator() is not None
            ]
            if len(base_discriminators):
                if len(base_discriminators) > 1:
                    raise TypeError("Multiple bases with discriminators is unsupported")
                # Inherit the single base discriminator.
                cls.__discriminator_name = base_discriminators[0]
                cls.__discriminator_inherited = True
        else:
            # No dataclass bases at all: this is a schema root with no
            # discriminator.
            cls.__discriminator_name = None
    cls.__allow_additional_props = allow_additional_props
示例13: _get_fields
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def _get_fields(cls, base_fields=True) -> List[JsonSchemaField]:
    """Collect the schema-relevant fields of this dataclass.

    :param base_fields: when False, fields inherited from dataclass bases
        are excluded (and the result is not cached).
    :returns: list of JsonSchemaField wrappers, optionally including
        serialisable properties when ``__serialise_properties`` is set.
    """
    def _get_fields_uncached():
        # Fields declared on dataclass bases, used to filter inherited
        # fields out when base_fields is False.
        dataclass_bases = [
            klass for klass in cls.__bases__ if is_dataclass(klass) and issubclass(klass, JsonSchemaMixin)
        ]
        base_fields_types = set()
        for base in dataclass_bases:
            base_fields_types |= {(f.name, f.type) for f in fields(base)}
        mapped_fields = []
        type_hints = get_type_hints(cls)
        for f in fields(cls):
            # Skip internal fields
            if f.name.startswith("__") or (not base_fields and (f.name, f.type) in base_fields_types):
                continue
            # Note fields() doesn't resolve forward refs
            f.type = type_hints[f.name]
            mapped_fields.append(JsonSchemaField(f, cls.field_mapping().get(f.name, f.name)))
        if cls.__serialise_properties:
            # A tuple restricts which properties are serialised; any other
            # truthy value means "all data descriptors".
            include_properties = None
            if isinstance(cls.__serialise_properties, tuple):
                include_properties = set(cls.__serialise_properties)
            members = inspect.getmembers(cls, inspect.isdatadescriptor)
            for name, member in members:
                if name != "__weakref__" and (include_properties is None or name in include_properties):
                    # Build a synthetic Field for the property.
                    # NOTE(review): Field() positional arity is version-
                    # dependent (Python 3.10 added kw_only); this 7-arg
                    # call breaks on 3.10+ — confirm target Python version.
                    f = Field(MISSING, None, None, None, None, None, None)
                    f.name = name
                    # The property's declared return annotation is its type.
                    f.type = member.fget.__annotations__['return']
                    mapped_fields.append(JsonSchemaField(f, name, is_property=True))
        return mapped_fields
    if not base_fields:
        return _get_fields_uncached()
    # Cache the full field list per class (name-mangled attribute).
    if not cls.__mapped_fields:
        cls.__mapped_fields = _get_fields_uncached()
    return cls.__mapped_fields  # type: ignore
示例14: all_json_schemas
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def all_json_schemas(
    cls: Type[T], schema_type: SchemaType = DEFAULT_SCHEMA_TYPE, validate_enums: bool = True
) -> JsonDict:
    """Collect the JSON schemas of every subclass of *cls*.

    Dataclass subclasses contribute their own embeddable schema;
    non-dataclass intermediates are recursed into so their subclasses
    are still covered.
    """
    definitions = {}
    for subclass in cls.__subclasses__():
        if not is_dataclass(subclass):
            # Abstract/intermediate subclass: gather from its children.
            definitions.update(subclass.all_json_schemas(schema_type=schema_type, validate_enums=validate_enums))
            continue
        definitions.update(
            subclass.json_schema(embeddable=True, schema_type=schema_type, validate_enums=validate_enums)
        )
    return definitions
示例15: export_list
# 需要导入模块: import dataclasses [as 别名]
# 或者: from dataclasses import is_dataclass [as 别名]
def export_list(iterable):
    """Export every element of *iterable* in place and return it.

    Each element is first run through ``export_value``; mappings,
    dataclasses and nested lists are then normalised recursively via
    ``export_dict`` / ``export_list``.
    """
    for index, element in enumerate(iterable):
        export_value(iterable, index, element)
        # Dispatch on the ORIGINAL element (export_value may already have
        # replaced iterable[index]).
        if isinstance(element, (dict, types.MappingProxyType)):
            iterable[index] = export_dict(**iterable[index])
        elif dataclasses.is_dataclass(element):
            iterable[index] = export_dict(**dataclasses.asdict(element))
        elif isinstance(element, list):
            iterable[index] = export_list(iterable[index])
    return iterable