

Python collections.OrderedDict Code Examples

This article collects typical code examples of the collections.OrderedDict class in Python. If you are wondering what collections.OrderedDict does, how to use it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the collections module, where OrderedDict is defined.


Below are 15 code examples of collections.OrderedDict, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
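
As a quick refresher before the examples: collections.OrderedDict is a dict subclass that remembers the order in which keys were inserted and adds order-aware methods such as move_to_end. A minimal demonstration:

from collections import OrderedDict

d = OrderedDict()
d['first'] = 1
d['second'] = 2
d['third'] = 3
print(list(d.keys()))   # ['first', 'second', 'third']

d.move_to_end('first')  # reorder a key; plain dict has no equivalent
print(list(d.keys()))   # ['second', 'third', 'first']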

Example 1: get_assignment_map_from_checkpoint

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables."""
    initialized_variable_names = {}

    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var

    init_vars = tf.train.list_variables(init_checkpoint)

    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        if name not in name_to_variable:
            continue
        assignment_map[name] = name
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ":0"] = 1

    return (assignment_map, initialized_variable_names) 
Developer: Socialbird-AILab, Project: BERT-Classification-Tutorial, Lines: 27, Source: modeling.py
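
A hedged usage sketch (TF1.x style, mirroring how the BERT codebase applies the map; init_checkpoint is assumed to point at a valid checkpoint):

tvars = tf.trainable_variables()
(assignment_map,
 initialized_variable_names) = get_assignment_map_from_checkpoint(
     tvars, init_checkpoint)
# Initialize matching variables from the checkpoint values.
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)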

Example 2: __init__

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def __init__(self):
        self.args = None
        self.alignDistance = 0
        self.samples = collections.OrderedDict()
        self.genome = None
        self.sources = {}
        self.annotationSets = collections.OrderedDict()

        # for storing axes, annotations, etc, by allele
        self.alleleTracks = collections.defaultdict(collections.OrderedDict)
        self.trackCompositor = None

        self.dotplots = {}
        self.info = {}

        self.reset() 
Developer: svviz, Project: svviz, Lines: 18, Source: datahub.py

Example 3: __init__

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def __init__(self, chromPartsCollection, pixelWidth, dividerSize=25):
        # length is in genomic coordinates, starts is in pixels
        self.dividerSize = dividerSize
        self.partsToLengths = collections.OrderedDict()
        self.partsToStartPixels = collections.OrderedDict()
        self.chromPartsCollection = chromPartsCollection

        for part in chromPartsCollection:
            self.partsToLengths[part.id] = len(part)

        self.pixelWidth = pixelWidth

        totalLength = sum(self.partsToLengths.values()) + (len(self.partsToLengths)-1)*dividerSize
        self.basesPerPixel = totalLength / float(pixelWidth)

        curStart = 0
        for regionID in self.partsToLengths:
            self.partsToStartPixels[regionID] = curStart
            curStart += (self.partsToLengths[regionID]+dividerSize) / self.basesPerPixel 
Developer: svviz, Project: svviz, Lines: 21, Source: track.py

Example 4: compute_f1

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def compute_f1(answer_stats, prefix=''):
  """Computes F1, precision, recall for a list of answer scores.

  Args:
    answer_stats: List of per-example scores.
    prefix: Prefix to prepend to each score dictionary key (default '').

  Returns:
    Dictionary mapping string names to scores.
  """

  has_gold, has_pred, is_correct, _ = list(zip(*answer_stats))
  precision = safe_divide(sum(is_correct), sum(has_pred))
  recall = safe_divide(sum(is_correct), sum(has_gold))
  f1 = safe_divide(2 * precision * recall, precision + recall)

  # Build from a list of pairs so key order is guaranteed even on
  # Python < 3.7, where a dict literal does not preserve ordering.
  return OrderedDict([
      (prefix + 'n', len(answer_stats)),
      (prefix + 'f1', f1),
      (prefix + 'precision', precision),
      (prefix + 'recall', recall),
  ]) 
Developer: google-research-datasets, Project: natural-questions, Lines: 24, Source: nq_eval.py
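
A minimal, hypothetical driver for compute_f1, assuming the function above is in scope and using a simple stand-in for safe_divide (the real helper lives elsewhere in nq_eval.py):

from collections import OrderedDict

def safe_divide(x, y):
    # Hypothetical stand-in: return 0 when the denominator is 0.
    return x / y if y else 0

# Each tuple: (has_gold, has_pred, is_correct, score).
answer_stats = [(1, 1, 1, 0.9), (1, 0, 0, 0.0), (0, 1, 0, 0.4)]
# precision = 1/2, recall = 1/2, so f1 = 0.5.
print(compute_f1(answer_stats, prefix='long-'))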

Example 5: get_metrics_with_answer_stats

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def get_metrics_with_answer_stats(long_answer_stats, short_answer_stats):
  """Generate metrics dict using long and short answer stats."""

  def _get_metric_dict(answer_stats, prefix=''):
    """Compute all metrics for a set of answer statistics."""
    opt_result, pr_table = compute_pr_curves(
        answer_stats, targets=[0.5, 0.75, 0.9])
    f1, precision, recall, threshold = opt_result
    # A list of pairs keeps insertion order even on Python < 3.7.
    metrics = OrderedDict([
        ('best-threshold-f1', f1),
        ('best-threshold-precision', precision),
        ('best-threshold-recall', recall),
        ('best-threshold', threshold),
    ])
    for target, recall, precision, _ in pr_table:
      metrics['recall-at-precision>={:.2}'.format(target)] = recall
      metrics['precision-at-precision>={:.2}'.format(target)] = precision

    # Add prefix before returning.
    return dict([(prefix + k, v) for k, v in six.iteritems(metrics)])

  metrics = _get_metric_dict(long_answer_stats, 'long-')
  metrics.update(_get_metric_dict(short_answer_stats, 'short-'))
  return metrics 
Developer: google-research-datasets, Project: natural-questions, Lines: 26, Source: nq_eval.py

Example 6: _init_fields

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def _init_fields(self):
        self.name               = None          # User-specified name, defaults to build func name if None.
        self.scope              = None          # Unique TF graph scope, derived from the user-specified name.
        self.static_kwargs      = dict()        # Arguments passed to the user-supplied build func.
        self.num_inputs         = 0             # Number of input tensors.
        self.num_outputs        = 0             # Number of output tensors.
        self.input_shapes       = [[]]          # Input tensor shapes (NC or NCHW), including minibatch dimension.
        self.output_shapes      = [[]]          # Output tensor shapes (NC or NCHW), including minibatch dimension.
        self.input_shape        = []            # Short-hand for input_shapes[0].
        self.output_shape       = []            # Short-hand for output_shapes[0].
        self.input_templates    = []            # Input placeholders in the template graph.
        self.output_templates   = []            # Output tensors in the template graph.
        self.input_names        = []            # Name string for each input.
        self.output_names       = []            # Name string for each output.
        self.vars               = OrderedDict() # All variables (localname => var).
        self.trainables         = OrderedDict() # Trainable variables (localname => var).
        self._build_func        = None          # User-supplied build function that constructs the network.
        self._build_func_name   = None          # Name of the build function.
        self._build_module_src  = None          # Full source code of the module containing the build function.
        self._run_cache         = dict()        # Cached graph data for Network.run(). 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 22, Source: tfutil.py

Example 7: _allreduce_coalesced

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced) 
Developer: open-mmlab, Project: mmdetection, Lines: 22, Source: dist_utils.py
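
The OrderedDict matters here because every distributed worker must iterate the buckets in the same order for the all_reduce calls to line up. A standalone sketch of the same group-by-key pattern, with hypothetical data:

from collections import OrderedDict

items = [('float32', 1), ('int64', 2), ('float32', 3)]
buckets = OrderedDict()
for dtype, value in items:
    # Group by key while preserving first-seen key order.
    buckets.setdefault(dtype, []).append(value)
print(list(buckets.items()))  # [('float32', [1, 3]), ('int64', [2])]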

Example 8: test_max_pool_2d

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def test_max_pool_2d():
    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
                              ('padding', [0, 1]), ('dilation', [1, 2])])

    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
            *list(test_cases.values())):
        # wrapper op with 0-dim input
        x_empty = torch.randn(0, in_cha, in_h, in_w, requires_grad=True)
        wrapper = MaxPool2d(k, stride=s, padding=p, dilation=d)
        wrapper_out = wrapper(x_empty)

        # torch op with 3-dim input as shape reference
        x_normal = torch.randn(3, in_cha, in_h, in_w)
        ref = nn.MaxPool2d(k, stride=s, padding=p, dilation=d)
        ref_out = ref(x_normal)

        assert wrapper_out.shape[0] == 0
        assert wrapper_out.shape[1:] == ref_out.shape[1:]

        assert torch.equal(wrapper(x_normal), ref_out) 
Developer: open-mmlab, Project: mmdetection, Lines: 24, Source: test_wrappers.py
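
The test builds its parameter grid as an OrderedDict of pairs so that test_cases.values() iterates in declaration order and the tuple unpacking in the for loop stays aligned with the keys. The same pattern in isolation, with hypothetical parameters:

from collections import OrderedDict
from itertools import product

grid = OrderedDict([('height', [1, 2]), ('width', [3, 4])])
# values() follows insertion order, so unpacking matches the keys.
for height, width in product(*grid.values()):
    print(height, width)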

Example 9: convert

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style."""
    # load the pycls checkpoint
    regnet_model = torch.load(src)
    blobs = regnet_model['model_state']
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)

    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst) 
Developer: open-mmlab, Project: mmdetection, Lines: 26, Source: regnet2mmdet.py

Example 10: __init__

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def __init__(self, cmd_root, cli_args, folders=None, files=None):
        """Main tool runner."""
        # Run options
        self.cmd_root = cmd_root  # Folder on which the command was executed
        self.config = load_config(cmd_root, cli_args)
        self.file_manager = FileManager(folders=folders, files=files)
        self.folders = folders
        self.files = files
        self.all_results = OrderedDict()
        self.all_tools = {}
        self.test_results = None
        self.failed_checks = set()

        self.check = self.config.get_value('check')
        self.enforce = self.config.get_value('enforce')
        self.diff_mode = self.config.get_value('diff_mode')
        self.file_mode = self.config.get_value('file_mode')
        self.branch = self.config.get_value('branch')
        self.disable_formatters = cli_args.disable_formatters
        self.disable_linters = cli_args.disable_linters
        self.disable_tests = cli_args.disable_tests 
Developer: ContinuumIO, Project: ciocheck, Lines: 23, Source: main.py

Example 11: parse_coverage

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def parse_coverage(self):
        """Parse .coverage json report generated by coverage."""
        coverage_string = ("!coverage.py: This is a private format, don't "
                           "read it directly!")
        coverage_path = os.path.join(self.cmd_root, '.coverage')

        covered_lines = {}
        if os.path.isfile(coverage_path):
            with open(coverage_path, 'r') as file_obj:
                data = file_obj.read()
                data = data.replace(coverage_string, '')

            cov = json.loads(data)
            covered_lines = OrderedDict()
            lines = cov['lines']
            for path in sorted(lines):
                covered_lines[path] = lines[path]
        return covered_lines 
Developer: ContinuumIO, Project: ciocheck, Lines: 20, Source: tools.py

Example 12: fprop

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def fprop(self, x):

        output = OrderedDict()  # placeholder; replaced below by deterministic_dict(locals())
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 27, Source: madry_mnist_model.py

Example 13: read_cpg_profiles

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def read_cpg_profiles(filenames, log=None, *args, **kwargs):
    """Read methylation profiles.

    Input files can be gzip compressed.

    Returns
    -------
    dict
        `dict (key, value)`, where `key` is the output name and `value` the CpG
        table.
    """

    cpg_profiles = OrderedDict()
    for filename in filenames:
        if log:
            log(filename)
        cpg_file = get_fh(filename, 'r')
        output_name = split_ext(filename)
        cpg_profile = dat.read_cpg_profile(cpg_file, sort=True, *args, **kwargs)
        cpg_profiles[output_name] = cpg_profile
        cpg_file.close()
    return cpg_profiles 
Developer: kipoi, Project: models, Lines: 25, Source: dataloader_m.py

Example 14: map_cpg_tables

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def map_cpg_tables(cpg_tables, chromo, chromo_pos):
    """Maps values from cpg_tables to `chromo_pos`.

    Positions in `cpg_tables` for `chromo` must be a subset of `chromo_pos`.
    Inserts `dat.CPG_NAN` for uncovered positions.
    """
    chromo_pos.sort()
    mapped_tables = OrderedDict()
    for name, cpg_table in six.iteritems(cpg_tables):
        cpg_table = cpg_table.loc[cpg_table.chromo == chromo]
        cpg_table = cpg_table.sort_values('pos')
        mapped_table = map_values(cpg_table.value.values,
                                  cpg_table.pos.values,
                                  chromo_pos)
        assert len(mapped_table) == len(chromo_pos)
        mapped_tables[name] = mapped_table
    return mapped_tables 
Developer: kipoi, Project: models, Lines: 19, Source: dataloader_m.py

Example 15: __init__

# Required import: import collections [as alias]
# Or: from collections import OrderedDict [as alias]
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize the weights
        self.params = {}
        # Initialize with a Gaussian distribution
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss() 
Developer: wdxtub, Project: deep-learning-note, Lines: 18, Source: 10_two_layer_net.py
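
The layers live in an OrderedDict so the forward pass can simply iterate them in insertion order. A hedged sketch of such a predict method (Affine, Relu, and SoftmaxWithLoss come from the accompanying book code and are assumed to expose a forward method):

def predict(self, x):
    # Apply each layer in the order it was registered.
    for layer in self.layers.values():
        x = layer.forward(x)
    return x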


Note: The collections.OrderedDict examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code. Please consult each project's license before using or redistributing the code; do not reproduce without permission.