Python data_utils.get_file Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.keras._impl.keras.utils.data_utils.get_file. If you are wondering what get_file does, how to call it, or what real usage looks like, the curated examples below should help.


The following presents 15 code examples of the get_file function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
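
Before the examples, here is a minimal, hedged sketch of how get_file itself is typically called: it downloads a file from origin into the Keras cache directory and returns the local path, reusing a cached copy when it already matches the optional file_hash. The filename, URL, and hash below are placeholders for illustration, not real resources.

from tensorflow.python.keras._impl.keras.utils.data_utils import get_file

path = get_file(
    'example.npz',                             # name to cache the file under
    origin='https://example.com/example.npz',  # download URL (placeholder)
    file_hash='0123456789abcdef',              # optional integrity check (placeholder)
    cache_subdir='datasets')                   # subdirectory of ~/.keras
print(path)  # absolute path to the cached copy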

Example 1: load_data

def load_data(path='boston_housing.npz', seed=113, test_split=0.2):
  """Loads the Boston Housing dataset.

  Arguments:
      path: path where to cache the dataset locally
          (relative to ~/.keras/datasets).
      seed: Random seed for shuffling the data
          before computing the test split.
      test_split: fraction of the data to reserve as test set.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  assert 0 <= test_split < 1
  fh = 'f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5'
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz',
      file_hash=fh)
  f = np.load(path)
  x = f['x']
  y = f['y']
  f.close()

  np.random.seed(seed)
  indices = np.arange(len(x))
  np.random.shuffle(indices)
  x = x[indices]
  y = y[indices]

  x_train = np.array(x[:int(len(x) * (1 - test_split))])
  y_train = np.array(y[:int(len(x) * (1 - test_split))])
  x_test = np.array(x[int(len(x) * (1 - test_split)):])
  y_test = np.array(y[int(len(x) * (1 - test_split)):])
  return (x_train, y_train), (x_test, y_test)
Developer: AbhinavJain13, Project: tensorflow, Lines: 35, Source: boston_housing.py
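
For context, a call to this loader looks like the following (a minimal sketch; the shapes in the comment assume the standard Boston Housing data with the default test_split=0.2):

(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape, x_test.shape)  # e.g. (404, 13) and (102, 13)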

Example 2: decode_predictions

def decode_predictions(preds, top=5):
  """Decodes the prediction of an ImageNet model.

  Arguments:
      preds: Numpy tensor encoding a batch of predictions.
      top: integer, how many top-guesses to return.

  Returns:
      A list of lists of top class prediction tuples
      `(class_name, class_description, score)`.
      One list of tuples per sample in batch input.

  Raises:
      ValueError: in case of invalid shape of the `preds` array
          (must be 2D).
  """
  global CLASS_INDEX
  if len(preds.shape) != 2 or preds.shape[1] != 1000:
    raise ValueError('`decode_predictions` expects '
                     'a batch of predictions '
                     '(i.e. a 2D array of shape (samples, 1000)). '
                     'Found array with shape: ' + str(preds.shape))
  if CLASS_INDEX is None:
    fpath = get_file('imagenet_class_index.json',
                     CLASS_INDEX_PATH,
                     cache_subdir='models',
                     file_hash='c2c37ea517e94d9795004a39431a14cb')
    CLASS_INDEX = json.load(open(fpath))
  results = []
  for pred in preds:
    top_indices = pred.argsort()[-top:][::-1]
    result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
    result.sort(key=lambda x: x[2], reverse=True)
    results.append(result)
  return results
Developer: Kongsea, Project: tensorflow, Lines: 35, Source: imagenet_utils.py
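
A hedged usage sketch of decode_predictions, assuming the surrounding module (with CLASS_INDEX_PATH defined) is available; preds here is synthetic data shaped like softmax output rather than real model predictions:

import numpy as np

preds = np.random.rand(2, 1000)            # two synthetic prediction rows
preds /= preds.sum(axis=1, keepdims=True)  # normalize rows to sum to 1, like softmax

for sample in decode_predictions(preds, top=3):
    for class_name, description, score in sample:
        print(class_name, description, score)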

Example 3: load_data

def load_data():
  """Loads the Fashion-MNIST dataset.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  dirname = os.path.join('datasets', 'fashion-mnist')
  base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
  files = [
      'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
      't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
  ]

  paths = []
  for given_file in files:
    paths.append(
        get_file(given_file, origin=base + given_file, cache_subdir=dirname))

  with gzip.open(paths[0], 'rb') as lbpath:
    y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

  with gzip.open(paths[1], 'rb') as imgpath:
    x_train = np.frombuffer(
        imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)

  with gzip.open(paths[2], 'rb') as lbpath:
    y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)

  with gzip.open(paths[3], 'rb') as imgpath:
    x_test = np.frombuffer(
        imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)

  return (x_train, y_train), (x_test, y_test)
Developer: AbhinavJain13, Project: tensorflow, Lines: 33, Source: fashion_mnist.py

Example 4: load_data

def load_data():
  """Loads CIFAR10 dataset.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  dirname = 'cifar-10-batches-py'
  origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
  path = get_file(dirname, origin=origin, untar=True)

  num_train_samples = 50000

  x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
  y_train = np.zeros((num_train_samples,), dtype='uint8')

  for i in range(1, 6):
    fpath = os.path.join(path, 'data_batch_' + str(i))
    data, labels = load_batch(fpath)
    x_train[(i - 1) * 10000:i * 10000, :, :, :] = data
    y_train[(i - 1) * 10000:i * 10000] = labels

  fpath = os.path.join(path, 'test_batch')
  x_test, y_test = load_batch(fpath)

  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
Developer: 1000sprites, Project: tensorflow, Lines: 32, Source: cifar10.py

Example 5: load_data

def load_data(label_mode='fine'):
  """Loads CIFAR100 dataset.

  Arguments:
      label_mode: one of "fine", "coarse".

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      ValueError: in case of invalid `label_mode`.
  """
  if label_mode not in ['fine', 'coarse']:
    raise ValueError('label_mode must be one of "fine", "coarse".')

  dirname = 'cifar-100-python'
  origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
  path = get_file(dirname, origin=origin, untar=True)

  fpath = os.path.join(path, 'train')
  x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')

  fpath = os.path.join(path, 'test')
  x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')

  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
Developer: 1000sprites, Project: tensorflow, Lines: 33, Source: cifar100.py

Example 6: get_word_index

def get_word_index(path='imdb_word_index.json'):
  """Retrieves the dictionary mapping word indices back to words.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).

  Returns:
      The word index dictionary.
  """
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json',
      file_hash='bfafd718b763782e994055a2d397834f')
  with open(path) as f:
    return json.load(f)
Developer: AndrewTwinz, Project: tensorflow, Lines: 15, Source: imdb.py
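
A quick usage sketch; the returned dictionary maps words to their integer indices, so it is usually inverted when decoding index sequences back to text (the printed value is illustrative):

word_index = get_word_index()
print(word_index['the'])  # a small integer, since "the" is among the most frequent words
index_to_word = {v: k for k, v in word_index.items()}  # inverted map for decoding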

Example 7: get_word_index

def get_word_index(path='imdb_word_index.json'):
  """Retrieves the dictionary mapping word indices back to words.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).

  Returns:
      The word index dictionary.
  """
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json')
  f = open(path)
  data = json.load(f)
  f.close()
  return data
Developer: AbhinavJain13, Project: tensorflow, Lines: 16, Source: imdb.py

Example 8: get_word_index

def get_word_index(path='reuters_word_index.json'):
  """Retrieves the dictionary mapping word indices back to words.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).

  Returns:
      The word index dictionary.
  """
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json',
      file_hash='4d44cc38712099c9e383dc6e5f11a921')
  f = open(path)
  data = json.load(f)
  f.close()
  return data
Developer: AbhinavJain13, Project: tensorflow, Lines: 17, Source: reuters.py

Example 9: load_data

def load_data(path='mnist.npz'):
  """Loads the MNIST dataset.

  Arguments:
      path: path where to cache the dataset locally
          (relative to ~/.keras/datasets).

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/img-datasets/mnist.npz',
      file_hash='8a61469f7ea1b51cbae51d4f78837e45')
  f = np.load(path)
  x_train, y_train = f['x_train'], f['y_train']
  x_test, y_test = f['x_test'], f['y_test']
  f.close()
  return (x_train, y_train), (x_test, y_test)
Developer: AndrewTwinz, Project: tensorflow, Lines: 19, Source: mnist.py

Example 10: load_data

def load_data(path='reuters.npz',
              num_words=None,
              skip_top=0,
              maxlen=None,
              test_split=0.2,
              seed=113,
              start_char=1,
              oov_char=2,
              index_from=3):
  """Loads the Reuters newswire classification dataset.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).
      num_words: max number of words to include. Words are ranked
          by how often they occur (in the training set) and only
          the most frequent words are kept
      skip_top: skip the top N most frequently occurring words
          (which may not be informative).
      maxlen: truncate sequences after this length.
      test_split: Fraction of the dataset to be used as test data.
      seed: random seed for sample shuffling.
      start_char: The start of a sequence will be marked with this character.
          Set to 1 because 0 is usually the padding character.
      oov_char: words that were cut out because of the `num_words`
          or `skip_top` limit will be replaced with this character.
      index_from: index actual words with this index and higher.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Note that the 'out of vocabulary' character is only used for
  words that were present in the training set but are not included
  because they're not making the `num_words` cut here.
  Words that were not seen in the training set but are in the test set
  have simply been skipped.
  """
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/text-datasets/reuters.npz',
      file_hash='87aedbeb0cb229e378797a632c1997b6')
  npzfile = np.load(path)
  xs = npzfile['x']
  labels = npzfile['y']
  npzfile.close()

  np.random.seed(seed)
  indices = np.arange(len(xs))
  np.random.shuffle(indices)
  xs = xs[indices]
  labels = labels[indices]

  if start_char is not None:
    xs = [[start_char] + [w + index_from for w in x] for x in xs]
  elif index_from:
    xs = [[w + index_from for w in x] for x in xs]

  if maxlen:
    new_xs = []
    new_labels = []
    for x, y in zip(xs, labels):
      if len(x) < maxlen:
        new_xs.append(x)
        new_labels.append(y)
    xs = new_xs
    labels = new_labels

  if not num_words:
    num_words = max([max(x) for x in xs])

  # by convention, use 2 as OOV word
  # reserve 'index_from' (=3 by default) characters:
  # 0 (padding), 1 (start), 2 (OOV)
  if oov_char is not None:
    xs = [[oov_char if (w >= num_words or w < skip_top) else w for w in x]
          for x in xs]
  else:
    new_xs = []
    for x in xs:
      nx = []
      for w in x:
        if skip_top <= w < num_words:
          nx.append(w)
      new_xs.append(nx)
    xs = new_xs

  x_train = np.array(xs[:int(len(xs) * (1 - test_split))])
  y_train = np.array(labels[:int(len(xs) * (1 - test_split))])

  x_test = np.array(xs[int(len(xs) * (1 - test_split)):])
  y_test = np.array(labels[int(len(xs) * (1 - test_split)):])

  return (x_train, y_train), (x_test, y_test)
Developer: AbhinavJain13, Project: tensorflow, Lines: 94, Source: reuters.py
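
For illustration, a hedged example of calling this loader with a vocabulary cap; the sample counts in the comment are what the standard dataset yields with the default test_split=0.2:

(x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
# Each sample is a list of word indices; words outside the top 10000
# (or below skip_top) were replaced by oov_char (2), per the convention above.
print(len(x_train), len(x_test))  # e.g. 8982 and 2246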

Example 11: load_data

def load_data(path='imdb.npz',
              num_words=None,
              skip_top=0,
              maxlen=None,
              seed=113,
              start_char=1,
              oov_char=2,
              index_from=3):
  """Loads the IMDB dataset.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).
      num_words: max number of words to include. Words are ranked
          by how often they occur (in the training set) and only
          the most frequent words are kept
      skip_top: skip the top N most frequently occurring words
          (which may not be informative).
      maxlen: sequences longer than this will be filtered out.
      seed: random seed for sample shuffling.
      start_char: The start of a sequence will be marked with this character.
          Set to 1 because 0 is usually the padding character.
      oov_char: words that were cut out because of the `num_words`
          or `skip_top` limit will be replaced with this character.
      index_from: index actual words with this index and higher.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      ValueError: in case `maxlen` is so low
          that no input sequence could be kept.

  Note that the 'out of vocabulary' character is only used for
  words that were present in the training set but are not included
  because they're not making the `num_words` cut here.
  Words that were not seen in the training set but are in the test set
  have simply been skipped.
  """
  path = get_file(
      path,
      origin='https://s3.amazonaws.com/text-datasets/imdb.npz',
      file_hash='599dadb1135973df5b59232a0e9a887c')
  f = np.load(path)
  x_train, labels_train = f['x_train'], f['y_train']
  x_test, labels_test = f['x_test'], f['y_test']
  f.close()

  np.random.seed(seed)
  indices = np.arange(len(x_train))
  np.random.shuffle(indices)
  x_train = x_train[indices]
  labels_train = labels_train[indices]

  indices = np.arange(len(x_test))
  np.random.shuffle(indices)
  x_test = x_test[indices]
  labels_test = labels_test[indices]

  xs = np.concatenate([x_train, x_test])
  labels = np.concatenate([labels_train, labels_test])

  if start_char is not None:
    xs = [[start_char] + [w + index_from for w in x] for x in xs]
  elif index_from:
    xs = [[w + index_from for w in x] for x in xs]

  if maxlen:
    new_xs = []
    new_labels = []
    for x, y in zip(xs, labels):
      if len(x) < maxlen:
        new_xs.append(x)
        new_labels.append(y)
    xs = new_xs
    labels = new_labels
    if not xs:
      raise ValueError('After filtering for sequences shorter than maxlen=' +
                       str(maxlen) + ', no sequence was kept. '
                       'Increase maxlen.')
  if not num_words:
    num_words = max([max(x) for x in xs])

  # by convention, use 2 as OOV word
  # reserve 'index_from' (=3 by default) characters:
  # 0 (padding), 1 (start), 2 (OOV)
  if oov_char is not None:
    xs = [[oov_char if (w >= num_words or w < skip_top) else w for w in x]
          for x in xs]
  else:
    new_xs = []
    for x in xs:
      nx = []
      for w in x:
        if skip_top <= w < num_words:
          nx.append(w)
      new_xs.append(nx)
    xs = new_xs

  x_train = np.array(xs[:len(x_train)])
  y_train = np.array(labels[:len(x_train)])
#......... (part of the code omitted here) .........
Developer: AbhinavJain13, Project: tensorflow, Lines: 101, Source: imdb.py

Example 12: Xception


#......... (part of the code omitted here) .........
  x = SeparableConv2D(
      728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
  x = BatchNormalization(name='block4_sepconv1_bn')(x)
  x = Activation('relu', name='block4_sepconv2_act')(x)
  x = SeparableConv2D(
      728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
  x = BatchNormalization(name='block4_sepconv2_bn')(x)

  x = MaxPooling2D(
      (3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
  x = layers.add([x, residual])

  for i in range(8):
    residual = x
    prefix = 'block' + str(i + 5)

    x = Activation('relu', name=prefix + '_sepconv1_act')(x)
    x = SeparableConv2D(
        728, (3, 3), padding='same', use_bias=False,
        name=prefix + '_sepconv1')(x)
    x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
    x = Activation('relu', name=prefix + '_sepconv2_act')(x)
    x = SeparableConv2D(
        728, (3, 3), padding='same', use_bias=False,
        name=prefix + '_sepconv2')(x)
    x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
    x = Activation('relu', name=prefix + '_sepconv3_act')(x)
    x = SeparableConv2D(
        728, (3, 3), padding='same', use_bias=False,
        name=prefix + '_sepconv3')(x)
    x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

    x = layers.add([x, residual])

  residual = Conv2D(
      1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
  residual = BatchNormalization()(residual)

  x = Activation('relu', name='block13_sepconv1_act')(x)
  x = SeparableConv2D(
      728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
  x = BatchNormalization(name='block13_sepconv1_bn')(x)
  x = Activation('relu', name='block13_sepconv2_act')(x)
  x = SeparableConv2D(
      1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
  x = BatchNormalization(name='block13_sepconv2_bn')(x)

  x = MaxPooling2D(
      (3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
  x = layers.add([x, residual])

  x = SeparableConv2D(
      1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
  x = BatchNormalization(name='block14_sepconv1_bn')(x)
  x = Activation('relu', name='block14_sepconv1_act')(x)

  x = SeparableConv2D(
      2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
  x = BatchNormalization(name='block14_sepconv2_bn')(x)
  x = Activation('relu', name='block14_sepconv2_act')(x)

  if include_top:
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input
  # Create model.
  model = Model(inputs, x, name='xception')

  # load weights
  if weights == 'imagenet':
    if include_top:
      weights_path = get_file(
          'xception_weights_tf_dim_ordering_tf_kernels.h5',
          TF_WEIGHTS_PATH,
          cache_subdir='models',
          file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
    else:
      weights_path = get_file(
          'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
          TF_WEIGHTS_PATH_NO_TOP,
          cache_subdir='models',
          file_hash='b0042744bf5b25fce3cb969f33bebb97')
    model.load_weights(weights_path)

  elif weights is not None:
    model.load_weights(weights)

  if old_data_format:
    K.set_image_data_format(old_data_format)
  return model
Developer: Kongsea, Project: tensorflow, Lines: 101, Source: xception.py
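
For reference, a hedged sketch of how such an application constructor is typically used. The excerpt above does not show the function signature, so this assumes the standard Keras applications form Xception(include_top=True, weights='imagenet', ...); on first use the pretrained weights are fetched via get_file into ~/.keras/models:

model = Xception(include_top=True, weights='imagenet')  # assumed signature, see note above
model.summary()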

Example 13: DenseNet


#......... (part of the code omitted here) .........
      weights=weights)

  if input_tensor is None:
    img_input = Input(shape=input_shape)
  else:
    if not K.is_keras_tensor(input_tensor):
      img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  bn_axis = 3 if K.image_data_format() == 'channels_last' else 1

  x = ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
  x = Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
  x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
  x = Activation('relu', name='conv1/relu')(x)
  x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
  x = MaxPooling2D(3, strides=2, name='pool1')(x)

  x = dense_block(x, blocks[0], name='conv2')
  x = transition_block(x, 0.5, name='pool2')
  x = dense_block(x, blocks[1], name='conv3')
  x = transition_block(x, 0.5, name='pool3')
  x = dense_block(x, blocks[2], name='conv4')
  x = transition_block(x, 0.5, name='pool4')
  x = dense_block(x, blocks[3], name='conv5')

  x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)

  if include_top:
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='fc1000')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D(name='max_pool')(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  if blocks == [6, 12, 24, 16]:
    model = Model(inputs, x, name='densenet121')
  elif blocks == [6, 12, 32, 32]:
    model = Model(inputs, x, name='densenet169')
  elif blocks == [6, 12, 48, 32]:
    model = Model(inputs, x, name='densenet201')
  else:
    model = Model(inputs, x, name='densenet')

  # Load weights.
  if weights == 'imagenet':
    if include_top:
      if blocks == [6, 12, 24, 16]:
        weights_path = get_file(
            'densenet121_weights_tf_dim_ordering_tf_kernels.h5',
            DENSENET121_WEIGHT_PATH,
            cache_subdir='models',
            file_hash='0962ca643bae20f9b6771cb844dca3b0')
      elif blocks == [6, 12, 32, 32]:
        weights_path = get_file(
            'densenet169_weights_tf_dim_ordering_tf_kernels.h5',
            DENSENET169_WEIGHT_PATH,
            cache_subdir='models',
            file_hash='bcf9965cf5064a5f9eb6d7dc69386f43')
      elif blocks == [6, 12, 48, 32]:
        weights_path = get_file(
            'densenet201_weights_tf_dim_ordering_tf_kernels.h5',
            DENSENET201_WEIGHT_PATH,
            cache_subdir='models',
            file_hash='7bb75edd58cb43163be7e0005fbe95ef')
    else:
      if blocks == [6, 12, 24, 16]:
        weights_path = get_file(
            'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
            DENSENET121_WEIGHT_PATH_NO_TOP,
            cache_subdir='models',
            file_hash='4912a53fbd2a69346e7f2c0b5ec8c6d3')
      elif blocks == [6, 12, 32, 32]:
        weights_path = get_file(
            'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5',
            DENSENET169_WEIGHT_PATH_NO_TOP,
            cache_subdir='models',
            file_hash='50662582284e4cf834ce40ab4dfa58c6')
      elif blocks == [6, 12, 48, 32]:
        weights_path = get_file(
            'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5',
            DENSENET201_WEIGHT_PATH_NO_TOP,
            cache_subdir='models',
            file_hash='1c2de60ee40562448dbac34a0737e798')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
Developer: ChengYuXiang, Project: tensorflow, Lines: 101, Source: densenet.py

Example 14: VGG19


#......... (part of the code omitted here) .........
      128, (3, 3), activation='relu', padding='same', name='block2_conv2')(
          x)
  x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

  # Block 3
  x = Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv1')(
          x)
  x = Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv2')(
          x)
  x = Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv3')(
          x)
  x = Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv4')(
          x)
  x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

  # Block 4
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv1')(
          x)
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv2')(
          x)
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv3')(
          x)
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv4')(
          x)
  x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

  # Block 5
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv1')(
          x)
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv2')(
          x)
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv3')(
          x)
  x = Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv4')(
          x)
  x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

  if include_top:
    # Classification block
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input
  # Create model.
  model = Model(inputs, x, name='vgg19')

  # load weights
  if weights == 'imagenet':
    if include_top:
      weights_path = get_file(
          'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
          WEIGHTS_PATH,
          cache_subdir='models',
          file_hash='cbe5617147190e668d6c5d5026f83318')
    else:
      weights_path = get_file(
          'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
          WEIGHTS_PATH_NO_TOP,
          cache_subdir='models',
          file_hash='253f8cb515780f3b799900260a226db6')
    model.load_weights(weights_path)
    if K.backend() == 'theano':
      layer_utils.convert_all_kernels_in_model(model)

    if K.image_data_format() == 'channels_first':
      if include_top:
        maxpool = model.get_layer(name='block5_pool')
        shape = maxpool.output_shape[1:]
        dense = model.get_layer(name='fc1')
        layer_utils.convert_dense_weights_data_format(dense, shape,
                                                      'channels_first')

  elif weights is not None:
    model.load_weights(weights)

  return model
Developer: ChengYuXiang, Project: tensorflow, Lines: 101, Source: vgg19.py

Example 15: NASNet


#......... (part of the code omitted here) .........
    x = Conv2D(
        stem_block_filters, (3, 3),
        strides=(1, 1),
        padding='same',
        use_bias=False,
        name='stem_conv1',
        kernel_initializer='he_normal')(
            img_input)

  x = BatchNormalization(
      axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='stem_bn1')(
          x)

  p = None
  if not skip_reduction:  # imagenet / mobile mode
    x, p = _reduction_a_cell(
        x, p, filters // (filter_multiplier**2), block_id='stem_1')
    x, p = _reduction_a_cell(
        x, p, filters // filter_multiplier, block_id='stem_2')

  for i in range(num_blocks):
    x, p = _normal_a_cell(x, p, filters, block_id='%d' % (i))

  x, p0 = _reduction_a_cell(
      x, p, filters * filter_multiplier, block_id='reduce_%d' % (num_blocks))

  p = p0 if not skip_reduction else p

  for i in range(num_blocks):
    x, p = _normal_a_cell(
        x, p, filters * filter_multiplier, block_id='%d' % (num_blocks + i + 1))

  x, p0 = _reduction_a_cell(
      x,
      p,
      filters * filter_multiplier**2,
      block_id='reduce_%d' % (2 * num_blocks))

  p = p0 if not skip_reduction else p

  for i in range(num_blocks):
    x, p = _normal_a_cell(
        x,
        p,
        filters * filter_multiplier**2,
        block_id='%d' % (2 * num_blocks + i + 1))

  x = Activation('relu')(x)

  if include_top:
    x = GlobalAveragePooling2D()(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input

  model = Model(inputs, x, name='NASNet')

  # load weights
  if weights == 'imagenet':
    if default_size == 224:  # mobile version
      if include_top:
        weight_path = NASNET_MOBILE_WEIGHT_PATH
        model_name = 'nasnet_mobile.h5'
      else:
        weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP
        model_name = 'nasnet_mobile_no_top.h5'

      weights_file = get_file(model_name, weight_path, cache_subdir='models')
      model.load_weights(weights_file)

    elif default_size == 331:  # large version
      if include_top:
        weight_path = NASNET_LARGE_WEIGHT_PATH
        model_name = 'nasnet_large.h5'
      else:
        weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP
        model_name = 'nasnet_large_no_top.h5'

      weights_file = get_file(model_name, weight_path, cache_subdir='models')
      model.load_weights(weights_file)
    else:
      raise ValueError('ImageNet weights can only be loaded with NASNetLarge'
                       ' or NASNetMobile')
  elif weights is not None:
    model.load_weights(weights)

  if old_data_format:
    K.set_image_data_format(old_data_format)

  return model
Developer: ChengYuXiang, Project: tensorflow, Lines: 101, Source: nasnet.py


Note: The tensorflow.python.keras._impl.keras.utils.data_utils.get_file examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use must follow the corresponding project's license. Do not reproduce without permission.