

Python cloudstorage.open Method Code Examples

This article collects typical usage examples of the cloudstorage.open method in Python. If you are wondering what exactly cloudstorage.open does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the cloudstorage package that the method belongs to.


Fifteen code examples of the cloudstorage.open method are shown below, sorted by popularity by default.
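Before the examples, here is a minimal sketch of the two most common calls, assuming the App Engine cloudstorage client library (the GoogleAppEngineCloudStorageClient package); the bucket name and file contents are placeholders, not taken from the projects below:

import cloudstorage

# Cloud Storage object names always start with "/bucket/object".
filename = '/my-bucket/demo.txt'  # placeholder bucket and object name

# Write: open in 'w' mode; the object only becomes readable after the handle is closed.
with cloudstorage.open(filename, 'w', content_type='text/plain') as gcs_file:
    gcs_file.write('hello\n')

# Read: the default mode is 'r'; the handle is file-like (read, readline, seek, tell, close).
with cloudstorage.open(filename) as gcs_file:
    contents = gcs_file.read()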

Example 1: flush

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def flush(self):
    """Flush pool contents."""
    start_time = time.time()
    for filename, data in self._append_buffer.iteritems():
      with files.open(filename, "a") as f:
        if len(data) > _FILES_API_MAX_SIZE:
          raise errors.Error("Bad data of length: %s" % len(data))
        if self._ctx:
          operation.counters.Increment(
              COUNTER_IO_WRITE_BYTES, len(data))(self._ctx)
        f.write(data)
    if self._ctx:
      operation.counters.Increment(
          COUNTER_IO_WRITE_MSEC,
          int((time.time() - start_time) * 1000))(self._ctx)
    self._append_buffer = {}
    self._size = 0 
Author: elsigh, Project: browserscope, Lines: 19, Source: output_writers.py

Example 2: _split_input_from_namespace

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def _split_input_from_namespace(cls,
                                  app,
                                  namespace,
                                  entity_kind_name,
                                  shard_count):
    key_ranges = super(ConsistentKeyReader, cls)._split_input_from_namespace(
        app, namespace, entity_kind_name, shard_count)
    assert len(key_ranges) == shard_count

    # The KeyRanges calculated by the base class may not include keys for
    # entities that have unapplied jobs. So use an open key range for the first
    # and last KeyRanges to ensure that they will be processed.
    try:
      last_key_range_index = key_ranges.index(None) - 1
    except ValueError:
      last_key_range_index = shard_count - 1

    if last_key_range_index != -1:
      key_ranges[0].key_start = None
      key_ranges[0].include_start = False
      key_ranges[last_key_range_index].key_end = None
      key_ranges[last_key_range_index].include_end = False
    return key_ranges 
Author: elsigh, Project: browserscope, Lines: 25, Source: input_readers.py

Example 3: next

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def next(self):
    """Returns the next input from this input reader, a block of bytes.

    Returns:
      The next input from this input reader in the form of a cloudstorage
      ReadBuffer that supports a File-like interface (read, readline, seek,
      tell, and close). An error may be raised if the file can not be opened.

    Raises:
      StopIteration: The list of files has been exhausted.
    """
    # A generator or for-loop is not used to ensure we can easily serialize
    # the state of the iteration
    if self._index >= len(self._filenames):
      raise StopIteration()
    else:
      options = {}
      if self._buffer_size:
        options["read_buffer_size"] = self._buffer_size
      if self._account_id:
        options["_account_id"] = self._account_id
      handle = cloudstorage.open(self._filenames[self._index], **options)
      self._index += 1
      return handle 
Author: elsigh, Project: browserscope, Lines: 26, Source: input_readers.py
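The value returned above is a file-like read buffer; a short usage sketch (the reader variable is hypothetical, and only the calls named in the docstring are used):

handle = reader.next()           # hypothetical reader instance; yields a cloudstorage ReadBuffer
first_line = handle.readline()   # read a single line
handle.seek(0)                   # rewind to the beginning of the object
chunk = handle.read(1024)        # read up to 1 KB
position = handle.tell()         # current byte offset
handle.close()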

Example 4: _create

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def _create(cls, writer_spec, filename_suffix):
    """Helper method that actually creates the file in cloud storage."""
    # GoogleCloudStorage format for filenames, Initial slash is required
    filename = "/%s/%s" % (writer_spec[cls.BUCKET_NAME_PARAM], filename_suffix)

    content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)

    options = {}
    if cls.ACL_PARAM in writer_spec:
      options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)

    account_id = writer_spec.get(cls._ACCOUNT_ID_PARAM, None)

    writer = cloudstorage.open(filename, mode="w",
                               content_type=content_type,
                               options=options,
                               _account_id=account_id)

    return cls(writer, writer_spec=writer_spec) 
Author: singhj, Project: locality-sensitive-hashing, Lines: 21, Source: output_writers.py

Example 5: testAlertSummaryObjectChangeNotification

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def testAlertSummaryObjectChangeNotification(self):
    data_dir = 'test/data/notifications'
    file_name = 'google-platform-demo-2014-02-04.json'
    project_date = main.MatchProjectDate(file_name)

    compute_engine_alert = main.Alert(parent=main.Alert.entity_group)
    compute_engine_alert.name = 'Test Compute Engine Alert Alert'
    compute_engine_alert.range = main.AlertRange.ONE_DAY
    compute_engine_alert.target_value = 'Total'
    compute_engine_alert.trigger = main.AlertTrigger.TOTAL_CHANGE
    compute_engine_alert.trigger_value = 10.00
    compute_engine_alert.project = project_date[0]
    compute_engine_alert.put()
    subscription = main.Subscription.getInstance(project_date[0])
    subscription.daily_summary = False
    local_notification = open(os.sep.join([data_dir, file_name])).read()
    notification_dict = json.loads(local_notification)
    project_date = main.MatchProjectDate(file_name)
    subscription = main.Subscription.getInstance(project_date[0])
    subscription.daily_summary = True
    response = self.testapp.post_json('/objectChangeNotification',
                                      notification_dict)
    logging.debug(repr(response))
    self.assertEqual(response.status_int, 200) 
Author: googlearchive, Project: billing-export-python, Lines: 26, Source: test.py

Example 6: upload

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def upload(self, attachment, content):
        ''' Uploads a file to the default cloud storage bucket

        Args:
        attachment - The models.Attachment metadata for the file
        content - The file content
        '''
        filename = '/{}/{}'.format(self.bucket_name,
                                   attachment.stored_filename)
        write_retry_params = gcs.RetryParams(backoff_factor=1.1)
        gcs_file = gcs.open(
            filename,
            'w',
            content_type=attachment.mime_type,
            retry_params=write_retry_params)
        gcs_file.write(content)
        gcs_file.close() 
Author: duo-labs, Project: isthislegit, Lines: 19, Source: storage.py
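A hedged counterpart for reading the same object back; the download method below is an assumption that mirrors upload() and is not part of the isthislegit source:

def download(self, attachment):
    ''' Reads a previously uploaded file from the default bucket (sketch).

    Args:
    attachment - The models.Attachment metadata for the file
    '''
    filename = '/{}/{}'.format(self.bucket_name,
                               attachment.stored_filename)
    with gcs.open(filename) as gcs_file:
        return gcs_file.read()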

Example 7: get

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def get(self):
        # Get the default Cloud Storage Bucket name and create a file name for
        # the object in Cloud Storage.
        bucket = app_identity.get_default_gcs_bucket_name()

        # Cloud Storage file names are in the format /bucket/object.
        filename = '/{}/blobstore_serving_demo'.format(bucket)

        # Create a file in Google Cloud Storage and write something to it.
        with cloudstorage.open(filename, 'w') as filehandle:
            filehandle.write('abcde\n')

        # In order to read the contents of the file using the Blobstore API,
        # you must create a blob_key from the Cloud Storage file name.
        # Blobstore expects the filename to be in the format of:
        # /gs/bucket/object
        blobstore_filename = '/gs{}'.format(filename)
        blob_key = blobstore.create_gs_key(blobstore_filename)

        # BlobstoreDownloadHandler serves the file from Google Cloud Storage to
        # your computer using blob_key.
        self.send_blob(blob_key) 
Author: GoogleCloudPlatform, Project: python-docs-samples, Lines: 24, Source: main.py

Example 8: create_file

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def create_file(self, filename):
        """Create a file."""

        self.response.write('Creating file {}\n'.format(filename))

        # The retry_params specified in the open call will override the default
        # retry params for this particular file handle.
        write_retry_params = cloudstorage.RetryParams(backoff_factor=1.1)
        with cloudstorage.open(
            filename, 'w', content_type='text/plain', options={
                'x-goog-meta-foo': 'foo', 'x-goog-meta-bar': 'bar'},
                retry_params=write_retry_params) as cloudstorage_file:
            cloudstorage_file.write('abcde\n')
            cloudstorage_file.write('f'*1024*4 + '\n')
        self.tmp_filenames_to_clean_up.append(filename)
# [END write]

# [START read] 
Author: GoogleCloudPlatform, Project: python-docs-samples, Lines: 20, Source: main.py
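The # [START read] marker indicates that the sample continues with a read helper; a hedged sketch of what such a helper might look like (not necessarily the sample's exact code):

def read_file(self, filename):
    """Read back the file created above and echo its contents (sketch)."""
    self.response.write('Reading the full file contents:\n')
    with cloudstorage.open(filename) as cloudstorage_file:
        contents = cloudstorage_file.read()
        self.response.write(contents)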

Example 9: process_export

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def process_export():
    bucket = int(flask.request.values['bucket'])
    filename = '/ilps-search-log.appspot.com/search_log.%d.gz' % bucket
    with gcs.open(filename, 'w', 'text/plain', {'content-encoding': 'gzip'}) as f:
        bucket_size = int(flask.request.values['bucket_size'])
        offset = bucket * bucket_size
        with gzip.GzipFile('', fileobj=f, mode='wb') as gz:
            ndb.get_context().clear_cache()
            for s in Session.query(Session.shared == True).iter(batch_size=10,
                    offset=offset, limit=bucket_size):
                ndb.get_context().clear_cache()
                gc.collect()
                s.user_id = ''
                print >>gz, json.dumps(s.to_dict(), default=util.default,
                        ensure_ascii=False).encode('utf-8')
    response = 'Written: %s' % str(blobstore.create_gs_key('/gs' + filename))
    app.logger.info(response)
    return response, 200 
Author: varepsilon, Project: cas-eval, Lines: 20, Source: main.py
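Reading such a gzip-compressed export back is symmetric: open the object for reading and wrap it in a GzipFile. A minimal sketch, assuming the same "import cloudstorage as gcs" alias and with the bucket index 0 as a placeholder:

import gzip
import json

filename = '/ilps-search-log.appspot.com/search_log.0.gz'  # placeholder bucket index
with gcs.open(filename) as f:                 # assumes: import cloudstorage as gcs
    with gzip.GzipFile('', fileobj=f, mode='rb') as gz:
        for line in gz:                       # one JSON document per line
            session_dict = json.loads(line.decode('utf-8'))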

Example 10: load_main_energy_data

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def load_main_energy_data(project_id, gs_path):
  """Load main energy data from the specified file.

  Load main energy data from the specified file.

  Args:
    project_id: string, GCP project id.
    gs_path: string, path to the data file.
  Returns:
    pd.DataFrame, main energy data.
  """
  with gcs.open(gs_path) as f:
    data = pd.read_csv(f,
                       delimiter=' ',
                       header=None,
                       names=['time',
                              'main_watts',
                              'main_va',
                              'main_RMS'])
  data.time = data.time.apply(lambda x: datetime.fromtimestamp(x))
  data.set_index('time', drop=True, inplace=True)
  data.index = data.index.floor('S')
  return data 
Author: GoogleCloudPlatform, Project: professional-services, Lines: 25, Source: util.py
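A hypothetical call of the loader above; the project id and GCS path are placeholders:

main_df = load_main_energy_data('my-gcp-project',
                                '/my-bucket/house_1/main.dat')
print(main_df.head())  # DataFrame indexed by second-resolution timestamps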

Example 11: _open_file

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
    """Opens a new gcs file for writing."""
    if use_tmp_bucket:
      bucket = cls._get_tmp_gcs_bucket(writer_spec)
      account_id = cls._get_tmp_account_id(writer_spec)
    else:
      bucket = cls._get_gcs_bucket(writer_spec)
      account_id = cls._get_account_id(writer_spec)


    filename = "/%s/%s" % (bucket, filename_suffix)

    content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)

    options = {}
    if cls.ACL_PARAM in writer_spec:
      options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)

    return cloudstorage.open(filename, mode="w", content_type=content_type,
                             options=options, _account_id=account_id) 
Author: GoogleCloudPlatform, Project: python-compat-runtime, Lines: 22, Source: output_writers.py

Example 12: _open_file

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
    """Opens a new gcs file for writing."""
    if use_tmp_bucket:
      bucket = cls._get_tmp_gcs_bucket(writer_spec)
      account_id = cls._get_tmp_account_id(writer_spec)
    else:
      bucket = cls._get_gcs_bucket(writer_spec)
      account_id = cls._get_account_id(writer_spec)

    # GoogleCloudStorage format for filenames, Initial slash is required
    filename = "/%s/%s" % (bucket, filename_suffix)

    content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)

    options = {}
    if cls.ACL_PARAM in writer_spec:
      options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)

    return cloudstorage.open(filename, mode="w", content_type=content_type,
                             options=options, _account_id=account_id) 
Author: GoogleCloudPlatform, Project: appengine-mapreduce, Lines: 22, Source: output_writers.py

Example 13: _next_seg

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def _next_seg(self):
    """Get next seg."""
    if self._seg:
      self._seg.close()
    self._seg_index += 1
    if self._seg_index > self._last_seg_index:
      self._seg = None
      return

    filename = self._seg_prefix + str(self._seg_index)
    stat = cloudstorage.stat(filename)
    writer = output_writers._GoogleCloudStorageOutputWriter
    if writer._VALID_LENGTH not in stat.metadata:
      raise ValueError(
          "Expect %s in metadata for file %s." %
          (writer._VALID_LENGTH, filename))
    self._seg_valid_length = int(stat.metadata[writer._VALID_LENGTH])
    if self._seg_valid_length > stat.st_size:
      raise ValueError(
          "Valid length %s is too big for file %s of length %s" %
          (self._seg_valid_length, filename, stat.st_size))
    self._seg = cloudstorage.open(filename) 
Author: GoogleCloudPlatform, Project: appengine-mapreduce, Lines: 24, Source: gcs_file_seg_reader.py

Example 14: testAppendAndFlush

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def testAppendAndFlush(self):
    self.pool.append("a")
    self.assertRaises(cloudstorage.errors.NotFoundError, cloudstorage.open,
                      self.filename)
    self.pool.append("b")
    self.assertRaises(cloudstorage.errors.NotFoundError, cloudstorage.open,
                      self.filename)
    self.pool.flush()
    self.assertRaises(cloudstorage.errors.NotFoundError, cloudstorage.open,
                      self.filename)
    # File handle does need to be explicitly closed.
    self.filehandle.close()
    self.assertEquals(32 * 1024, cloudstorage.stat(self.filename).st_size)
    self.assertEquals(
        ["a", "b"],
        list(records.RecordsReader(cloudstorage.open(self.filename)))) 
Author: GoogleCloudPlatform, Project: appengine-mapreduce, Lines: 18, Source: output_writers_test.py

Example 15: testAppendAndForceFlush

# Required import: import cloudstorage [as alias]
# Or: from cloudstorage import open [as alias]
def testAppendAndForceFlush(self):
    self.pool.append("a")
    self.assertRaises(cloudstorage.errors.NotFoundError, cloudstorage.open,
                      self.filename)
    self.pool.append("b")
    self.assertRaises(cloudstorage.errors.NotFoundError, cloudstorage.open,
                      self.filename)
    self.pool.flush(True)
    self.assertRaises(cloudstorage.errors.NotFoundError, cloudstorage.open,
                      self.filename)
    # File handle does need to be explicitly closed.
    self.filehandle.close()
    # Check the file size contains the padding.
    self.assertEquals(256 * 1024, cloudstorage.stat(self.filename).st_size)
    self.assertEquals(
        ["a", "b"],
        list(records.RecordsReader(cloudstorage.open(self.filename)))) 
Author: GoogleCloudPlatform, Project: appengine-mapreduce, Lines: 19, Source: output_writers_test.py


Note: The cloudstorage.open method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the License of the corresponding project. Do not reproduce without permission.