

Python LOGGER.info Method Code Examples

This article collects typical usage examples of the Python method common.LOGGER.info. If you are trying to work out what LOGGER.info does, how to call it, or where to find real-world usage, the curated examples below should help. You can also explore further usage examples of common.LOGGER, the module this method belongs to.


The following presents 15 code examples of the LOGGER.info method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
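Every example on this page imports LOGGER from a project-local common module, which is not itself shown here. For reference, here is a minimal sketch of what such a module plausibly contains, built only on the standard logging library (the module name common comes from the examples; the handler configuration and format string are assumptions):

# common.py -- hypothetical minimal logging setup, not taken from the source projects
import logging

logging.basicConfig(
    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
    level=logging.INFO)
LOGGER = logging.getLogger(__name__)

With a module like this in place, from common import LOGGER followed by LOGGER.info('message') behaves exactly like the logging calls in the examples below.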

Example 1: __call__

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
    def __call__(self):
        """
        Actually run the job
        """
        # Create the volume from the snapshot and get its name
        ec2_helper = EC2Helper()
        iops = None
        if self._instance_details.iops_support:
            iops = 500

        zone = ec2_helper.get_cheapest_spot_price(self._instance_type, self._spot_price)
        if zone is not None:
            volume, snapshot_name = ec2_helper.create_volume(self._snapshot_id, zone, iops=iops)
            LOGGER.info('obs_id: {0}, volume_name: {1}'.format(self._obs_id, snapshot_name))
            user_data_mime = self.get_mime_encoded_user_data(volume.id)

            if self._spot_price is not None:
                ec2_helper.run_spot_instance(
                    self._ami_id,
                    self._spot_price,
                    user_data_mime,
                    self._instance_type,
                    volume.id,
                    self._created_by,
                    '{1}-{2}-{0}'.format(self._name, snapshot_name, self._counter),
                    self._instance_details,
                    zone,
                    ephemeral=True)
        else:
            LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(self._instance_type, self._spot_price))
Developer: ICRAR, Project: aws-chiles02, Lines: 32, Source: run_cvel.py
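EC2Helper.get_cheapest_spot_price is project code that is not reproduced on this page. To illustrate the idea, here is a hedged sketch built on the classic boto EC2 API (the region default and the filtering rule are assumptions; get_spot_price_history and its fields are real boto):

import boto.ec2

def get_cheapest_spot_price(instance_type, max_price, region='us-east-1'):
    """Hypothetical: return the zone with the lowest recent spot price, or None."""
    conn = boto.ec2.connect_to_region(region)
    history = conn.get_spot_price_history(
        instance_type=instance_type,
        product_description='Linux/UNIX')
    # Keep only price points at or below our bid
    affordable = [h for h in history if h.price <= max_price]
    if not affordable:
        return None
    return min(affordable, key=lambda h: h.price).availability_zone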

Example 2: _upload

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
 def _upload(retries_left=amount_of_retries):
     try:
         LOGGER.info(
             'Start uploading part: #{0}, source_path: {1}'.format(
                 part_num,
                 source_path
             )
         )
         conn = get_s3_connection(aws_access_key_id, aws_secret_access_key)
         bucket = conn.get_bucket(bucket_name)
         for mp in bucket.get_all_multipart_uploads():
             if mp.id == multipart_id:
                 with FileChunkIO(source_path, 'r', offset=offset, bytes=bytes_to_copy) as fp:
                     mp.upload_part_from_file(fp=fp, part_num=part_num)
                 break
     except Exception as exc:
         if retries_left:
             _upload(retries_left=retries_left - 1)
         else:
             LOGGER.info(
                 'Failed uploading part: #{0}, source_path: {1}'.format(
                     part_num,
                     source_path
                 )
             )
             raise exc
Developer: ICRAR, Project: aws-chiles02, Lines: 28, Source: s3_helper.py
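_upload above is a nested closure; the enclosing function, which Example 6 submits to a process pool as upload_part, is not shown. A plausible reconstruction under that assumption, with the closure variables passed in as parameters (the default retry count is a guess; get_s3_connection is the helper Example 2 itself calls):

from filechunkio import FileChunkIO

def upload_part(aws_access_key_id, aws_secret_access_key, bucket_name,
                multipart_id, part_num, source_path, offset, bytes_to_copy,
                amount_of_retries=10):
    """Hypothetical wrapper: upload one part of a multipart upload, with retries."""
    def _upload(retries_left=amount_of_retries):
        try:
            conn = get_s3_connection(aws_access_key_id, aws_secret_access_key)
            bucket = conn.get_bucket(bucket_name)
            # Find our multipart upload by id and send this part from the file slice
            for mp in bucket.get_all_multipart_uploads():
                if mp.id == multipart_id:
                    with FileChunkIO(source_path, 'r', offset=offset,
                                     bytes=bytes_to_copy) as fp:
                        mp.upload_part_from_file(fp=fp, part_num=part_num)
                    break
        except Exception:
            if retries_left:
                _upload(retries_left=retries_left - 1)
            else:
                raise

    _upload()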

Example 3: copy_files

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def copy_files(frequency_id, processes, days):
    s3_helper = S3Helper()
    bucket = s3_helper.get_bucket(CHILES_BUCKET_NAME)
    LOGGER.info('Scanning bucket: {0}, frequency_id: {1}'.format(bucket, frequency_id))

    # Create the queue
    queue = multiprocessing.JoinableQueue()

    # Start the consumers
    for x in range(processes):
        consumer = Consumer(queue)
        consumer.start()

    for key in bucket.list(prefix='CVEL/{0}'.format(frequency_id)):
        LOGGER.info('Checking {0}'.format(key.key))
        # Only process the data tarballs; skip every other key
        if key.key.endswith('/data.tar.gz') or key.key.endswith('/data.tar'):
            elements = key.key.split('/')
            if elements[2] in days:
                directory = '/mnt/output/Chiles/split_vis/{0}/'.format(elements[2])

                # Queue the copy of the file
                temp_file = os.path.join(directory, 'data.tar.gz' if key.key.endswith('/data.tar.gz') else 'data.tar')
                queue.put(Task(key, temp_file, directory, frequency_id))

    # Add a poison pill to shut things down
    for x in range(processes):
        queue.put(None)

    # Wait for the queue to terminate
    queue.join()
Developer: ICRAR, Project: chiles_pipeline, Lines: 33, Source: copy_clean_input_standalone.py
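Consumer and Task are helper classes from the same project and are not shown here. The pattern, though, is the standard multiprocessing poison-pill worker: each consumer process pulls callables off a JoinableQueue until it receives None. A minimal sketch (everything beyond the names used above is an assumption):

import multiprocessing

class Consumer(multiprocessing.Process):
    """Worker process that executes callable tasks from a JoinableQueue."""
    def __init__(self, queue):
        multiprocessing.Process.__init__(self)
        self._queue = queue

    def run(self):
        while True:
            task = self._queue.get()
            if task is None:
                # Poison pill: acknowledge it and exit
                self._queue.task_done()
                break
            task()  # tasks such as Task or CopyTask above define __call__
            self._queue.task_done()

queue.join() in copy_files then returns once every queued task, poison pills included, has been marked done.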

Example 4: copy_files

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def copy_files(args):
    # Create the queue
    queue = multiprocessing.JoinableQueue()
    # Start the consumers
    for x in range(PROCESSES):
        consumer = Consumer(queue)
        consumer.start()

    # Look in the output directory
    for root, dir_names, filenames in os.walk(args.product_dir):
        LOGGER.debug('root: {0}, dir_names: {1}, filenames: {2}'.format(root, dir_names, filenames))
        for match in fnmatch.filter(dir_names, '13B-266*calibrated_deepfield.ms'):
            result_dir = join(root, match)
            LOGGER.info('Queuing result_dir: {0}'.format(result_dir))

            queue.put(
                CopyTask(
                    args.bucket,
                    match,
                    result_dir,
                    args.aws_access_key_id,
                    args.aws_secret_access_key
                )
            )

    # Add a poison pill to shut things down
    for x in range(PROCESSES):
        queue.put(None)

    # Wait for the queue to terminate
    queue.join()
Developer: ICRAR, Project: aws-chiles02, Lines: 33, Source: copy_final_products_to_s3.py
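This variant receives a parsed argparse namespace rather than individual parameters. The parser itself lives elsewhere in the script; a hedged sketch of the arguments it evidently provides (the argument spellings are assumptions inferred from the attribute accesses above):

import argparse

def parse_arguments():
    parser = argparse.ArgumentParser(description='Copy final products to S3')
    parser.add_argument('bucket', help='the S3 bucket to copy into')
    parser.add_argument('product_dir', help='directory tree to scan for measurement sets')
    parser.add_argument('aws_access_key_id', help='AWS access key id')
    parser.add_argument('aws_secret_access_key', help='AWS secret access key')
    return parser.parse_args()

copy_files(parse_arguments()) would then wire the two together.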

Example 5: copy_files

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def copy_files(processes, bottom_frequency, frequency_range):
    # Create the directory
    if not exists(DIRECTORY):
        os.makedirs(DIRECTORY)

    # Scan the bucket
    s3_helper = S3Helper()
    bucket = s3_helper.get_bucket(CHILES_BUCKET_NAME)
    LOGGER.info('Scanning bucket: {0}/CLEAN'.format(bucket))

    # Create the queue
    queue = multiprocessing.JoinableQueue()

    # Start the consumers
    for x in range(processes):
        consumer = Consumer(queue)
        consumer.start()

    for key in bucket.list(prefix='CLEAN/'):
        LOGGER.info('Checking {0}'.format(key.key))
        # Only process the image tarballs; skip every other key
        if key.key.endswith('.image.tar.gz') or key.key.endswith('.image.tar'):
            # Do we need this file?
            basename_key = basename(key.key)
            if in_frequency_range(basename_key, bottom_frequency, frequency_range):
                # Queue the copy of the file
                temp_file = os.path.join(DIRECTORY, basename_key)
                queue.put(Task(key, temp_file, DIRECTORY))

    # Add a poison pill to shut things down
    for x in range(processes):
        queue.put(None)

    # Wait for the queue to terminate
    queue.join()
Developer: ICRAR, Project: aws-chiles02, Lines: 37, Source: copy_makecube_input.py

Example 6: add_file_to_bucket_multipart

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
    def add_file_to_bucket_multipart(self, bucket_name, key_name, source_path, parallel_processes=2, reduced_redundancy=True):
        """
        Parallel multipart upload.
        """
        LOGGER.info('bucket_name: {0}, key_name: {1}, filename: {2}, parallel_processes: {3}, reduced_redundancy: {4}'.format(
            bucket_name, key_name, source_path, parallel_processes, reduced_redundancy))

        source_size = os.stat(source_path).st_size
        bytes_per_chunk = 10 * 1024 * 1024
        chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
        if chunk_amount < 10000:
            bucket = self.get_bucket(bucket_name)

            headers = {'Content-Type': mimetypes.guess_type(key_name)[0] or 'application/octet-stream'}
            mp = bucket.initiate_multipart_upload(key_name, headers=headers, reduced_redundancy=reduced_redundancy)

            LOGGER.info('bytes_per_chunk: {0}, chunk_amount: {1}'.format(bytes_per_chunk, chunk_amount))

            # S3 allows at most 10,000 parts per multipart upload (checked above)
            pool = Pool(processes=parallel_processes)
            for i in range(chunk_amount):
                offset = i * bytes_per_chunk
                remaining_bytes = source_size - offset
                bytes_to_copy = min([bytes_per_chunk, remaining_bytes])
                part_num = i + 1
                pool.apply_async(upload_part, [self._aws_access_key_id, self._aws_secret_access_key, bucket_name, mp.id, part_num, source_path, offset, bytes_to_copy])
            pool.close()
            pool.join()

            if len(mp.get_all_parts()) == chunk_amount:
                mp.complete_upload()
            else:
                mp.cancel_upload()
        else:
            raise S3UploadException('Too many chunks')
Developer: ICRAR, Project: chiles_pipeline, Lines: 37, Source: s3_helper.py
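The guard on chunk_amount reflects a real S3 rule: a multipart upload may contain at most 10,000 parts, so with 10 MB chunks this method tops out at roughly 100 GB per file. A short usage sketch (bucket and paths are placeholders):

s3_helper = S3Helper()
s3_helper.add_file_to_bucket_multipart(
    'my-bucket',                  # bucket_name (placeholder)
    'CVEL/1400_1404/data.tar',    # key_name (placeholder)
    '/mnt/output/data.tar',       # source_path (placeholder)
    parallel_processes=4)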

Example 7: copy_files

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def copy_files(frequency_id):
    s3_helper = S3Helper()
    # Look in the output directory
    LOGGER.info('directory_data: {0}'.format(CHILES_CLEAN_OUTPUT))
    for dir_name in os.listdir(CHILES_CLEAN_OUTPUT):
        LOGGER.info('dir_name: {0}'.format(dir_name))
        result_dir = join(CHILES_CLEAN_OUTPUT, dir_name)
        if isdir(result_dir) and dir_name.startswith('cube_') and dir_name.endswith('.image'):
            LOGGER.info('dir_name: {0}'.format(dir_name))
            output_tar_filename = join(CHILES_CLEAN_OUTPUT, dir_name + '.tar')

            if can_be_multipart_tar(result_dir):
                LOGGER.info('Using add_tar_to_bucket_multipart')
                s3_helper.add_tar_to_bucket_multipart(
                    CHILES_BUCKET_NAME,
                    '/CLEAN/{0}/{1}'.format(frequency_id, basename(output_tar_filename)),
                    result_dir)
            else:
                LOGGER.info('Using make_tarfile, then adding file to bucket')
                make_tarfile(output_tar_filename, result_dir)

                s3_helper.add_file_to_bucket(
                    CHILES_BUCKET_NAME,
                    'CVEL/{0}/{1}/data.tar'.format(frequency_id, basename(output_tar_filename)),
                    output_tar_filename)

                # Clean up
                os.remove(output_tar_filename)
Developer: ICRAR, Project: chiles_pipeline, Lines: 30, Source: copy_clean_output.py
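make_tarfile and can_be_multipart_tar are project helpers not reproduced on this page. make_tarfile is straightforward with the standard tarfile module; a minimal sketch consistent with how it is called above:

import tarfile
from os.path import basename

def make_tarfile(output_filename, source_dir):
    """Pack source_dir, as its own top-level entry, into an uncompressed tar."""
    with tarfile.open(output_filename, 'w') as tar:
        tar.add(source_dir, arcname=basename(source_dir))

A sketch of can_be_multipart_tar appears after Example 12 below.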

Example 8: __call__

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
 def __call__(self):
     # noinspection PyBroadException
     try:
         LOGGER.info('Copying {0} to s3:{1}'.format(self._filename, self._bucket_location))
         s3_helper = S3Helper()
         s3_helper.add_file_to_bucket(
             CHILES_BUCKET_NAME,
             self._bucket_location,
             self._filename)
     except Exception:
         LOGGER.exception('CopyTask died')
Developer: ICRAR, Project: chiles_pipeline, Lines: 13, Source: copy_log_files.py

Example 9: get_mime_encoded_user_data

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def get_mime_encoded_user_data(volume_id, setup_disks, in_user_data, now):
    """
    AWS allows the user data to be a multipart MIME message.
    """
    user_data = MIMEMultipart()
    user_data.attach(get_cloud_init())

    data_formatted = in_user_data.format(volume_id, now, PIP_PACKAGES)
    LOGGER.info(data_formatted)
    user_data.attach(MIMEText(setup_disks + data_formatted))
    return user_data.as_string()
Developer: ICRAR, Project: chiles_pipeline, Lines: 13, Source: run_split_standalone.py
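get_cloud_init is another project helper. cloud-init consumes multipart user data in which a part with content type text/cloud-config carries the instance configuration; a hedged sketch (the YAML content is purely an assumption):

from email.mime.text import MIMEText

def get_cloud_init():
    """Hypothetical: return a text/cloud-config MIME part for the user-data message."""
    cloud_config = '''#cloud-config
manage_etc_hosts: true
'''
    return MIMEText(cloud_config, 'cloud-config')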

Example 10: get_mime_encoded_user_data

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
    def get_mime_encoded_user_data(self, volume_id):
        """
        AWS allows the user data to be a multipart MIME message.
        """
        user_data = MIMEMultipart()
        user_data.attach(get_cloud_init())

        # Build the strings we need
        cvel_pipeline = self.build_cvel_pipeline()

        data_formatted = self._user_data.format(cvel_pipeline, self._obs_id, volume_id, self._now, self._counter, PIP_PACKAGES)
        LOGGER.info(data_formatted)
        user_data.attach(MIMEText(self._setup_disks + data_formatted))
        return user_data.as_string()
Developer: ICRAR, Project: aws-chiles02, Lines: 16, Source: run_cvel.py

Example 11: start_servers

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def start_servers(
        ami_id,
        user_data,
        setup_disks,
        instance_type,
        obs_id,
        created_by,
        name,
        instance_details,
        spot_price,
        ebs,
        bottom_frequency,
        frequency_range):
    LOGGER.info('obs_id: {0}, bottom_frequency: {1}, frequency_range: {2}'.format(obs_id, bottom_frequency, frequency_range))
    ec2_helper = EC2Helper()
    zone = ec2_helper.get_cheapest_spot_price(instance_type, spot_price)

    if zone is not None:
        # Swap size
        if ebs is None:
            swap_size = 1
        else:
            ephemeral_size = instance_details.number_disks * instance_details.size
            swap_size = min(int(ephemeral_size * 0.75), 16)

        user_data_mime = get_mime_encoded_user_data(
            user_data,
            obs_id,
            setup_disks,
            bottom_frequency,
            frequency_range,
            swap_size
        )
        LOGGER.info('{0}'.format(user_data_mime))

        ec2_helper.run_spot_instance(
            ami_id,
            spot_price,
            user_data_mime,
            instance_type,
            None,
            created_by,
            name + '- {0}'.format(obs_id),
            instance_details=instance_details,
            zone=zone,
            ebs_size=ebs,
            number_ebs_volumes=4,
            ephemeral=True)
    else:
        LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(instance_type, spot_price))
Developer: ICRAR, Project: aws-chiles02, Lines: 52, Source: run_makecube.py
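Example 15 below delegates the same computation to a get_swap_size helper. Reconstructing that helper from the inline swap-size logic above is mostly mechanical (the handling of instances without ephemeral disks is an assumption):

def get_swap_size(instance_details):
    """Hypothetical helper: use up to 75% of ephemeral storage for swap, capped at 16 GB."""
    ephemeral_size = instance_details.number_disks * instance_details.size
    if ephemeral_size == 0:
        return 1
    return min(int(ephemeral_size * 0.75), 16)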

Example 12: copy_files

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def copy_files(date, vis_file):
    s3_helper = S3Helper()
    # Look in the output directory
    for root, dir_names, filenames in os.walk(CHILES_CVEL_OUTPUT):
        LOGGER.info('root: {0}, dir_names: {1}, filenames: {2}'.format(root, dir_names, filenames))
        for match in fnmatch.filter(dir_names, vis_file):
            result_dir = join(root, match)
            LOGGER.info('Working on: {0}'.format(result_dir))

            if can_be_multipart_tar(result_dir):
                LOGGER.info('Using add_tar_to_bucket_multipart')
                s3_helper.add_tar_to_bucket_multipart(
                    CHILES_BUCKET_NAME,
                    'CVEL/{0}/{1}/data.tar'.format(vis_file, date),
                    result_dir)
            else:
                LOGGER.info('Using make_tarfile, then adding file to bucket')
                output_tar_filename = join(root, match + '.tar')
                make_tarfile(output_tar_filename, result_dir)

                s3_helper.add_file_to_bucket(
                    CHILES_BUCKET_NAME,
                    'CVEL/{0}/{1}/data.tar'.format(vis_file, date),
                    output_tar_filename)

                # Clean up
                os.remove(output_tar_filename)

            shutil.rmtree(result_dir, ignore_errors=True)
Developer: ICRAR, Project: chiles_pipeline, Lines: 31, Source: copy_cvel_output.py
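can_be_multipart_tar decides between streaming a tar straight into S3 (Example 13) and building it locally first. S3 requires every part except the last to be at least 5 MB, so a plausible implementation checks whether the directory is large enough to be worth streaming (the exact rule is an assumption):

import os

MINIMUM_PART_SIZE = 5 * 1024 * 1024  # S3's minimum multipart part size

def can_be_multipart_tar(directory):
    """Hypothetical check: is the tree big enough for a multipart upload?"""
    total = 0
    for root, dir_names, filenames in os.walk(directory):
        for filename in filenames:
            total += os.path.getsize(os.path.join(root, filename))
    return total > MINIMUM_PART_SIZE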

Example 13: add_tar_to_bucket_multipart

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
    def add_tar_to_bucket_multipart(self, bucket_name, key_name, source_path, gzip=False, parallel_processes=2, reduced_redundancy=True, bufsize=10*1024*1024):
        """
        Parallel multipart upload.
        """
        LOGGER.info(
            'bucket_name: {0}, key_name: {1}, source_path: {2}, parallel_processes: {3}, reduced_redundancy: {4}, bufsize: {5}'.format(
                bucket_name,
                key_name,
                source_path,
                parallel_processes,
                reduced_redundancy,
                bufsize
            )
        )
        bucket = self.get_bucket(bucket_name)

        headers = {'Content-Type': mimetypes.guess_type(key_name)[0] or 'application/octet-stream'}
        mp = bucket.initiate_multipart_upload(key_name, headers=headers, reduced_redundancy=reduced_redundancy)
        s3_feeder = S3Feeder(mp.id, bufsize, bucket_name, parallel_processes, self._aws_access_key_id, self._aws_secret_access_key)

        if gzip:
            mode = "w|gz"
        else:
            mode = "w|"
        tar = tarfile.open(mode=mode, fileobj=s3_feeder, bufsize=int(bufsize / 10))

        complete = True
        # noinspection PyBroadException
        try:
            for entry in os.listdir(source_path):
                full_filename = join(source_path, entry)
                LOGGER.info(
                    'tar: [full_filename: {0}, entry: {1}]'.format(
                        full_filename,
                        entry
                    )
                )
                tar.add(full_filename, arcname=entry)

            tar.close()
            s3_feeder.close()
        except Exception:
            complete = False
            s3_feeder.close()

        # Finish the upload
        if complete:
            mp.complete_upload()
        else:
            mp.cancel_upload()
Developer: ICRAR, Project: chiles_pipeline, Lines: 52, Source: s3_helper.py
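The "w|" tarfile mode writes a non-seekable stream, which is what lets the archive be fed to S3 part by part through S3Feeder instead of being staged on disk first. Usage mirrors Examples 7 and 12; a placeholder invocation:

s3_helper = S3Helper()
s3_helper.add_tar_to_bucket_multipart(
    'my-bucket',                          # bucket_name (placeholder)
    'CVEL/vis_1400/2013-08-01/data.tar',  # key_name (placeholder)
    '/mnt/output/Chiles/vis_1400',        # directory to stream
    gzip=False)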

Example 14: get_cvel

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def get_cvel():
    s3_helper = S3Helper()
    bucket = s3_helper.get_bucket(CHILES_BUCKET_NAME)
    cvel_data = {}
    for key in bucket.list(prefix='CVEL/'):
        LOGGER.info('Checking {0}'.format(key.key))
        if key.key.endswith('data.tar.gz') or key.key.endswith('data.tar'):
            elements = key.key.split('/')
            data_list = cvel_data.get(str(elements[1]))
            if data_list is None:
                data_list = []
                cvel_data[str(elements[1])] = data_list
            data_list.append(str(elements[2]))

    return cvel_data
Developer: ICRAR, Project: aws-chiles02, Lines: 17, Source: run_cvel.py
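The returned dictionary maps each frequency id (the second element of the key path) to the list of day directories found beneath it. With keys such as CVEL/1400_1404/2013-08-01/data.tar, the result would look like the placeholder below:

cvel_data = get_cvel()
# e.g. {'1400_1404': ['2013-08-01', '2013-08-02'], ...}
for frequency_id, days in cvel_data.items():
    LOGGER.info('{0}: {1} day(s) complete'.format(frequency_id, len(days)))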

Example 15: get_mime_encoded_user_data

# Required import: from common import LOGGER [as alias]
# Or: from common.LOGGER import info [as alias]
def get_mime_encoded_user_data(instance_details, setup_disks, user_data):
    """
    AWS allows the user data to be a multipart MIME message.
    """
    # Split the frequencies
    min_freq = 940
    max_freq = 1424
    LOGGER.info("min_freq: {0}, max_freq: {1}".format(min_freq, max_freq))

    # Build the mime message
    mime_data = MIMEMultipart()
    mime_data.attach(get_cloud_init())

    swap_size = get_swap_size(instance_details)
    data_formatted = user_data.format("TODO", min_freq, max_freq, swap_size, PIP_PACKAGES)
    mime_data.attach(MIMEText(setup_disks + data_formatted))
    return mime_data.as_string()
Developer: ICRAR, Project: aws-chiles02, Lines: 19, Source: run_clean_all.py


Note: The common.LOGGER.info examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult each project's license before using or redistributing the code, and do not republish without permission.