Python io.open Method Code Examples

This article collects and summarizes typical code examples of the io.open method in Python. If you are wondering how exactly Python's io.open is used, how it works, or what real uses of io.open look like, the curated code examples here may help. You can also explore further usage examples from the io module that the method belongs to.


The following section presents 15 code examples of the io.open method, sorted by popularity by default.
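Before the collected examples, here is a minimal, self-contained sketch of the basic io.open call (the file name example.txt is arbitrary): opening a file in text mode with an explicit encoding, then reading it back in binary mode. Note that on Python 3, io.open is the same function as the built-in open.

import io

# Write a UTF-8 text file; the encoding is explicit so the result does not
# depend on the platform default.
with io.open("example.txt", "w", encoding="utf-8") as f:
    f.write(u"hello, world\n")

# Read it back as text.
with io.open("example.txt", "r", encoding="utf-8") as f:
    print(f.read())

# Binary mode returns bytes instead of str.
with io.open("example.txt", "rb") as f:
    print(type(f.read()))  # <class 'bytes'>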

Example 1: get_dockerfile

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def get_dockerfile(args):
    if args.dockerfile:
        return io.open(args.dockerfile, "rb").read()
    else:
        cmd = bash_cmd_preamble + [
            "apt-get update -qq",
            "apt-get install -qqy cloud-init net-tools",
            "echo $CLOUD_CONFIG_B64 | base64 --decode > /etc/cloud/cloud.cfg.d/99_aegea.cfg",
            "cloud-init init",
            "cloud-init modules --mode=config",
            "cloud-init modules --mode=final"
        ]
        return dockerfile.format(base_image=args.base_image,
                                 maintainer=ARN.get_iam_username(),
                                 label=" ".join(args.tags),
                                 cloud_config_b64=base64.b64encode(get_cloud_config(args)).decode(),
                                 run=json.dumps(cmd)).encode() 
Developer: kislyuk, Project: aegea, Lines: 19, Source: build_docker_image.py

Example 2: write_conf

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def write_conf(self, extra=''):
        if self.ssl:
            serverpem = os.path.join(thisdir, 'test.pem')
            ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
        else:
            ssl = ''

        conf = self.config_template % {
            'host': self.host,
            'port': self.port,
            'error_log': self.error_log,
            'access_log': self.access_log,
            'ssl': ssl,
            'extra': extra,
        }
        with io.open(self.config_file, 'w', encoding='utf-8') as f:
            f.write(str(conf)) 
Developer: cherrypy, Project: cherrypy, Lines: 22, Source: helper.py

Example 3: read_links_file

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def read_links_file(self,file_path):
        '''
        Read links and associated categories for specified articles
        in a text file, separated by a space

        Args:
            file_path (str): The path to text file with news article links
                             and category

        Returns:
            articles: Array of (category, link) tuples
                      ex. [('IPO','www.cs.columbia.edu')]
        '''

        articles = []
        with open(file_path) as f:
            for line in f:
                line = line.strip()
                #Ignore blank lines
                if len(line) != 0:
                    link,category = line.split(' ')
                    articles.append((category.rstrip(),link.strip()))

        return articles 
Developer: skillachie, Project: news-corpus-builder, Lines: 26, Source: news_corpus_generator.py
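A hypothetical usage sketch of read_links_file (the file name, its contents, and the generator instance are invented for illustration): each line of the links file holds a link and a category separated by a single space, and the returned tuples are ordered (category, link).

# Create a toy links file; the contents are made up for illustration.
with open("links.txt", "w") as f:
    f.write("www.cs.columbia.edu IPO\n")
    f.write("www.example.com Tech\n")

# read_links_file is a method, so it needs an instance of the class that
# defines it in the source project (called `generator` here as an assumption).
# articles = generator.read_links_file("links.txt")
# -> [('IPO', 'www.cs.columbia.edu'), ('Tech', 'www.example.com')]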

Example 4: _get_data

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def _get_data(self):
        archive_file_name, archive_hash = self._archive_file
        data_file_name, data_hash = self._data_file[self._segment]
        path = os.path.join(self._root, data_file_name)
        if not os.path.exists(path) or not check_sha1(path, data_hash):
            namespace = 'gluon/dataset/'+self._namespace
            downloaded_file_path = download(_get_repo_file_url(namespace, archive_file_name),
                                            path=self._root,
                                            sha1_hash=archive_hash)

            with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
                for member in zf.namelist():
                    filename = os.path.basename(member)
                    if filename:
                        dest = os.path.join(self._root, filename)
                        with zf.open(member) as source, \
                             open(dest, "wb") as target:
                            shutil.copyfileobj(source, target)

        data, label = self._read_batch(path)

        self._data = nd.array(data, dtype=data.dtype).reshape((-1, self._seq_len))
        self._label = nd.array(label, dtype=label.dtype).reshape((-1, self._seq_len)) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: text.py

Example 5: _override_epochs

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def _override_epochs(notebook):
    """Overrides the number of epochs in the notebook to 1 epoch. Note this operation is idempotent.

    Args:
        notebook : string
            notebook name in folder/notebook format
    """
    notebook_path = os.path.join(*([NOTEBOOKS_DIR] + notebook.split('/'))) + ".ipynb"

    # Read the notebook and set epochs to num_epochs.
    with io.open(notebook_path, 'r', encoding='utf-8') as f:
        notebook = f.read()

    # Set number of epochs to 1.
    modified_notebook = re.sub(EPOCHS_REGEX, 'epochs = 1', notebook)

    # Replace the original notebook with the modified one.
    with io.open(notebook_path, 'w', encoding='utf-8') as f:
        f.write(modified_notebook) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: straight_dope_test_utils.py

Example 6: _override_relative_paths

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def _override_relative_paths(notebook):
    """Overrides the relative path for the data and image directories to point
    to the right places. This is required as we run the notebooks in a different
    directory hierarchy more suitable for testing.

    Args:
        notebook : string
            notebook name in folder/notebook format
    """
    notebook_path = os.path.join(*([NOTEBOOKS_DIR] + notebook.split('/'))) + ".ipynb"

    # Read the notebook and set epochs to num_epochs.
    with io.open(notebook_path, 'r', encoding='utf-8') as f:
        notebook = f.read()

    # Update the location for the data directory.
    modified_notebook = re.sub(RELATIVE_PATH_REGEX, NOTEBOOKS_DIR, notebook)

    # Replace the original notebook with the modified one.
    with io.open(notebook_path, 'w', encoding='utf-8') as f:
        f.write(modified_notebook) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 23, Source: straight_dope_test_utils.py

Example 7: main

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def main():
    parser = argparse.ArgumentParser(
        description="Jupyter Notebooks to markdown"
    )

    parser.add_argument("notebook", nargs=1, help="The notebook to be converted.")
    parser.add_argument("-o", "--output", help="output markdown file")
    args = parser.parse_args()

    old_ipynb = args.notebook[0]
    new_ipynb = 'tmp.ipynb'
    md_file = args.output
    print(md_file)
    if not md_file:
        md_file = os.path.splitext(old_ipynb)[0] + '.md'


    clear_notebook(old_ipynb, new_ipynb)
    os.system('jupyter nbconvert ' + new_ipynb + ' --to markdown --output ' + md_file)
    with open(md_file, 'a') as f:
        f.write('<!-- INSERT SOURCE DOWNLOAD BUTTONS -->')
    os.system('rm ' + new_ipynb) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source: ipynb2md.py

Example 8: write_raw_text_to_files

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def write_raw_text_to_files(all_files, urls_path, tmp_dir, is_training):
  """Write text to files."""

  def write_to_file(all_files, urls_path, tmp_dir, filename):
    with io.open(os.path.join(tmp_dir, filename + ".source"), "w") as fstory:
      with io.open(os.path.join(tmp_dir, filename + ".target"),
                   "w") as fsummary:
        for example in example_generator(all_files, urls_path, sum_token=True):
          story, summary = _story_summary_split(example)
          fstory.write(story + "\n")
          fsummary.write(summary + "\n")

  filename = "cnndm.train" if is_training else "cnndm.dev"
  tf.logging.info("Writing %s" % filename)
  write_to_file(all_files, urls_path, tmp_dir, filename)

  if not is_training:
    test_urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
                                                    _TEST_URLS)
    filename = "cnndm.test"
    tf.logging.info("Writing %s" % filename)
    write_to_file(all_files, test_urls_path, tmp_dir, filename) 
Developer: akzaidi, Project: fine-lm, Lines: 24, Source: cnn_dailymail.py

Example 9: testGunzipFile

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def testGunzipFile(self):
    tmp_dir = self.get_temp_dir()
    (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir)

    # Create a test zip file and unzip it.
    with gzip.open(tmp_file_path + ".gz", "wb") as gz_file:
      gz_file.write(bytes("test line", "utf-8"))
    generator_utils.gunzip_file(tmp_file_path + ".gz", tmp_file_path + ".txt")

    # Check that the unzipped result is as expected.
    lines = []
    for line in io.open(tmp_file_path + ".txt", "rb"):
      lines.append(line.decode("utf-8").strip())
    self.assertEqual(len(lines), 1)
    self.assertEqual(lines[0], "test line")

    # Clean up.
    os.remove(tmp_file_path + ".gz")
    os.remove(tmp_file_path + ".txt")
    os.remove(tmp_file_path) 
Developer: akzaidi, Project: fine-lm, Lines: 22, Source: generator_utils_test.py

Example 10: load_csv

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def load_csv(path):
    """Load data from a CSV file.

    Args:
        path (str): A path to the CSV format file containing data.

    Returns:
        Data matrix X and target vector y
    """

    with open(path) as f:
        line = f.readline().strip()

    X = np.loadtxt(path, delimiter=',',
                   skiprows=0 if is_number(line.split(',')[0]) else 1)

    y = np.array(X[:, 0]).flatten()
    X = X[:, 1:]

    return X, y 
Developer: jeongyoonlee, Project: Kaggler, Lines: 24, Source: data_io.py
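A hedged usage sketch of load_csv (the CSV contents are invented; the function also relies on numpy imported as np and an is_number helper from the same module). The first column is treated as the target y, the remaining columns as the feature matrix X, and a header row is skipped automatically when its first field is not a number.

# Toy CSV with a header row, one target column and two feature columns.
with open("toy.csv", "w") as f:
    f.write("y,f1,f2\n")
    f.write("1,0.5,2.0\n")
    f.write("0,1.5,3.0\n")

X, y = load_csv("toy.csv")
# X -> array([[0.5, 2.0], [1.5, 3.0]]), y -> array([1., 0.])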

Example 11: build_index

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def build_index():
    repo_directory = get_config()['repo_directory']
    index_path = path.join(repo_directory, 'pages', 'index.json')
    page_path = path.join(repo_directory, 'pages')

    tree_generator = os.walk(page_path)
    folders = next(tree_generator)[1]
    commands, new_index = {}, {}
    for folder in folders:
        pages = next(tree_generator)[2]
        for page in pages:
            command_name = path.splitext(page)[0]
            if command_name not in commands:
                commands[command_name] = {'name': command_name,
                                          'platform': [folder]}
            else:
                commands[command_name]['platform'].append(folder)
    command_list = [item[1] for item in
                    sorted(commands.items(), key=itemgetter(0))]
    new_index['commands'] = command_list

    with open(index_path, mode='w') as f:
        json.dump(new_index, f) 
Developer: lord63, Project: tldr.py, Lines: 25, Source: cli.py

Example 12: parse_page

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def parse_page(page):
    """Parse the command man page."""
    colors = get_config()['colors']
    with io.open(page, encoding='utf-8') as f:
        lines = f.readlines()
    output_lines = []
    for line in lines[1:]:
        if is_headline(line):
            continue
        elif is_description(line):
            output_lines.append(click.style(line.replace('>', ' '),
                                            fg=colors['description']))
        elif is_old_usage(line):
            output_lines.append(click.style(line, fg=colors['usage']))
        elif is_code_example(line):
            line = '  ' + line if line.startswith('`') else line[2:]
            output_lines.append(click.style(line.replace('`', ''),
                                            fg=colors['command']))
        elif is_line_break(line):
            output_lines.append(click.style(line))
        else:
            output_lines.append(click.style('- ' + line, fg=colors['usage']))
    return output_lines 
Developer: lord63, Project: tldr.py, Lines: 25, Source: parser.py

Example 13: save_webdriver_logs_by_type

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def save_webdriver_logs_by_type(self, log_type, test_name):
        """Get webdriver logs of the specified type and write them to a log file

        :param log_type: browser, client, driver, performance, server, syslog, crashlog or logcat
        :param test_name: test that has generated these logs
        """
        try:
            logs = self.driver_wrapper.driver.get_log(log_type)
        except Exception:
            return

        if len(logs) > 0:
            from toolium.driver_wrappers_pool import DriverWrappersPool
            log_file_name = '{}_{}.txt'.format(get_valid_filename(test_name), log_type)
            log_file_name = os.path.join(DriverWrappersPool.logs_directory, log_file_name)
            with open(log_file_name, 'a+', encoding='utf-8') as log_file:
                driver_type = self.driver_wrapper.config.get('Driver', 'type')
                log_file.write(
                    u"\n{} '{}' test logs with driver = {}\n\n".format(datetime.now(), test_name, driver_type))
                for entry in logs:
                    timestamp = datetime.fromtimestamp(float(entry['timestamp']) / 1000.).strftime(
                        '%Y-%m-%d %H:%M:%S.%f')
                    log_file.write(u'{}\t{}\t{}\n'.format(timestamp, entry['level'], entry['message'].rstrip())) 
Developer: Telefonica, Project: toolium, Lines: 25, Source: driver_utils.py
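A hedged, self-contained sketch of the timestamp formatting used above, with a hand-made log entry instead of a real webdriver session (the entry dict mimics the shape selenium's driver.get_log() returns, but its values and the log file name are invented).

import io
from datetime import datetime

# Invented log entry; real entries come from driver.get_log(log_type).
entry = {'timestamp': 1600000000000, 'level': 'INFO', 'message': 'page loaded'}

with io.open('browser_example_test.txt', 'a+', encoding='utf-8') as log_file:
    # Webdriver timestamps are in milliseconds, hence the division by 1000.
    timestamp = datetime.fromtimestamp(float(entry['timestamp']) / 1000.).strftime(
        '%Y-%m-%d %H:%M:%S.%f')
    log_file.write(u'{}\t{}\t{}\n'.format(timestamp, entry['level'], entry['message'].rstrip()))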

Example 14: get_msr_paraphrase

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def get_msr_paraphrase() -> Dict[str, List[Dict[str, str]]]:

    url = 'https://raw.githubusercontent.com/wasiahmad/paraphrase_identification/master/dataset/msr-paraphrase-corpus/msr_paraphrase_{}.txt'  # NOQA
    root = download.get_cache_directory(os.path.join('datasets', 'msr_paraphrase'))

    def creator(path):
        dataset = {}
        fieldnames = ('quality', 'id1', 'id2', 'string1', 'string2')
        for split in ('train', 'test'):
            data_path = gdown.cached_download(url.format(split))
            with io.open(data_path, 'r', encoding='utf-8') as f:
                f.readline()  # skip header
                reader = csv.DictReader(f, delimiter='\t', fieldnames=fieldnames)
                dataset[split] = [dict(row) for row in reader]

        with io.open(path, 'wb') as f:
            pickle.dump(dataset, f)
        return dataset

    def loader(path):
        with io.open(path, 'rb') as f:
            return pickle.load(f)

    pkl_path = os.path.join(root, 'msr_paraphrase.pkl')
    return download.cache_or_load_file(pkl_path, creator, loader) 
Developer: tofunlp, Project: lineflow, Lines: 27, Source: msr_paraphrase.py

Example 15: get_conll2000

# Required module: import io [as alias]
# Or alternatively: from io import open [as alias]
def get_conll2000() -> Dict[str, List[str]]:

    url = 'https://www.clips.uantwerpen.be/conll2000/chunking/{}.txt.gz'
    root = download.get_cache_directory(os.path.join('datasets', 'conll2000'))

    def creator(path):
        dataset = {}
        for split in ('train', 'test'):
            data_path = gdown.cached_download(url.format(split))
            with gzip.open(data_path) as f:
                data = f.read().decode('utf-8').split('\n\n')

            dataset[split] = data

        with io.open(path, 'wb') as f:
            pickle.dump(dataset, f)
        return dataset

    def loader(path):
        with io.open(path, 'rb') as f:
            return pickle.load(f)

    pkl_path = os.path.join(root, 'conll2000.pkl')
    return download.cache_or_load_file(pkl_path, creator, loader) 
Developer: tofunlp, Project: lineflow, Lines: 26, Source: conll2000.py
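Examples 14 and 15 share the same caching pattern: build the dataset once with a creator function that pickles it through io.open(path, 'wb'), and on later calls unpickle the cached file with io.open(path, 'rb'). The following is a minimal, self-contained sketch of that pattern (the file name and dataset contents are invented; lineflow's download helpers are not used here).

import io
import os
import pickle

def cache_or_load(pkl_path, creator, loader):
    # Build and cache on the first call, load the cached pickle afterwards.
    if os.path.exists(pkl_path):
        return loader(pkl_path)
    return creator(pkl_path)

def creator(path):
    dataset = {'train': ['a toy sentence'], 'test': ['another toy sentence']}
    with io.open(path, 'wb') as f:
        pickle.dump(dataset, f)
    return dataset

def loader(path):
    with io.open(path, 'rb') as f:
        return pickle.load(f)

dataset = cache_or_load('toy_dataset.pkl', creator, loader)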


Note: The io.open examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not repost without permission.