本文整理汇总了Python中utils.mkdir函数的典型用法代码示例。如果您正苦于以下问题:Python mkdir函数的具体用法?Python mkdir怎么用?Python mkdir使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了mkdir函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: downloadAssignments
def downloadAssignments(url, handin=False):
    """Download every assignment listed at *url* into an "assignments" tree.

    For each assignment the description (JSON, rendered HTML and attached
    files) is saved; when *handin* is true, grade and hand-in data are
    saved as well, and statistics are dumped when present.

    url: course URL passed through to getAssignments().
    handin: also download grade/hand-in information when True.
    """
    assignments = getAssignments(url)
    os.mkdir("assignments")
    for group, items in assignments.items():
        dirpath = mkdir("assignments", group)
        for item in items:
            print("Processing %s/%s" % (group, item["Nafn"][0]))
            path = mkdir(dirpath, item["Nafn"][0])
            assignment = getAssignment(item["Nafn"][1])

            # Description: metadata, rendered HTML and any attached files.
            descr = os.path.join(path, "description")
            os.mkdir(descr)
            jsondump(os.path.join(descr, "description.json"), assignment["description"])
            genHtml(os.path.join(descr, "description.html"),
                    assignment["description"]["description"],
                    assignment["description"]["title"])
            # BUG FIX: the original reused `item` as the loop variable here,
            # shadowing the assignment entry; use a distinct name.
            for attachment in assignment["description"]["files"]:
                save(descr, attachment["url"])

            if handin:
                if "grade" in assignment:
                    grade = os.path.join(path, "grade")
                    os.mkdir(grade)
                    jsondump(os.path.join(grade, "grade.json"), assignment["grade"])
                    for attachment in assignment["grade"]["files"]:
                        save(grade, attachment["url"])
                if "handin" in assignment:
                    # BUG FIX: the original assigned to `handin` here, clobbering
                    # the boolean parameter with a path string for every later
                    # assignment in the loop.
                    handin_dir = os.path.join(path, "handin")
                    os.mkdir(handin_dir)
                    jsondump(os.path.join(handin_dir, "handin.json"), assignment["handin"])

            if "statistics" in assignment:
                jsondump(os.path.join(path, "stats.json"), assignment["statistics"])
                if assignment["statistics"]["image"] is not None:
                    save(os.path.join(path, "stats.jpg"), assignment["statistics"]["image"])
示例2: _combine_filename
def _combine_filename(self, names, max_length=60):
    """Build a single cache-file path for a list of static assets.

    E.g. ['css/foo.css', 'css/jquery/datepicker.css'] becomes
    "<save_dir>/foo.datepicker.css".  Over-long names are abbreviated
    (".min" -> ".m", version numbers stripped, then truncated).

    Raises ValueError when the inputs do not all share one extension.
    """
    first_ext = os.path.splitext(names[0])[-1]
    base_dir = self.handler.application.settings.get('combined_static_dir')
    if base_dir is None:
        base_dir = gettempdir()
    target_dir = os.path.join(base_dir, 'combined')
    mkdir(target_dir)

    parts = []
    for name in names:
        stem, ext = os.path.splitext(os.path.basename(name))
        if ext != first_ext:
            raise ValueError("Mixed file extensions (%s, %s)" %
                             (first_ext, ext))
        parts.append(stem)

    def total_len(xs):
        return sum(len(x) for x in xs)

    if total_len(parts) > max_length:
        # Abbreviate well-known suffixes, then drop version numbers.
        parts = [p.replace('.min', '.m').replace('.pack', '.p') for p in parts]
        parts = [re.sub(r'-[\d\.]+', '', p) for p in parts]
        # Still too long: chop characters off the end of every part,
        # dropping a trailing ".x" pair in one go when present.
        while total_len(parts) > max_length:
            try:
                parts = [p[:-2] if (p[-2] == '.' and p[:-2]) else p[:-1]
                         for p in parts]
            except IndexError:
                # A part became too short to truncate further; give up.
                break

    parts.append(first_ext[1:])
    return os.path.join(target_dir, '.'.join(parts))
示例3: copy_single_distro_files
def copy_single_distro_files(self, d, dirtree, symlink_ok):
    """Place the kernel and initrd for distro *d* under <dirtree>/images/<d.name>.

    Raises CX when either file cannot be located.  Files referenced by a
    remote URL are skipped — koan fetches those directly.
    """
    target_dir = os.path.join(dirtree, "images", d.name)
    utils.mkdir(target_dir)

    kernel = utils.find_kernel(d.kernel)   # full path
    initrd = utils.find_initrd(d.initrd)   # full path
    if kernel is None:
        raise CX("kernel not found: %(file)s, distro: %(distro)s" %
                 {"file": d.kernel, "distro": d.name})
    if initrd is None:
        raise CX("initrd not found: %(file)s, distro: %(distro)s" %
                 {"file": d.initrd, "distro": d.name})

    # Kernels referenced by remote URL are passed through to koan directly,
    # no need for copying them locally.
    for src in (kernel, initrd):
        if utils.file_is_remote(src):
            continue
        dst = os.path.join(target_dir, os.path.basename(src))
        utils.linkfile(src, dst, symlink_ok=symlink_ok,
                       api=self.api, logger=self.logger)
示例4: makeFileList
def makeFileList(name):
    """Cache the input-file list (with per-file event counts) for sample *name*.

    Does nothing when a cached list exists and caching is enabled.
    """
    listFile = self.inputFilesListFile(name)
    if os.path.exists(listFile) and self.useCachedFileLists():
        return
    cmd = self.sampleDict[name].filesCommand
    # NOTE: evaluates the sample's configured command string.
    fileNames = eval(cmd)
    assert fileNames, "The command '%s' produced an empty list of files" % cmd
    utils.mkdir(os.path.dirname(listFile))
    utils.writePickle(listFile, zip(fileNames, map(nEventsFile, fileNames)))
示例5: peek
def peek(dat, folder, force=False):
    """Save one grid image of samples from <repo>/samples/<dat>/<folder>.

    Writes peek/<dat>/<folder>.png (slashes in *folder* become underscores).
    Skips work when the PNG already exists, unless *force* is set.
    """
    repo = Globals().default_repo_dir
    mkdir(repo + "peek")
    mkdir(repo + "peek/" + dat)
    print("\nPeeking " + folder + " for " + dat)
    sample_dir = repo + "samples/" + dat + "/" + folder
    print("dir", sample_dir)

    out_png = repo + 'peek/%s/%s.png' % (dat, folder.replace("/", "_"))
    if (not force) and os.path.exists(out_png):
        print("Already peeked before. Now exit.")
        return

    dataset = dset.ImageFolder(
        root=sample_dir,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]))
    loader = torch.utils.data.DataLoader(dataset, batch_size=96,
                                         shuffle=True, num_workers=2)
    # Render only the first shuffled batch.
    for img, _ in loader:
        saveImage(img, out_png, nrow=12)
        break
示例6: _downLoad_lyricFromHttp
def _downLoad_lyricFromHttp(self, lyric):
## print 'downLoad_lyric'
cwd = os.getcwd()
print 'lyric.lrc:',lyric.lrc
url = lyric.lrc
p = re.compile(r'lrc/(\w+)/(\w+)/(\S+)')
m = p.search(url)
#m = re.search(r"lrc/(\w+)/(\w+)/(\S+)", url)
lyric_save_path = u'www/'
lyric_save_path += m.group(0)
mkdir(os.path.dirname(lyric_save_path))
lyric_content = ""
f = urllib2.urlopen(url)
data = f.read()
lyric_save_absolute_path = os.path.join(cwd,lyric_save_path)
## print 'downLoad_lyric lyric_save_absolute_path:',lyric_save_absolute_path
with open(lyric_save_absolute_path, "wb") as code:
code.write(data)
lyric_content += data
lyric.local_path = lyric_save_path
lyric.sizes = len(data)
return lyric_content
示例7: install_logstash
def install_logstash():
    """Install Logstash from an RPM and register it as a systemd service.

    Also installs the JDBC output plugin/driver, creates the log directory
    and deploys a systemd unit override for restart behaviour.
    """
    unit_override_dir = '/etc/systemd/system/logstash.service.d'
    rpm_url = ctx_properties['logstash_rpm_source_url']
    log_dir = '/var/log/cloudify/logstash'

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)
    utils.yum_install(rpm_url, service_name=LOGSTASH_SERVICE_NAME)
    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    utils.mkdir(log_dir)
    utils.chown('logstash', 'logstash', log_dir)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(unit_override_dir)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(unit_override_dir),
        LOGSTASH_SERVICE_NAME)
示例8: create_cloudify_user
def create_cloudify_user():
    """Create the Cloudify service user/group and ensure its home exists."""
    utils.create_service_user(user=utils.CLOUDIFY_USER,
                              group=utils.CLOUDIFY_GROUP,
                              home=utils.CLOUDIFY_HOME_DIR)
    utils.mkdir(utils.CLOUDIFY_HOME_DIR)
示例9: save
def save(self, filepath=None):
    """Pickle this method object to <filepath>/<name>.pkl.

    filepath: target directory; defaults to self.workdir.  The directory
    is created if missing.
    """
    target_dir = self.workdir if filepath is None else filepath
    utils.mkdir(target_dir)
    out_file = os.path.join(target_dir, "%s.pkl" % self.get_name())
    utils.writepickle(self, out_file)
    logger.info("Saved method %s to %s" % (self.get_name(), out_file))
示例10: build_src_project
def build_src_project(bindings, jamaicaoutput, targetdir, syscalls, interfaceResolver, debug, classrefs):
    """
    Construct the software portion of the project. Copy the C source code for
    the Jamaica project, refactoring the functions that are implemented on the
    FPGA.  Also copies the FPGA interface and build scripts.

    bindings:
        A map {id -> java method signature} that gives the ID of each hardware
        method. Generated from prepare_hls_project.build_from_functions
    jamaicaoutput:
        Absolute path of the jamaica builder output directory which contains
        the source C files
    targetdir:
        Absolute path to place output files

    Raises CaicosError when the Jamaica build output is missing.
    """
    main_obj = join(jamaicaoutput, "Main__nc.o")
    if not os.path.isfile(main_obj):
        raise CaicosError("Cannot find file " + str(main_obj) +
                          ". Ensure that the application has first be been built by Jamaica Builder.")

    mkdir(targetdir)
    src_dir = join(targetdir, "src")

    copy_files(project_path("projectfiles", "juniper_fpga_interface"),
               join(targetdir, "juniper_fpga_interface"))
    copy_files(project_path("projectfiles", "malloc_preload"),
               join(targetdir, "malloc_preload"))

    refactor_src(bindings, jamaicaoutput, src_dir, debug)
    if debug:
        copy_files(project_path("debug_software"), src_dir)
    generate_interrupt_handler(join(src_dir, "caicos_interrupts.c"),
                               syscalls, interfaceResolver, classrefs)

    shutil.copy(main_obj, src_dir)
    shutil.copy(project_path("projectfiles", "include", "juniperoperations.h"), src_dir)
    shutil.copy(project_path("projectfiles", "scripts", "run.sh"), targetdir)
    make_executable([join(targetdir, "run.sh")])
示例11: export
def export(self, path, module_name, manifest_name, parameters=None):
    """Write this app into a Puppet module layout rooted at *path*.

    Creates the files/ and templates/ directories, then rewrites the
    existing manifest manifests/<manifest_name>.pp in place: everything
    from the "class" line's closing characters onward is replaced with
    the optional parameter block, an opening brace, each file's exported
    resource, and a closing brace.

    path: module root; must already contain manifests/<manifest_name>.pp.
    module_name: forwarded to each contained file's export().
    parameters: optional; rendered via self.write_parameters().
    """
    files_path = os.path.join(path, 'files')
    utils.mkdir(files_path)
    templates_path = os.path.join(path, 'templates')
    utils.mkdir(templates_path)
    manifests_path = os.path.join(path, 'manifests')
    app_manifest = os.path.join(manifests_path, manifest_name+'.pp')
    fh = open(app_manifest, "r+")
    # Sum line lengths until (and including) the line that starts with
    # "class"; `offset` then points just past that line.
    # NOTE(review): mixing `for line in fh` with seek() works only because
    # the position used below is this computed offset, not the buffered
    # file position; assumes len(line) equals its on-disk byte length
    # (single-byte encoding, '\n' newlines) — TODO confirm.
    offset=0
    for line in fh:
        offset += len(line)
        if line.startswith("class"):
            break
    # Step back over the last 2 characters of the "class" line so they
    # are overwritten by what follows.
    fh.seek(offset-2)
    if parameters:
        fh.write(self.write_parameters(parameters))
    fh.write("{\n\n")
    # Python 2 (iteritems): append each managed file's Puppet resource.
    for name, file in self.files.iteritems():
        fh.write(file.export(path, module_name))
    fh.write("}")
    fh.close()
示例12: install_amqpinflux
def install_amqpinflux():
    """Install and configure the AMQP-to-InfluxDB bridge service."""
    rpm_url = ctx.node.properties['amqpinflux_rpm_source_url']
    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()

    service_user = 'amqpinflux'
    service_group = 'amqpinflux'
    venv_path = '{0}/env'.format(AMQPINFLUX_HOME)

    ctx.logger.info('Installing AQMPInflux...')
    utils.set_selinux_permissive()
    utils.copy_notice('amqpinflux')
    utils.mkdir(AMQPINFLUX_HOME)
    utils.yum_install(rpm_url)
    _install_optional(venv_path)
    utils.create_service_user(service_user, AMQPINFLUX_HOME)
    _deploy_broker_configuration(service_group)

    ctx.logger.info('Fixing permissions...')
    utils.chown(service_user, service_group, AMQPINFLUX_HOME)
    utils.systemd.configure('amqpinflux')
示例13: configure_manager
def configure_manager(manager_config_path,
                      manager_config):
    '''Sets config defaults and creates the config file.

    manager_config_path: final location of the INI config file.
    manager_config: dict with keys subscription_id, tenant_id, client_id,
        client_secret and location.
    '''
    # BUG FIX: mkstemp returns an OPEN file descriptor; the original
    # discarded it ("_, temp_config = ...") and re-opened the path,
    # leaking one fd per call.  Wrap the fd so it is closed on exit.
    fd, temp_config = tempfile.mkstemp()
    config = ConfigParser()
    config.add_section('Credentials')
    for key in ('subscription_id', 'tenant_id', 'client_id', 'client_secret'):
        config.set('Credentials', key, manager_config[key])
    config.add_section('Azure')
    config.set('Azure', 'location', manager_config['location'])
    with os.fdopen(fd, 'w') as temp_config_file:
        config.write(temp_config_file)
    utils.mkdir(os.path.dirname(manager_config_path), use_sudo=True)
    utils.move(temp_config, manager_config_path)
    # Install prerequisites for the azure-storage Python package
    for package in ('gcc', 'python-devel', 'openssl-devel',
                    'libffi-devel', 'python-cffi'):
        utils.yum_install(package, service_name='azure-storage')
示例14: _install_rabbitmq
def _install_rabbitmq():
    """Install Erlang + RabbitMQ, configure limits/plugins/SSL and start it.

    Ends by stopping the service again; a later step is expected to start
    the fully configured broker.
    """
    erlang_rpm = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx.node.properties['rabbitmq_fd_limit']))
    log_dir = '/var/log/cloudify/rabbitmq'
    username = ctx.node.properties['rabbitmq_username']
    password = ctx.node.properties['rabbitmq_password']
    cert_public = ctx.node.properties['rabbitmq_cert_public']
    ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    cert_private = ctx.node.properties['rabbitmq_cert_private']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()
    utils.copy_notice('rabbitmq')
    utils.mkdir(log_dir)
    utils.yum_install(erlang_rpm)
    utils.yum_install(rabbitmq_rpm)
    utils.logrotate('rabbitmq')

    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')
    utils.systemd.configure('rabbitmq')

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')
    utils.systemd.systemctl('daemon-reload')
    utils.chown('rabbitmq', 'rabbitmq', log_dir)
    utils.systemd.start('cloudify-rabbitmq')
    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(username, password)
    _set_security(ssl_enabled, cert_private, cert_public)
    utils.systemd.stop('cloudify-rabbitmq', retries=5)
示例15: save_training_TP_FP_using_voc
def save_training_TP_FP_using_voc(evaluation, img_names, in_path, out_folder_name=None, neg_thresh=0.3):
'''use the voc scores to decide if a patch should be saved as a TP or FP or not
'''
assert out_folder_name is not None
general_path = utils.get_path(neural=True, data_fold=utils.TRAINING, in_or_out=utils.IN, out_folder_name=out_folder_name)
path_true = general_path+'truepos_from_selective_search/'
utils.mkdir(path_true)
path_false = general_path+'falsepos_from_selective_search/'
utils.mkdir(path_false)
for img_name in img_names:
good_detections = defaultdict(list)
bad_detections = defaultdict(list)
try:
img = cv2.imread(in_path+img_name, flags=cv2.IMREAD_COLOR)
except:
print 'Cannot open image'
sys.exit(-1)
for roof_type in utils.ROOF_TYPES:
detection_scores = evaluation.detections.best_score_per_detection[img_name][roof_type]
for detection, score in detection_scores:
if score > 0.5:
#true positive
good_detections[roof_type].append(detection)
if score < neg_thresh:
#false positive
bad_detections[roof_type].append(detection)
for roof_type in utils.ROOF_TYPES:
extraction_type = 'good'
save_training_FP_and_TP_helper(img_name, evaluation, good_detections[roof_type], path_true, general_path, img, roof_type, extraction_type, (0,255,0))
extraction_type = 'background'
save_training_FP_and_TP_helper(img_name, evaluation, bad_detections[roof_type], path_false, general_path, img, roof_type, extraction_type, (0,0,255))