This article collects typical usage examples of the string.zfill function in Python. string.zfill(x, width) pads a numeric string on the left with zeros to the given width; it lives in Python 2's string module, and in Python 3 the equivalent is the str.zfill method. If you have been wondering what string.zfill does, how to call it, or what real-world usage looks like, the curated examples below may help.
The following presents 15 code examples of the zfill function, sorted by popularity by default.
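Before the examples, here is a minimal interactive sketch of what the call does (Python 2 session; the last line shows the str.zfill method, which is the Python 3 replacement):
>>> import string
>>> string.zfill('7', 3)   # pad a numeric string on the left with zeros
'007'
>>> string.zfill(42, 4)    # non-string arguments are converted first, then padded
'0042'
>>> '7'.zfill(3)           # str.zfill: works in both Python 2 and Python 3
'007'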
Example 1: resnum
def resnum(self, uniqueid):
    """Given a uniqueid this function returns the residue number"""
    import string
    prefix = (string.split(uniqueid, ':')[0] + ':'
              + string.zfill(string.split(uniqueid, ':')[1], self.length_of_residue_numbers))
    if len(string.split(uniqueid, ',')) > 1:
        return prefix + ',' + string.split(uniqueid, ',')[1]
    else:
        return prefix
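For illustration (values assumed, not taken from the source project): given uniqueid 'A:123' and self.length_of_residue_numbers == 4, the call returns 'A:0123'.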
Example 2: _get_id
def _get_id(self, tracklist):
    # Fill in self.id and self.toc.
    # If the argument is a string ending in .rdb, the part
    # up to the suffix is taken as the id.
if type(tracklist) == type(""):
if tracklist[-4:] == ".rdb":
self.id = tracklist[:-4]
self.toc = ""
return
t = []
for i in range(2, len(tracklist), 4):
t.append((None, (int(tracklist[i : i + 2]), int(tracklist[i + 2 : i + 4]))))
tracklist = t
ntracks = len(tracklist)
self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
if ntracks <= _DB_ID_NTRACKS:
nidtracks = ntracks
else:
nidtracks = _DB_ID_NTRACKS - 1
min = 0
sec = 0
for track in tracklist:
start, length = track
min = min + length[0]
sec = sec + length[1]
min = min + sec / 60
sec = sec % 60
self.id = self.id + _dbid(min) + _dbid(sec)
for i in range(nidtracks):
start, length = tracklist[i]
self.id = self.id + _dbid(length[0]) + _dbid(length[1])
self.toc = string.zfill(ntracks, 2)
for track in tracklist:
start, length = track
self.toc = self.toc + string.zfill(length[0], 2) + string.zfill(length[1], 2)
Example 3: __init__
def __init__(self):
if not pathExists('/etc/enigma2/lamedb'):
return
file = open('/etc/enigma2/lamedb')
readlines = file.readlines()
f_service = False
i = 0
for n in xrange(0, len(readlines)):
if readlines[n] == 'services\n':
f_service = True
continue
if not f_service:
continue
if readlines[n] == 'end\n':
break
if i == 0:
referens = [ x.upper() for x in readlines[n].split(':') ]
if referens[0] == 'S':
serviceid = zfill(referens[4], 4) + ':' + zfill(referens[7], 8) + ':' + zfill(referens[5], 4) + ':' + zfill(referens[6], 4)
else:
serviceid = referens[0] + ':' + referens[1] + ':' + referens[2] + ':' + referens[3]
if i == 2:
provider = readlines[n].split(':')[1].split(',')[0].rstrip('\n')
i += 1
if i == 3:
i = 0
self.CashServiceList[serviceid] = provider
file.close()
Example 4: proc_meta
def proc_meta(IPADDR, VIDEOID, DB):
PROC = {"transfered": 0}
metadata = DB[IPADDR][VIDEOID]
PROC["status"] = get_value(metadata, "recMovieAiring.jsonForClient.video.state", "unknown")
PROC["airdate"] = get_value(metadata, "recMovieAiring.jsonForClient.airDate", "")
PROC["desc"] = get_value(metadata, "recMovie.jsonForClient.plot", "")
PROC["title"] = get_value(metadata, "recMovie.jsonForClient.title", "")
PROC["date"] = get_value(metadata, "recMovie.jsonForClient.releaseYear", "")
PROC["series"] = get_value(metadata, "recSeries.jsonForClient.title", PROC["title"])
PROC["season"] = get_value(metadata, "recEpisode.jsonForClient.seasonNumber", "0")
PROC["episode"] = get_value(metadata, "recEpisode.jsonForClient.episodeNumber", "0")
PROC["title"] = get_value(metadata, "recEpisode.jsonForClient.title", PROC["title"])
PROC["desc"] = get_value(metadata, "recEpisode.jsonForClient.description", PROC["desc"])
PROC["airdate"] = get_value(metadata, "recEpisode.jsonForClient.originalAirDate", PROC["airdate"])
PROC["date"] = get_value(metadata, "recEpisode.jsonForClient.airDate", PROC["date"])
PROC["status"] = get_value(metadata, "recEpisode.jsonForClient.video.state", PROC["status"])
if metadata.has_key("recSeason"): # is a TV show!!!!
PROC["type"] = "tv"
if string.zfill(PROC["episode"], 2) == "00":
PROC["name"] = PROC["series"] + " - " + PROC["date"][:10]
else:
PROC["name"] = (
PROC["series"] + " - S" + string.zfill(PROC["season"], 2) + "E" + string.zfill(PROC["episode"], 2)
)
if PROC["title"] != "":
PROC["name"] = PROC["name"] + " - " + PROC["title"]
else: # is a Movie!!
PROC["type"] = "movie"
PROC["name"] = PROC["title"] + " (" + str(PROC["date"]) + ")"
PROC["clean"] = clean(PROC["name"])
return PROC
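For illustration (assumed values, not from the source): if the season is '1' and the episode is '5', the two zfill calls pad them to '01' and '05', so the recording name comes out as something like 'Series - S01E05 - Title'.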
Example 5: frameCount2timeCode
def frameCount2timeCode (frames, fps = DEFAULT_FPS_VALUE):
"""
Convert a framecount to a timecode string. Provide fps in variable 'fps', default value is 25fps.
This function is the inverse of timeCode2frameCount.
>>> import Timecode
>>> Timecode.frameCount2timeCode (0, 25)
'00:00:00:00'
>>> Timecode.frameCount2timeCode (1, 25)
'00:00:00:01'
>>> Timecode.frameCount2timeCode (25, 25)
'00:00:01:00'
>>> Timecode.frameCount2timeCode (24, 24)
'00:00:01:00'
>>> Timecode.frameCount2timeCode (2159999, 25)
'23:59:59:24'
>>> Timecode.frameCount2timeCode (2073599, 24)
'23:59:59:23'
"""
HH, MM, SS, FF = frameCount2timeCodeElements (frames, fps)
return (string.zfill (HH, 2) + ":" + string.zfill (MM, 2) + ":"\
+ string.zfill (SS, 2) + ":" + string.zfill (FF, 2))
Example 6: addPerson
def addPerson(first_name=None, last_name=None,
start_date=None, default_birthplace_address_city=None,
default_address_text=None, description=None,
function=None, **kw):
"""
This creates a single temporary person with all appropriate parameters
"""
global result_list
global uid
if not (first_name or last_name):
return
uid_string = 'new_%s' % zfill(uid, 3)
if listbox is not None:
# Use input parameters instead of default
# if available in listbox
line = listbox[zfill(uid, 3)]
if line.has_key('last_name') and line.has_key('first_name') :
first_name = line['first_name']
last_name = line['last_name']
person = context.newContent(
portal_type='Person',
uid=uid_string,
first_name=first_name,
last_name=last_name,
start_date=start_date,
default_birthplace_address_city = default_birthplace_address_city,
default_address_text=default_address_text,
function=function,
description=description,
temp_object=1,
is_indexable=0,
)
result_list.append(person)
uid += 1
Example 7: Tfidf
def Tfidf(filelist):
    path = 'D:\\anaconda project\\TEST1\\'
    corpus = []  # stores the word-segmentation results of the 100 documents
for ff in filelist :
fname = path + ff+"-seg.txt"
f = open(fname,'r+')
content = f.read()
f.close()
corpus.append(content)
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(vectorizer.fit_transform(corpus))
    word = vectorizer.get_feature_names()  # all feature words across the documents
    weight = tfidf.toarray()  # the corresponding tf-idf matrix
sFilePath = 'D:\\anaconda project\TEST2\\'
if not os.path.exists(sFilePath) :
os.mkdir(sFilePath)
    # Write each document's per-word tf-idf weights into files under sFilePath
    for i in range(len(weight)):
        print u"--Writing the tf-idf of document", i, u"into", sFilePath + string.zfill(i, 5) + '.txt', "--"
        f = open(sFilePath + string.zfill(i, 5) + '.txt', 'w+')
for j in range(len(word)) :
f.write(word[j]+" "+str(weight[i][j])+"\n")
f.close()
Example 8: RGBtoHex
def RGBtoHex(color):
    """\
    Convert float (R, G, B) tuple to RRGGBB hex value (without #).
    """
    import string
    return (string.zfill(hex(int(color[0] * 255))[2:], 2)
            + string.zfill(hex(int(color[1] * 255))[2:], 2)
            + string.zfill(hex(int(color[2] * 255))[2:], 2))
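For comparison, the same zero padding can be produced without the string module by %-formatting, which works in both Python 2 and 3 (a sketch with assumed input values):
>>> color = (1.0, 0.5, 0.0)
>>> '%02x%02x%02x' % tuple([int(c * 255) for c in color])
'ff7f00'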
Example 9: advance
def advance(ntstep):
for i in range(0,ntstep):
print 'timestep = ',i+1
esbgk1.advance(numIter)
print 'converged';
esbgk1.updateTime()
if ((i+1)%output_Coeff == 0) :
if(fgamma>0):
coeff=macroFields.coeff[cellSites].asNumPyArray()
print 'BGK:',coeff[cellno,0],'cx^2',coeff[cellno,1],'cx',coeff[cellno,2]
if(fgamma==2):
coeffg=macroFields.coeffg[cellSites].asNumPyArray()
print 'ESBGK:',coeffg[cellno,0],'cx^2',coeffg[cellno,1],'cx',coeffg[cellno,2]
print ' :','cy^2',coeffg[cellno,3],'cy',coeffg[cellno,4],'cz^2',coeffg[cellno,5],'cz',coeffg[cellno,6]
print 'cxcy',coeffg[cellno,7],'cxcz',coeffg[cellno,8],'cycz',coeffg[cellno,9]
if ((i+1)%output_interval == 0) :
"""
dens=macroFields.density[cellSites].asNumPyArray()
print 'density',dens[105],dens[115],dens[125],dens[135]
press=macroFields.pressure[cellSites].asNumPyArray()
print 'pressure',press[105],press[115],press[125],press[135]
"""
dsfname = "output_"+string.zfill(str(i+1),5)+".dat"
#esbgk1.OutputDsfBLOCK(dsfname)
filename = "macro_"+string.zfill(str(i+1),5)+".dat"
tecplotESBGK.esbgkTecplotFile(meshes,macroFields,filename)
Example 10: savePictures
def savePictures(self, albumPath, pictures, comments=False):
"""
Save a list of pictures.
Args:
albumPath: the path to the album in the directory tree.
pictures: a list of pictures, where the first element is the url
and the second is a list of comments.
        comments: indicates whether to obtain the picture's comments or not.
"""
myCounter = 1
for pic in pictures:
picName = string.zfill(myCounter, CONSTANT_FILL) + '_' + pic[1] + JPG
fileName = os.path.join(albumPath, picName)
picInfo = self.tnt.getPicture(pic[0], comments)
if not os.path.exists(fileName):
if self.console:
print '| Descargando foto ' + picName + '...'
urllib.urlretrieve(picInfo[0], fileName)
        commentsFileName = os.path.join(albumPath, string.zfill(myCounter, CONSTANT_FILL) + '_' + pic[1] + TXT)
        if comments and not os.path.exists(commentsFileName) and picInfo[1] != []:
if self.console:
print '| Descargando sus comentarios...'
file2write = open(commentsFileName, 'w')
for comment in picInfo[1]:
file2write.write('******************\r\n')
file2write.write(comment[0].encode('utf-8') + ' (' + comment[1].encode('utf-8') + '):\r\n')
file2write.write(comment[2].encode('utf-8') + '\r\n')
file2write.close()
myCounter += 1
sleep(0.5)
Example 11: calctfidf
def calctfidf(fenci_list):
    corpus = []  # stores the word-segmentation results of the 100 documents
for f_fc in fenci_list:
f = open(f_fc,'r')
content = f.read()
f.close()
corpus.append(content)
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(vectorizer.fit_transform(corpus))
    word = vectorizer.get_feature_names()  # all feature words across the documents
    weight = tfidf.toarray()  # the corresponding tf-idf matrix
if not os.path.exists(tfidfpath) :
os.mkdir(tfidfpath)
    # Write each document's per-word tf-idf weights into files under tfidfpath
tfidf_list=[]
for i in range(len(weight)) :
print u"Writing all the tf-idf into the",i,u" file into ",tfidfpath+'\\'+string.zfill(i,5)+'.txt',"--------"
tfidffile=os.path.abspath(tfidfpath)+'\\'+string.zfill(i,5)+'.txt'
tfidf_list.append(tfidffile)
f = open(tfidffile,'w')
for j in range(len(word)) :
f.write(word[j]+" "+str(weight[i][j])+"\n")
f.close()
Example 12: plot_skin_temp
def plot_skin_temp(file_name, cntr_lvl=None, save_frames=False):
file, vars = peek(file_name, show_vars=False)
lon = vars['lon'].get_value()
lat = vars['lat'].get_value()
    skin_temp_var = vars['skin_temp']
    skin_temp = skin_temp_var.get_value()[0]
valid_date = str(vars['valid_date'].get_value()[0])
valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
valid_when = valid_date[6:] + ' ' \
+ cardinal_2_month(int(valid_date[4:6])) + ' ' \
+ valid_date[0:4] \
+ ' ' + valid_time[:2] + ':' \
+ valid_time[2:] + ' UTC'
m = set_default_basemap(lon,lat)
# must plot using 2d lon and lat
LON, LAT = p.meshgrid(lon,lat)
p.figure()
if cntr_lvl is not None:
m.contourf(LON,LAT,skin_temp, cntr_lvl)
else:
m.contourf(LON,LAT,skin_temp)
m.drawcoastlines()
m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=70)
    title_string = 'Skin temperature (K) valid at' \
+ '\n' + valid_when + ' ' \
+ ' from LAPS'
p.title(title_string)
if save_frames:
p.savefig('frames/frame_' + zfill(str(frame_number),3) +'_skin_temp_' + str(int(lvl[lvl_idx])) + '.png')
return
Example 13: display_time
def display_time(self):
length = len(self.TITLE)
while True:
        if self.q == 1:  # quit
break
if self.song_time >= 0 and self.douban.playingsong:
minute = int(self.song_time) / 60
sec = int(self.song_time) % 60
show_time = string.zfill(str(minute), 2) + ':' + string.zfill(str(sec), 2)
            self.get_volume()  # read the current volume
            self.TITLE = (self.TITLE[:length - 1] + ' ' + self.douban.playingsong['kbps'] + 'kbps '
                          + colored(show_time, 'cyan')
                          + ' rate: ' + colored(self.rate[int(round(self.douban.playingsong['rating_avg'])) - 1], 'red')
                          + ' vol: ')
if self.is_muted:
self.TITLE += '✖'
else:
self.TITLE += self.volume.strip() + '%'
if self.loop:
self.TITLE += ' ' + colored('↺', 'red')
else:
self.TITLE += ' ' + colored('→', 'red')
self.TITLE += '\r'
self.display()
if not self.pause:
self.song_time -= 1
else:
self.TITLE = self.TITLE[:length]
time.sleep(1)
Example 14: main
def main():
# fix UnicodeDecodeError: 'ascii'
#mm1 = sys.getdefaultencoding()
#mm2 = sys.stdin.encoding
#mm3 = sys.stdout.encoding
#print "python: %s, sys stdin: %s, sys stdout: %s" % (mm1, mm2, mm3)
#reload(sys)
#sys.setdefaultencoding('utf-8')
try:
opts,args = getopt.getopt(sys.argv[1:], "t:h", ["test", "help"])
except getopt.GetoptError:
usage()
if (len(args) != 3):
usage()
firstTag = args[0]
secondTag = args[1]
ip = args[2]
testVal = validateOpts(opts)
# Make Baseline DiffLog
    print 'Get changes between tags...'
diffLogData = diffBetweenTag(firstTag, secondTag)
    if len(diffLogData) == 0:
        print "No diff data found; try the operation with another baseline tag"
        sys.exit(-1)
# Convert DiffLog to CSV Data
print 'Building spreadsheet...'
csvData = diffLog2csv(diffLogData, ip)
    if len(csvData) == 0:
        print "No diff data found; try the operation with another baseline tag"
        sys.exit(-1)
# Make Temporary CSV FILE
csvTempFileName = "diffData.csv"
try:
wFile = open(csvTempFileName, 'w')
wFile.write(csvData)
wFile.close()
except IOError:
print "File operation failed:", csvTempFileName, "...aborting"
sys.exit(-1)
# Convert CSV to Excel
print 'Saving file...'
    # time setting
    now = time.localtime()
    date = (str(now[0]) + string.zfill(now[1], 2) + string.zfill(now[2], 2) + "_"
            + string.zfill(now[3], 2) + string.zfill(now[4], 2) + string.zfill(now[5], 2))
# outputFileName = "Tag_Compare_Result.xls"# % (firstTag, secondTag)
outputFileName = "Tag_Compare_Result_%s.xls" % date
# outputFileName = "Tag_Compare_Result_%s_%s_%s.xls" % (firstTag.replace('/', '_'), secondTag.replace('/', '_'), date)
csvData2xls(outputFileName, firstTag, secondTag)
# Remove Temporary CSV FILE
os.remove(csvTempFileName)
#close
mssqlCursor.close()
mssqlDB.close()
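For reference (not part of the original script), the 'YYYYMMDD_HHMMSS' stamp that the chain of zfill calls above assembles can also be produced in a single call:
import time
date = time.strftime('%Y%m%d_%H%M%S', time.localtime())  # e.g. '20240101_093000'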
Example 15: main
def main():
initial = 10 # Size of the smallest graph
final = 10**5 # Size of the largest graph
resolution = 10 # Number of graphs per 10 fold increase in vertex size
    mult = math.pow(initial, 1.0/resolution)  # multiplicative step between successive graph sizes
for filename in sys.argv[1:]:
n = initial
if(filename.endswith(".edges")):
fname = filename[:-6]
print fname
else:
continue
# Read all the nodes
nodefile = open(fname + ".nodes")
in_nodes = nodefile.readlines()
nodefile.close()
# Read all the edges
edgefile = open(fname + ".edges")
in_edges = edgefile.readlines()
edgefile.close()
for count in range(1 + resolution * int(math.log(final/initial, resolution))):
if( len(in_nodes) < int(round(n))):
print >>sys.stderr, "Not enough nodes", len(in_nodes), "<" ,int(round(n))
break
out_nodefile = "subgraph/"+fname+string.zfill(count, 2)+".nodes"
out_edgefile = "subgraph/"+fname+string.zfill(count, 2)+".edges"
print string.zfill(int(n), 6)
print out_nodefile, out_edgefile
generate_subgraph(int(round(n)), in_nodes, in_edges, out_nodefile, out_edgefile)
n *= mult