This article collects typical usage examples of the Go function qiniu/log.Debug: what Debug does, how it is called, and what it looks like in real projects.
The 11 code examples below are drawn from open-source projects and are ordered by popularity by default.
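Before the project examples, here is a minimal, self-contained sketch of calling Debug directly. It assumes the package is imported from github.com/qiniu/log and that it exposes Debugf, SetOutputLevel and an Ldebug level constant, as in commonly used versions of the package; treat those names, and the file name used below, as illustrative assumptions rather than something taken from the examples that follow.
package main

import (
	"github.com/qiniu/log" // assumed import path for the qiniu/log package
)

func main() {
	// Debug messages are normally filtered by log level; SetOutputLevel and
	// Ldebug are assumed to exist as in common versions of this package.
	log.SetOutputLevel(log.Ldebug)

	cacheFile := "dircache.txt" // hypothetical file name, for illustration only
	log.Debug("building cache for", cacheFile)
	log.Debugf("cache file `%s' will be truncated", cacheFile)
}
The project examples below follow the same pattern, usually wrapping fmt.Sprintf in log.Debug for formatted output and pairing it with log.Error or log.Errorf on failure paths.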
Example 1: Cache
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int64) {
if _, err := os.Stat(cacheResultFile); err != nil {
log.Debug(fmt.Sprintf("No cache file `%s' found, will create one", cacheResultFile))
} else {
os.Remove(cacheResultFile + ".old")
if rErr := os.Rename(cacheResultFile, cacheResultFile+".old"); rErr != nil {
log.Error(fmt.Sprintf("Unable to rename cache file, plz manually delete `%s' and `%s.old'",
cacheResultFile, cacheResultFile))
log.Error(rErr)
return
}
}
cacheResultFileH, err := os.OpenFile(cacheResultFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
if err != nil {
log.Error(fmt.Sprintf("Failed to open cache file `%s'", cacheResultFile))
return
}
defer cacheResultFileH.Close()
bWriter := bufio.NewWriter(cacheResultFileH)
walkStart := time.Now()
log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
var retErr error
//log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath))
if err != nil {
retErr = err
} else {
if !fi.IsDir() {
relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
fsize := fi.Size()
//Unit is 100ns
flmd := fi.ModTime().UnixNano() / 100
//log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd))
fmeta := fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", relPath, fsize, flmd))
if _, err := bWriter.WriteString(fmeta); err != nil {
log.Error(fmt.Sprintf("Failed to write data `%s' to cache file", fmeta))
retErr = err
}
fileCount += 1
}
}
return retErr
})
if err := bWriter.Flush(); err != nil {
log.Error(fmt.Sprintf("Failed to flush to cache file `%s'", cacheResultFile))
}
walkEnd := time.Now()
log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
return
}
Example 2: Cache
func (this *DirCache) Cache(cacheRootPath string, cacheResultFile string) (fileCount int64) {
cacheResultFileH, err := os.Create(cacheResultFile)
if err != nil {
log.Errorf("Failed to open cache file `%s'", cacheResultFile)
return
}
defer cacheResultFileH.Close()
bWriter := bufio.NewWriter(cacheResultFileH)
walkStart := time.Now()
log.Debug(fmt.Sprintf("Walk `%s' start from `%s'", cacheRootPath, walkStart.String()))
filepath.Walk(cacheRootPath, func(path string, fi os.FileInfo, err error) error {
var retErr error
//log.Debug(fmt.Sprintf("Walking through `%s'", cacheRootPath))
if err != nil {
retErr = err
} else {
if !fi.IsDir() {
relPath := strings.TrimPrefix(strings.TrimPrefix(path, cacheRootPath), string(os.PathSeparator))
fsize := fi.Size()
//Unit is 100ns
flmd := fi.ModTime().UnixNano() / 100
//log.Debug(fmt.Sprintf("Hit file `%s' size: `%d' mode time: `%d`", relPath, fsize, flmd))
fmeta := fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", relPath, fsize, flmd))
if _, err := bWriter.WriteString(fmeta); err != nil {
log.Errorf("Failed to write data `%s' to cache file", fmeta)
retErr = err
}
fileCount += 1
}
}
return retErr
})
if err := bWriter.Flush(); err != nil {
log.Errorf("Failed to flush to cache file `%s'", cacheResultFile)
}
walkEnd := time.Now()
log.Debug(fmt.Sprintf("Walk `%s' end at `%s'", cacheRootPath, walkEnd.String()))
log.Debug(fmt.Sprintf("Walk `%s' last for `%s'", cacheRootPath, time.Since(walkStart)))
return
}
Example 3: ListBucket
func (this *AliListBucket) ListBucket(listResultFile string) (err error) {
//open result file
fp, openErr := os.Create(listResultFile)
if openErr != nil {
err = openErr
return
}
defer fp.Close()
bw := bufio.NewWriter(fp)
//list bucket by prefix
marker := ""
prefixLen := len(this.Prefix)
ossClient := oss.NewClient(this.DataCenter, this.AccessKeyId, this.AccessKeySecret, 0)
maxRetryTimes := 5
retryTimes := 1
log.Info("Listing the oss bucket...")
for {
lbr, lbrErr := ossClient.GetBucket(this.Bucket, this.Prefix, marker, "", "")
if lbrErr != nil {
err = lbrErr
log.Error("Parse list result error, ", "marker=[", marker, "]", lbrErr)
if retryTimes <= maxRetryTimes {
log.Debug("Retry marker=", marker, "] for ", retryTimes, "time...")
retryTimes += 1
continue
} else {
break
}
} else {
retryTimes = 1
}
for _, object := range lbr.Contents {
lmdTime, lmdPErr := time.Parse("2006-01-02T15:04:05.999Z", object.LastModified)
if lmdPErr != nil {
log.Error("Parse object last modified error, ", lmdPErr)
lmdTime = time.Now()
}
bw.WriteString(fmt.Sprintln(fmt.Sprintf("%s\t%d\t%d", object.Key[prefixLen:], object.Size, lmdTime.UnixNano()/100)))
}
if !lbr.IsTruncated {
break
}
marker = lbr.NextMarker
}
fErr := bw.Flush()
if fErr != nil {
log.Error("Write data to buffer writer failed", fErr)
err = fErr
return
}
return err
}
Example 4: batchRename
func batchRename(client rs.Client, entries []qshell.RenameEntryPath) {
ret, err := qshell.BatchRename(client, entries)
if err != nil {
fmt.Println("Batch rename error", err)
}
if len(ret) > 0 {
for i, entry := range entries {
item := ret[i]
if item.Data.Error != "" {
log.Errorf("Rename '%s' => '%s' Failed, Code :%d", entry.OldKey, entry.NewKey, item.Code)
} else {
log.Debug(fmt.Sprintf("Rename '%s' => '%s' Success, Code :%d", entry.OldKey, entry.NewKey, item.Code))
}
}
}
}
Example 5: batchChgm
func batchChgm(client rs.Client, entries []qshell.ChgmEntryPath) {
ret, err := qshell.BatchChgm(client, entries)
if err != nil {
fmt.Println("Batch chgm error", err)
}
if len(ret) > 0 {
for i, entry := range entries {
item := ret[i]
if item.Data.Error != "" {
log.Errorf("Chgm '%s' => '%s' Failed, Code :%d", entry.Key, entry.MimeType, item.Code)
} else {
log.Debug(fmt.Sprintf("Chgm '%s' => '%s' Success, Code :%d", entry.Key, entry.MimeType, item.Code))
}
}
}
}
Example 6: batchDelete
func batchDelete(client rs.Client, entries []rs.EntryPath) {
ret, err := qshell.BatchDelete(client, entries)
if err != nil {
fmt.Println("Batch delete error", err)
}
if len(ret) > 0 {
for i, entry := range entries {
item := ret[i]
if item.Data.Error != "" {
log.Errorf("Delete '%s' => '%s' Failed, Code: %d", entry.Bucket, entry.Key, item.Code)
} else {
log.Debug(fmt.Sprintf("Delete '%s' => '%s' Success, Code: %d", entry.Bucket, entry.Key, item.Code))
}
}
}
}
Example 7: batchCopy
func batchCopy(client rs.Client, entries []qshell.CopyEntryPath) {
ret, err := qshell.BatchCopy(client, entries)
if err != nil {
fmt.Println("Batch move error", err)
}
if len(ret) > 0 {
for i, entry := range entries {
item := ret[i]
if item.Data.Error != "" {
log.Errorf("Copy '%s:%s' => '%s:%s' Failed, Code :%d",
entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code)
} else {
log.Debug(fmt.Sprintf("Copy '%s:%s' => '%s:%s' Success, Code :%d",
entry.SrcBucket, entry.SrcKey, entry.DestBucket, entry.DestKey, item.Code))
}
}
}
}
Example 8: put
func put(c rpc.Client, l rpc.Logger, ret interface{}, key string, hasKey bool, f io.ReaderAt, fsize int64, extra *PutExtra) error {
once.Do(initWorkers)
blockCnt := BlockCount(fsize)
if extra == nil {
extra = new(PutExtra)
}
//load the progress file
var progressWLock = sync.RWMutex{}
if extra.ProgressFile != "" {
progressRecord := ProgressRecord{}
if _, pStatErr := os.Stat(extra.ProgressFile); pStatErr == nil {
progressFp, openErr := os.Open(extra.ProgressFile)
if openErr == nil {
func() {
defer progressFp.Close()
decoder := json.NewDecoder(progressFp)
decodeErr := decoder.Decode(&progressRecord)
if decodeErr != nil {
log.Debug(fmt.Sprintf("resumable.Put decode progess record error, %s", decodeErr.Error()))
}
}()
} else {
log.Debug(fmt.Sprintf("resumable.Put open progress record error, %s", openErr.Error()))
}
}
//load in progresses
if progressRecord.Progresses != nil && len(progressRecord.Progresses) > 0 {
//check the expire date of the first progress
now := time.Now()
first := progressRecord.Progresses[0]
if now.Add(time.Hour*24).Unix() <= first.ExpiredAt {
//not expired, go ahead
extra.Progresses = progressRecord.Progresses
}
}
}
if extra.Progresses == nil {
extra.Progresses = make([]BlkputRet, blockCnt)
} else if len(extra.Progresses) != blockCnt {
return ErrInvalidPutProgress
}
if extra.ChunkSize == 0 {
extra.ChunkSize = settings.ChunkSize
}
if extra.TryTimes == 0 {
extra.TryTimes = settings.TryTimes
}
if extra.Notify == nil {
extra.Notify = notifyNil
}
if extra.NotifyErr == nil {
extra.NotifyErr = notifyErrNil
}
var wg sync.WaitGroup
wg.Add(blockCnt)
last := blockCnt - 1
blkSize := 1 << blockBits
nfails := 0
for i := 0; i < blockCnt; i++ {
blkIdx := i
blkSize1 := blkSize
if i == last {
offbase := int64(blkIdx) << blockBits
blkSize1 = int(fsize - offbase)
}
task := func() {
defer wg.Done()
tryTimes := extra.TryTimes
lzRetry:
err := ResumableBlockput(c, l, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra)
if err != nil {
if tryTimes > 1 {
tryTimes--
log.Info("resumable.Put retrying ...")
goto lzRetry
}
log.Warn("resumable.Put", blkIdx, "failed:", err)
extra.NotifyErr(blkIdx, blkSize1, err)
nfails++
} else {
//record block progress
if extra.ProgressFile != "" {
progressWLock.Lock()
func() {
defer progressWLock.Unlock()
progressRecord := ProgressRecord{
Progresses: extra.Progresses,
}
mData, mErr := json.Marshal(progressRecord)
//......... part of the code is omitted here .........
Example 9: QiniuUpload
func QiniuUpload(threadCount int, uploadConfig *UploadConfig) {
timeStart := time.Now()
//make SrcDir the full path
uploadConfig.SrcDir, _ = filepath.Abs(uploadConfig.SrcDir)
dirCache := DirCache{}
pathSep := string(os.PathSeparator)
//create job id
jobId := Md5Hex(fmt.Sprintf("%s:%s", uploadConfig.SrcDir, uploadConfig.Bucket))
//local storage path
storePath := filepath.Join(".qshell", "qupload", jobId)
if err := os.MkdirAll(storePath, 0775); err != nil {
log.Errorf("Failed to mkdir `%s' due to `%s'", storePath, err)
return
}
//cache file
rescanLocalDir := false
cacheResultName := filepath.Join(storePath, jobId+".cache")
cacheTempName := filepath.Join(storePath, jobId+".cache.temp")
cacheCountName := filepath.Join(storePath, jobId+".count")
if _, statErr := os.Stat(cacheResultName); statErr == nil {
//file already exists
rescanLocalDir = uploadConfig.RescanLocal
} else {
rescanLocalDir = true
}
var totalFileCount int64
if rescanLocalDir {
fmt.Println("Listing local sync dir, this can take a long time, please wait patiently...")
totalFileCount = dirCache.Cache(uploadConfig.SrcDir, cacheTempName)
if rErr := os.Remove(cacheResultName); rErr != nil {
log.Debug("Remove the old cached file error", rErr)
}
if rErr := os.Rename(cacheTempName, cacheResultName); rErr != nil {
fmt.Println("Rename the temp cached file error", rErr)
return
}
//write the total count to local file
if cFp, cErr := os.Create(cacheCountName); cErr == nil {
func() {
defer cFp.Close()
uploadInfo := UploadInfo{
TotalFileCount: totalFileCount,
}
uploadInfoBytes, mErr := json.Marshal(&uploadInfo)
if mErr == nil {
if _, wErr := cFp.Write(uploadInfoBytes); wErr != nil {
log.Errorf("Write local cached count file error %s", cErr)
} else {
cFp.Close()
}
}
}()
} else {
log.Errorf("Open local cached count file error %s", cErr)
}
} else {
fmt.Println("Use the last cached local sync dir file list ...")
//read from local cache
if rFp, rErr := os.Open(cacheCountName); rErr == nil {
func() {
defer rFp.Close()
uploadInfo := UploadInfo{}
decoder := json.NewDecoder(rFp)
if dErr := decoder.Decode(&uploadInfo); dErr == nil {
totalFileCount = uploadInfo.TotalFileCount
}
}()
} else {
log.Warnf("Open local cached count file error %s", rErr)
}
}
//leveldb folder
leveldbFileName := filepath.Join(storePath, jobId+".ldb")
ldb, err := leveldb.OpenFile(leveldbFileName, nil)
if err != nil {
log.Errorf("Open leveldb `%s' failed due to `%s'", leveldbFileName, err)
return
}
defer ldb.Close()
//sync
ufp, err := os.Open(cacheResultName)
if err != nil {
log.Errorf("Open cache file `%s' failed due to `%s'", cacheResultName, err)
return
}
defer ufp.Close()
bScanner := bufio.NewScanner(ufp)
bScanner.Split(bufio.ScanLines)
var currentFileCount int64 = 0
var successFileCount int64 = 0
var failureFileCount int64 = 0
//......... part of the code is omitted here .........
Example 10: List
func (this *ListBucket) List(bucket string, prefix string, listResultFile string) (retErr error) {
var fp *os.File
if listResultFile == "stdout" {
fp = os.Stdout
} else {
var openErr error
fp, openErr = os.Create(listResultFile)
if openErr != nil {
retErr = openErr
log.Errorf("Failed to open list result file `%s'", listResultFile)
return
}
}
defer fp.Close()
bw := bufio.NewWriter(fp)
mac := digest.Mac{this.AccessKey, []byte(this.SecretKey)}
client := rsf.New(&mac)
marker := ""
limit := 1000
run := true
maxRetryTimes := 5
retryTimes := 1
for run {
entries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit)
if err != nil {
if err == io.EOF {
run = false
} else {
log.Errorf("List error for marker `%s', %s", marker, err)
if retryTimes <= maxRetryTimes {
log.Debug(fmt.Sprintf("Retry list for marker `%s' for `%d' time", marker, retryTimes))
retryTimes += 1
continue
} else {
log.Errorf("List failed too many times for `%s'", marker)
break
}
}
} else {
retryTimes = 1
if markerOut == "" {
run = false
} else {
marker = markerOut
}
}
//append entries
for _, entry := range entries {
lineData := fmt.Sprintf("%s\t%d\t%s\t%d\t%s\t%s\r\n", entry.Key, entry.Fsize, entry.Hash, entry.PutTime, entry.MimeType, entry.EndUser)
_, wErr := bw.WriteString(lineData)
if wErr != nil {
log.Errorf("Write line data `%s' to list result file failed.", lineData)
}
}
fErr := bw.Flush()
if fErr != nil {
log.Error("Flush data to list result file error", err)
}
}
return
}
Example 11: Unzip
func Unzip(zipFilePath string, unzipPath string) (err error) {
zipReader, zipErr := zip.OpenReader(zipFilePath)
if zipErr != nil {
err = errors.New(fmt.Sprintf("Open zip file error, %s", zipErr))
return
}
defer zipReader.Close()
zipFiles := zipReader.File
//list dir
for _, zipFile := range zipFiles {
fileInfo := zipFile.FileHeader.FileInfo()
fileName := zipFile.FileHeader.Name
//check charset utf8 or gbk
if !utf8.Valid([]byte(fileName)) {
fileName, err = gbk2Utf8(fileName)
if err != nil {
err = errors.New("Unsupported filename encoding")
continue
}
}
fullPath := filepath.Join(unzipPath, fileName)
if fileInfo.IsDir() {
log.Debug("Mkdir", fullPath)
mErr := os.MkdirAll(fullPath, 0775)
if mErr != nil {
err = errors.New(fmt.Sprintf("Mkdir error, %s", mErr))
continue
}
}
}
//list file
for _, zipFile := range zipFiles {
fileInfo := zipFile.FileHeader.FileInfo()
fileName := zipFile.FileHeader.Name
//check charset utf8 or gbk
if !utf8.Valid([]byte(fileName)) {
fileName, err = gbk2Utf8(fileName)
if err != nil {
err = errors.New("Unsupported filename encoding")
continue
}
}
fullPath := filepath.Join(unzipPath, fileName)
if !fileInfo.IsDir() {
//to be compatible with pkzip(4.5)
lastIndex := strings.LastIndex(fullPath, string(os.PathSeparator))
if lastIndex != -1 {
fullPathDir := fullPath[:lastIndex]
mErr := os.MkdirAll(fullPathDir, 0775)
if mErr != nil {
err = errors.New(fmt.Sprintf("Mkdir error, %s", mErr))
continue
}
}
log.Debug("Creating file", fullPath)
localFp, openErr := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if openErr != nil {
err = errors.New(fmt.Sprintf("Open local file error, %s", openErr))
continue
}
defer localFp.Close()
zipFp, openErr := zipFile.Open()
if openErr != nil {
err = errors.New(fmt.Sprintf("Read zip content error, %s", openErr))
continue
}
defer zipFp.Close()
_, wErr := io.Copy(localFp, zipFp)
if wErr != nil {
err = errors.New(fmt.Sprintf("Save zip content error, %s", wErr))
continue
}
}
}
return
}