This page collects typical usage examples of the Golang IsFatalError function from github.com/github/git-lfs/lfs. If you are wondering what IsFatalError does and how it is used in practice, the selected examples below may help.
A total of 9 code examples of IsFatalError are shown below, sorted by popularity by default.
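All of the examples share the same error-handling pattern: when an error is fatal (or debugging is enabled) they panic with a stack trace, otherwise they exit with just the message. The following is a minimal sketch of that pattern; the Debugging flag and the Panic/Exit helpers are assumed to come from the surrounding commands package, as used in the examples below.

// Minimal sketch of the shared pattern (handleError is a hypothetical name;
// Debugging, Panic and Exit are the commands-package helpers seen below).
func handleError(err error) {
    if Debugging || lfs.IsFatalError(err) {
        // Fatal error, or debug mode: log the full error with a stack trace and abort.
        Panic(err, err.Error())
    } else {
        // Ordinary error: print the message and exit cleanly.
        Exit(err.Error())
    }
}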
Example 1: uploadPointers
func uploadPointers(pointers []*lfs.WrappedPointer) *lfs.TransferQueue {
    totalSize := int64(0)
    for _, p := range pointers {
        totalSize += p.Size
    }

    skipObjects := prePushCheckForMissingObjects(pointers)

    uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
    for i, pointer := range pointers {
        if pushDryRun {
            Print("push %s => %s", pointer.Oid, pointer.Name)
            continue
        }

        if _, skip := skipObjects[pointer.Oid]; skip {
            // object missing locally but on server, don't bother
            continue
        }

        tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

        u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
        if err != nil {
            if Debugging || lfs.IsFatalError(err) {
                Panic(err, err.Error())
            } else {
                Exit(err.Error())
            }
        }
        uploadQueue.Add(u)
    }

    return uploadQueue
}
Example 2: uploadsWithObjectIDs
func uploadsWithObjectIDs(oids []string) *lfs.TransferQueue {
    uploads := []*lfs.Uploadable{}
    // Object sizes are unknown when pushing by OID alone, so totalSize stays zero.
    totalSize := int64(0)

    for i, oid := range oids {
        if pushDryRun {
            Print("push object ID %s", oid)
            continue
        }
        tracerx.Printf("prepare upload: %s %d/%d", oid, i+1, len(oids))

        // No filename is available for a bare OID, hence the empty string.
        u, err := lfs.NewUploadable(oid, "")
        if err != nil {
            if Debugging || lfs.IsFatalError(err) {
                Panic(err, err.Error())
            } else {
                Exit(err.Error())
            }
        }
        uploads = append(uploads, u)
    }

    uploadQueue := lfs.NewUploadQueue(len(oids), totalSize, pushDryRun)
    for _, u := range uploads {
        uploadQueue.Add(u)
    }

    return uploadQueue
}
Example 3: uploadPointers
func uploadPointers(pointers []*lfs.WrappedPointer) *lfs.TransferQueue {
    totalSize := int64(0)
    for _, p := range pointers {
        totalSize += p.Size
    }

    uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
    for i, pointer := range pointers {
        if pushDryRun {
            Print("push %s [%s]", pointer.Name, pointer.Oid)
            continue
        }

        tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

        u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
        if err != nil {
            if Debugging || lfs.IsFatalError(err) {
                Panic(err, err.Error())
            } else {
                Exit(err.Error())
            }
        }
        uploadQueue.Add(u)
    }

    return uploadQueue
}
Example 4: ExitWithError
func ExitWithError(err error) {
    if Debugging || lfs.IsFatalError(err) {
        // Fatal error (or debug mode): panic so the full stack trace is logged.
        Panic(err, err.Error())
    } else {
        // If the error wraps another error, surface the inner message first.
        if inner := lfs.GetInnerError(err); inner != nil {
            Error(inner.Error())
        }
        Exit(err.Error())
    }
}
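A plausible call site for ExitWithError (doSomething is a hypothetical placeholder): any command-level failure can be routed through it so fatal errors get a stack trace while ordinary errors exit with just their message.

// Hypothetical usage of ExitWithError; doSomething stands in for any
// command operation that can fail.
if err := doSomething(); err != nil {
    ExitWithError(err)
}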
Example 5: buildTestData
func buildTestData() (oidsExist, oidsMissing []TestObject, err error) {
    const oidCount = 50
    oidsExist = make([]TestObject, 0, oidCount)
    oidsMissing = make([]TestObject, 0, oidCount)

    // Build test data for existing files & upload
    // Use test repo for this to simplify the process of making sure data matches oid
    // We're not performing a real test at this point (although an upload fail will break it)
    var callback testDataCallback
    repo := test.NewRepo(&callback)
    repo.Pushd()
    defer repo.Cleanup()

    // just one commit
    commit := test.CommitInput{CommitterName: "A N Other", CommitterEmail: "[email protected]"}
    var totalSize int64
    for i := 0; i < oidCount; i++ {
        filename := fmt.Sprintf("file%d.dat", i)
        sz := int64(rand.Intn(200)) + 50
        commit.Files = append(commit.Files, &test.FileInput{Filename: filename, Size: sz})
        totalSize += sz
    }
    outputs := repo.AddCommits([]*test.CommitInput{&commit})

    // now upload
    uploadQueue := lfs.NewUploadQueue(len(oidsExist), totalSize, false)
    for _, f := range outputs[0].Files {
        oidsExist = append(oidsExist, TestObject{Oid: f.Oid, Size: f.Size})

        u, err := lfs.NewUploadable(f.Oid, "Test file")
        if err != nil {
            return nil, nil, err
        }
        uploadQueue.Add(u)
    }
    uploadQueue.Wait()

    for _, err := range uploadQueue.Errors() {
        if lfs.IsFatalError(err) {
            exit("Fatal error setting up test data: %s", err)
        }
    }

    // Generate SHAs for missing files, random but repeatable
    // No actual file content needed for these
    rand.Seed(int64(oidCount))
    runningSha := sha256.New()
    for i := 0; i < oidCount; i++ {
        runningSha.Write([]byte{byte(rand.Intn(256))})
        oid := hex.EncodeToString(runningSha.Sum(nil))
        sz := int64(rand.Intn(200)) + 50
        oidsMissing = append(oidsMissing, TestObject{Oid: oid, Size: sz})
    }

    return oidsExist, oidsMissing, nil
}
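A plausible call site for buildTestData in a test harness (hypothetical, for illustration): the first slice should be downloadable from the test server, while the second should produce not-found errors.

// Hypothetical harness code around buildTestData.
oidsExist, oidsMissing, err := buildTestData()
if err != nil {
    exit("Failed to set up test data: %s", err)
}
// oidsExist were uploaded and should download successfully;
// oidsMissing were never uploaded and should fail to download.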
Example 6: fetchAndReportToChan
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
func fetchAndReportToChan(pointers []*lfs.WrappedPointer, include, exclude []string, out chan<- *lfs.WrappedPointer) {
    totalSize := int64(0)
    for _, p := range pointers {
        totalSize += p.Size
    }
    q := lfs.NewDownloadQueue(len(pointers), totalSize, false)

    for _, p := range pointers {
        // Only add to download queue if local file is not the right size already
        // This avoids previous case of over-reporting a requirement for files we already have
        // which would only be skipped by PointerSmudgeObject later
        passFilter := lfs.FilenamePassesIncludeExcludeFilter(p.Name, include, exclude)
        if !lfs.ObjectExistsOfSize(p.Oid, p.Size) && passFilter {
            tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
            q.Add(lfs.NewDownloadable(p))
        } else {
            if !passFilter {
                tracerx.Printf("Skipping %v [%v], include/exclude filters applied", p.Name, p.Oid)
            } else {
                tracerx.Printf("Skipping %v [%v], already exists", p.Name, p.Oid)
            }

            // If we already have it, or it won't be fetched
            // report it to chan immediately to support pull/checkout
            if out != nil {
                out <- p
            }
        }
    }

    if out != nil {
        dlwatch := q.Watch()

        go func() {
            // fetch only reports single OID, but OID *might* be referenced by multiple
            // WrappedPointers if same content is at multiple paths, so map oid->slice
            oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
            for _, pointer := range pointers {
                plist := oidToPointers[pointer.Oid]
                oidToPointers[pointer.Oid] = append(plist, pointer)
            }

            for oid := range dlwatch {
                plist, ok := oidToPointers[oid]
                if !ok {
                    continue
                }
                for _, p := range plist {
                    out <- p
                }
            }
            close(out)
        }()
    }

    processQueue := time.Now()
    q.Wait()
    tracerx.PerformanceSince("process queue", processQueue)

    for _, err := range q.Errors() {
        if Debugging || lfs.IsFatalError(err) {
            LoggedError(err, err.Error())
        } else {
            Error(err.Error())
        }
    }
}
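A plausible way to drive fetchAndReportToChan (hypothetical consumer; checkoutPointer is a placeholder name): the function is run in a goroutine so the caller can process each pointer as soon as it is reported, and the channel is closed by the fetch side once the download queue drains.

// Hypothetical caller of fetchAndReportToChan.
out := make(chan *lfs.WrappedPointer)
go fetchAndReportToChan(pointers, include, exclude, out)

for p := range out {
    // p is either already present locally or has just finished downloading.
    checkoutPointer(p) // placeholder for e.g. a checkout/pull step
}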
Example 7: pushCommand
// pushCommand pushes local objects to a Git LFS server. It takes two
// arguments:
//
//   `<remote> <remote ref>`
//
// Either a remote name ("origin") or a remote URL is accepted.
//
// pushCommand calculates the git objects to send by comparing the range
// of commits between the local and remote git servers.
func pushCommand(cmd *cobra.Command, args []string) {
    var uploadQueue *lfs.TransferQueue

    if len(args) == 0 {
        Print("Specify a remote and a remote branch name (`git lfs push origin master`)")
        os.Exit(1)
    }

    lfs.Config.CurrentRemote = args[0]

    if useStdin {
        requireStdin("Run this command from the Git pre-push hook, or leave the --stdin flag off.")

        // called from a pre-push hook! Update the existing pre-push hook if it's
        // one that git-lfs set.
        lfs.InstallHooks(false)

        refsData, err := ioutil.ReadAll(os.Stdin)
        if err != nil {
            Panic(err, "Error reading refs on stdin")
        }

        if len(refsData) == 0 {
            return
        }

        left, right := decodeRefs(string(refsData))
        if left == pushDeleteBranch {
            return
        }

        uploadQueue = uploadsBetweenRefs(left, right)
    } else if pushObjectIDs {
        if len(args) < 2 {
            Print("Usage: git lfs push --object-id <remote> <lfs-object-id> [lfs-object-id] ...")
            return
        }

        uploadQueue = uploadsWithObjectIDs(args[1:])
    } else {
        if len(args) < 1 {
            Print("Usage: git lfs push --dry-run <remote> [ref]")
            return
        }

        uploadQueue = uploadsBetweenRefAndRemote(args[0], args[1:])
    }

    if !pushDryRun {
        uploadQueue.Wait()
        for _, err := range uploadQueue.Errors() {
            if Debugging || lfs.IsFatalError(err) {
                LoggedError(err, err.Error())
            } else {
                Error(err.Error())
            }
        }

        if len(uploadQueue.Errors()) > 0 {
            os.Exit(2)
        }
    }
}
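For context, the flags referenced above (pushDryRun, useStdin, pushObjectIDs) would be wired up through cobra roughly as follows. This is a hedged sketch of a plausible registration, not necessarily the exact init code in git-lfs; RootCmd and the help strings are assumptions, only the flag names come from the code above.

// Plausible cobra wiring for pushCommand (sketch; RootCmd and help text are assumed).
var pushCmd = &cobra.Command{
    Use: "push",
    Run: pushCommand,
}

func init() {
    pushCmd.Flags().BoolVar(&pushDryRun, "dry-run", false, "Print what would be pushed without uploading")
    pushCmd.Flags().BoolVar(&useStdin, "stdin", false, "Read refs from stdin (for the pre-push hook)")
    pushCmd.Flags().BoolVar(&pushObjectIDs, "object-id", false, "Treat arguments as LFS object IDs")
    RootCmd.AddCommand(pushCmd)
}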
Example 8: prePushCommand
// prePushCommand is run through Git's pre-push hook. The pre-push hook passes
// two arguments on the command line:
//
//   1. Name of the remote to which the push is being done
//   2. URL to which the push is being done
//
// The hook receives commit information on stdin in the form:
//
//   <local ref> <local sha1> <remote ref> <remote sha1>
//
// In the typical case, prePushCommand will get a list of git objects being
// pushed by using the following:
//
//   git rev-list --objects <local sha1> ^<remote sha1>
//
// If any of those git objects are associated with Git LFS objects, those
// objects will be pushed to the Git LFS API.
//
// In the case of pushing a new branch, the list of git objects will be all of
// the git objects in this branch.
//
// In the case of deleting a branch, no attempts to push Git LFS objects will be
// made.
func prePushCommand(cmd *cobra.Command, args []string) {
    var left, right string

    if len(args) == 0 {
        Print("This should be run through Git's pre-push hook. Run `git lfs update` to install it.")
        os.Exit(1)
    }

    lfs.Config.CurrentRemote = args[0]

    refsData, err := ioutil.ReadAll(os.Stdin)
    if err != nil {
        Panic(err, "Error reading refs on stdin")
    }

    if len(refsData) == 0 {
        return
    }

    left, right = decodeRefs(string(refsData))
    if left == prePushDeleteBranch {
        return
    }

    // Just use scanner here
    scanOpt := &lfs.ScanRefsOptions{ScanMode: lfs.ScanLeftToRemoteMode, RemoteName: lfs.Config.CurrentRemote}
    pointers, err := lfs.ScanRefs(left, right, scanOpt)
    if err != nil {
        Panic(err, "Error scanning for Git LFS files")
    }

    totalSize := int64(0)
    for _, p := range pointers {
        totalSize += p.Size
    }

    // Objects to skip because they're missing locally but on server
    var skipObjects map[string]struct{}
    if !prePushDryRun {
        // Do this as a pre-flight check since upload queue starts immediately
        skipObjects = prePushCheckForMissingObjects(pointers)
    }

    uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, prePushDryRun)
    for _, pointer := range pointers {
        if prePushDryRun {
            Print("push %s [%s]", pointer.Name, pointer.Oid)
            continue
        }

        if _, skip := skipObjects[pointer.Oid]; skip {
            // object missing locally but on server, don't bother
            continue
        }

        u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
        if err != nil {
            if lfs.IsCleanPointerError(err) {
                Exit(prePushMissingErrMsg, pointer.Name, lfs.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid)
            } else if Debugging || lfs.IsFatalError(err) {
                Panic(err, err.Error())
            } else {
                Exit(err.Error())
            }
        }
        uploadQueue.Add(u)
    }

    if !prePushDryRun {
        uploadQueue.Wait()
        for _, err := range uploadQueue.Errors() {
            if Debugging || lfs.IsFatalError(err) {
                LoggedError(err, err.Error())
            } else {
                Error(err.Error())
//......... remainder of this example omitted .........
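The doc comment above notes that the list of pushed git objects comes from `git rev-list --objects <local sha1> ^<remote sha1>`. Purely as an illustration of that command (git-lfs itself goes through lfs.ScanRefs rather than shelling out like this), an equivalent call would look roughly like the following; localSha and remoteSha are assumed to hold the sha1s read from the pre-push stdin line.

// Hypothetical illustration of the rev-list call described in the doc comment.
cmd := exec.Command("git", "rev-list", "--objects", localSha, "^"+remoteSha)
out, err := cmd.Output()
if err != nil {
    Panic(err, "Error listing objects to push")
}
// Each output line is a sha1, optionally followed by a path; blobs that are
// LFS pointers identify the objects that need to be pushed to the LFS API.
_ = out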
Example 9: prePushRef
func prePushRef(left, right string) {
    // Just use scanner here
    scanOpt := lfs.NewScanRefsOptions()
    scanOpt.ScanMode = lfs.ScanLeftToRemoteMode
    scanOpt.RemoteName = lfs.Config.CurrentRemote

    pointers, err := lfs.ScanRefs(left, right, scanOpt)
    if err != nil {
        Panic(err, "Error scanning for Git LFS files")
    }

    totalSize := int64(0)
    for _, p := range pointers {
        totalSize += p.Size
    }

    // Objects to skip because they're missing locally but on server
    var skipObjects lfs.StringSet
    if !prePushDryRun {
        // Do this as a pre-flight check since upload queue starts immediately
        skipObjects = prePushCheckForMissingObjects(pointers)
    }

    uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, prePushDryRun)
    for _, pointer := range pointers {
        if prePushDryRun {
            Print("push %s => %s", pointer.Oid, pointer.Name)
            continue
        }

        if skipObjects.Contains(pointer.Oid) {
            // object missing locally but on server, don't bother
            continue
        }

        u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
        if err != nil {
            if lfs.IsCleanPointerError(err) {
                Exit(prePushMissingErrMsg, pointer.Name, lfs.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid)
            } else if Debugging || lfs.IsFatalError(err) {
                Panic(err, err.Error())
            } else {
                Exit(err.Error())
            }
        }
        uploadQueue.Add(u)
    }

    if !prePushDryRun {
        uploadQueue.Wait()
        for _, err := range uploadQueue.Errors() {
            if Debugging || lfs.IsFatalError(err) {
                LoggedError(err, err.Error())
            } else {
                Error(err.Error())
            }
        }

        if len(uploadQueue.Errors()) > 0 {
            os.Exit(2)
        }
    }
}