This page collects typical usage examples of the Golang method github.com/MG-RAST/AWE/lib/core.Workunit.Path. If you are unsure what Workunit.Path does, how to call it, or where it is useful, the curated examples below may help. You can also explore further usage examples of the type that defines this method, github.com/MG-RAST/AWE/lib/core.Workunit.
Below are 14 code examples of the Workunit.Path method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
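Before the examples, here is a minimal sketch of the pattern that recurs throughout them: Workunit.Path() is treated as the workunit's working directory, and file names are joined onto it with path.Join. The pathProvider interface, the stageFilePath helper, and the fakeWorkunit type are hypothetical illustrations, not part of AWE; *core.Workunit satisfies the interface via its Path() method.
package main

import (
	"fmt"
	"path"
)

// pathProvider captures the only capability the sketch needs from a workunit.
type pathProvider interface {
	Path() string
}

// stageFilePath resolves a file name inside the workunit's working directory,
// mirroring the path.Join(work.Path(), name) calls in the examples below.
func stageFilePath(work pathProvider, filename string) string {
	return path.Join(work.Path(), filename)
}

// fakeWorkunit stands in for *core.Workunit in this sketch.
type fakeWorkunit struct{ dir string }

func (w fakeWorkunit) Path() string { return w.dir }

func main() {
	w := fakeWorkunit{dir: "/mnt/awe/work/1234_0_1"}
	fmt.Println(stageFilePath(w, "input.fastq")) // /mnt/awe/work/1234_0_1/input.fastq
}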
Example 1: moveInputData
// fetch input data
func moveInputData(work *core.Workunit) (size int64, err error) {
	for _, io := range work.Inputs {
		inputname := io.FileName
		dataUrl, uerr := io.DataUrl()
		if uerr != nil {
			return 0, uerr
		}
		if work.Rank > 0 {
			dataUrl = fmt.Sprintf("%s&index=%s&part=%s", dataUrl, work.IndexType(), work.Part())
		}
		inputFilePath := path.Join(work.Path(), inputname)
		logger.Debug(2, "mover: fetching input from url:"+dataUrl)
		logger.Event(event.FILE_IN, "workid="+work.Id+" url="+dataUrl)
		// this gets file from any downloadable url, not just shock
		if datamoved, _, err := shock.FetchFile(inputFilePath, dataUrl, work.Info.DataToken, io.Uncompress, false); err != nil {
			return size, err
		} else {
			size += datamoved
		}
		logger.Event(event.FILE_READY, "workid="+work.Id+";url="+dataUrl)
	}
	return
}
Example 2: movePreData
// fetch prerequisite data (e.g. reference dbs)
func movePreData(workunit *core.Workunit) (size int64, err error) {
	for name, io := range workunit.Predata {
		predata_directory := path.Join(conf.DATA_PATH, "predata")
		err = os.MkdirAll(predata_directory, 755) // note: 755 is a decimal literal; the octal permission 0755 is almost certainly intended
		if err != nil {
			return 0, errors.New("error creating predata_directory: " + err.Error())
		}
		file_path := path.Join(predata_directory, name)
		if !isFileExisting(file_path) {
			size, err = shock.FetchFile(file_path, io.Url, workunit.Info.DataToken, io.Uncompress)
			if err != nil {
				return 0, errors.New("error in fetchFile:" + err.Error())
			}
		}
		use_symlink := false
		linkname := path.Join(workunit.Path(), name)
		if workunit.Cmd.Dockerimage != "" || strings.HasPrefix(workunit.Cmd.Name, "app:") { // TODO need a safer way to detect use of docker
			use_symlink = false // TODO mechanism
			if use_symlink {
				file_path = path.Join(conf.DOCKER_WORKUNIT_PREDATA_DIR, name)
				// some tasks want to write in predata dir, thus need symlink
				logger.Debug(1, "dangling symlink:"+linkname+" -> "+file_path)
				// creation of dangling symlinks is not possible with os.Symlink, thus use system ln
				link_out, err := exec.Command("ln", "-s", file_path, linkname).CombinedOutput()
				logger.Debug(1, fmt.Sprintf("ln returned %s", link_out))
				if err != nil {
					return 0, errors.New("error creating predata file symlink (dangling version): " + err.Error())
				}
			} else {
				// some programs do not accept symlinks (e.g. emirge), need to copy the file into the work directory
				// linkname refers to target file now.
				logger.Debug(1, "copy predata:"+file_path+" -> "+linkname)
				_, err := shock.CopyFile(file_path, linkname)
				if err != nil {
					return 0, fmt.Errorf("error copying file from %s to %s: %s", file_path, linkname, err.Error())
				}
			}
		} else {
			//linkname := path.Join(workunit.Path(), name)
			logger.Debug(1, "symlink:"+linkname+" -> "+file_path)
			err = os.Symlink(file_path, linkname)
			if err != nil {
				return 0, errors.New("error creating predata file symlink: " + err.Error())
			}
		}
	}
	return
}
Example 3: movePreData
// fetch prerequisite data (e.g. reference dbs)
func movePreData(workunit *core.Workunit) (err error) {
	for name, io := range workunit.Predata {
		file_path := fmt.Sprintf("%s/%s", conf.DATA_PATH, name)
		if !isFileExisting(file_path) {
			if err = fetchFile(file_path, io.Url, ""); err != nil {
				return
			}
		}
		// make a link in work dir to predata in conf.DATA_PATH
		linkname := fmt.Sprintf("%s/%s", workunit.Path(), name)
		fmt.Printf(linkname + " -> " + file_path + "\n")
		os.Symlink(file_path, linkname)
	}
	return
}
Example 4: ParseWorkunitArgs
// parse workunit, fetch input data, compose command arguments
func ParseWorkunitArgs(work *core.Workunit) (err error) {
	args := []string{}
	argstr := work.Cmd.Args
	if argstr == "" {
		return
	}
	argList := parse_arg_string(argstr)
	for _, arg := range argList {
		match, err := regexp.Match(`\$\{\w+\}`, []byte(arg))
		if err == nil && match { // replace environment variable with its value
			reg := regexp.MustCompile(`\$\{\w+\}`)
			vabs := reg.FindAll([]byte(arg), -1)
			parsedArg := arg
			for _, vab := range vabs {
				vb := bytes.TrimPrefix(vab, []byte("${"))
				vb = bytes.TrimSuffix(vb, []byte("}"))
				envvalue := os.Getenv(string(vb))
				fmt.Printf("%s=%s\n", vb, envvalue)
				parsedArg = strings.Replace(parsedArg, string(vab), envvalue, 1)
			}
			args = append(args, parsedArg)
			continue
		}
		if strings.Contains(arg, "@") { // parse input/output to accessible local file
			segs := strings.Split(arg, "@")
			if len(segs) > 2 {
				return errors.New("invalid format in command args, multiple @ within one arg")
			}
			inputname := segs[1]
			if work.Inputs.Has(inputname) {
				inputFilePath := path.Join(work.Path(), inputname)
				parsedArg := fmt.Sprintf("%s%s", segs[0], inputFilePath)
				args = append(args, parsedArg)
			}
			continue
		}
		// no @ or $, append directly
		args = append(args, arg)
	}
	work.Cmd.ParsedArgs = args
	work.State = core.WORK_STAT_PREPARED
	return nil
}
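The argument parsing above performs two substitutions: ${VAR} tokens become environment values, and prefix@name tokens become absolute paths under the work directory. The standalone sketch below reimplements just those two rules for illustration; substituteArg and the sample arguments are hypothetical, simplified stand-ins rather than the AWE implementation (it always resolves @name, whereas ParseWorkunitArgs only does so for registered inputs).
package main

import (
	"fmt"
	"os"
	"path"
	"regexp"
	"strings"
)

var envVarRe = regexp.MustCompile(`\$\{\w+\}`)

// substituteArg mirrors the two cases handled by ParseWorkunitArgs:
// "${VAR}" tokens are replaced with the environment value, and
// "prefix@name" is rewritten so that name becomes an absolute path
// inside workDir. Anything else is returned unchanged.
func substituteArg(arg, workDir string) (string, error) {
	if envVarRe.MatchString(arg) {
		return envVarRe.ReplaceAllStringFunc(arg, func(m string) string {
			return os.Getenv(strings.TrimSuffix(strings.TrimPrefix(m, "${"), "}"))
		}), nil
	}
	if strings.Contains(arg, "@") {
		segs := strings.Split(arg, "@")
		if len(segs) > 2 {
			return "", fmt.Errorf("invalid format in command args, multiple @ within one arg")
		}
		return segs[0] + path.Join(workDir, segs[1]), nil
	}
	return arg, nil
}

func main() {
	os.Setenv("THREADS", "8")
	for _, a := range []string{"-p=${THREADS}", "-i=@reads.fa", "--verbose"} {
		out, _ := substituteArg(a, "/data/work/123")
		fmt.Println(out) // -p=8, -i=/data/work/123/reads.fa, --verbose
	}
}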
Example 5: ParseWorkunitArgs
// parse workunit, fetch input data, compose command arguments
func ParseWorkunitArgs(work *core.Workunit) (args []string, err error) {
	argstr := work.Cmd.Args
	if argstr == "" {
		return
	}
	argList := strings.Fields(argstr)
	inputsMap := work.Inputs
	for _, arg := range argList {
		if strings.Contains(arg, "@") { // parse input/output to accessible local file
			segs := strings.Split(arg, "@")
			if len(segs) > 2 {
				return []string{}, errors.New("invalid format in command args, multiple @ within one arg")
			}
			inputname := segs[1]
			if inputsMap.Has(inputname) {
				io := inputsMap[inputname]
				var dataUrl string
				if work.Rank == 0 {
					dataUrl = io.DataUrl()
				} else {
					dataUrl = fmt.Sprintf("%s&index=%s&part=%s", io.DataUrl(), work.IndexType(), work.Part())
				}
				inputFilePath := fmt.Sprintf("%s/%s", work.Path(), inputname)
				logger.Debug(2, "mover: fetching input from url:"+dataUrl)
				logger.Event(event.FILE_IN, "workid="+work.Id+" url="+dataUrl)
				if err := fetchFile(inputFilePath, dataUrl, work.Info.DataToken); err != nil { // get file from Shock
					return []string{}, err
				}
				logger.Event(event.FILE_READY, "workid="+work.Id+" url="+dataUrl)
				parsedArg := fmt.Sprintf("%s%s", segs[0], inputFilePath)
				args = append(args, parsedArg)
			}
		} else { // no @, has nothing to do with input/output, append directly
			args = append(args, arg)
		}
	}
	return args, nil
}
Example 6: MoveInputData
// fetch input data
func MoveInputData(work *core.Workunit) (size int64, err error) {
	for _, io := range work.Inputs {
		inputname := io.FileName
		// skip if NoFile == true
		if !io.NoFile { // is file !
			dataUrl, uerr := io.DataUrl()
			if uerr != nil {
				return 0, uerr
			}
			inputFilePath := fmt.Sprintf("%s/%s", work.Path(), inputname)
			if work.Rank == 0 {
				if conf.CACHE_ENABLED && io.Node != "" {
					if file_path, err := StatCacheFilePath(io.Node); err == nil {
						// make a link in work dir from cached file
						linkname := fmt.Sprintf("%s/%s", work.Path(), inputname)
						fmt.Printf("input found in cache, making link: " + file_path + " -> " + linkname + "\n")
						err = os.Symlink(file_path, linkname)
						if err == nil {
							logger.Event(event.FILE_READY, "workid="+work.Id+";url="+dataUrl)
						}
						return 0, err
					}
				}
			} else {
				dataUrl = fmt.Sprintf("%s&index=%s&part=%s", dataUrl, work.IndexType(), work.Part())
			}
			logger.Debug(2, "mover: fetching input file from url:"+dataUrl)
			logger.Event(event.FILE_IN, "workid="+work.Id+";url="+dataUrl)
			// download file
			if datamoved, _, err := shock.FetchFile(inputFilePath, dataUrl, work.Info.DataToken, io.Uncompress, false); err != nil {
				return size, errors.New("shock.FetchFile returned: " + err.Error())
			} else {
				size += datamoved
			}
			logger.Event(event.FILE_READY, "workid="+work.Id+";url="+dataUrl)
		}
		// download node attributes if requested
		if io.AttrFile != "" {
			// get node
			node, err := shock.ShockGet(io.Host, io.Node, work.Info.DataToken)
			if err != nil {
				//return size, err
				return size, errors.New("shock.ShockGet (node attributes) returned: " + err.Error())
			}
			logger.Debug(2, "mover: fetching input attributes from node:"+node.Id)
			logger.Event(event.ATTR_IN, "workid="+work.Id+";node="+node.Id)
			// print node attributes
			attrFilePath := fmt.Sprintf("%s/%s", work.Path(), io.AttrFile)
			attr_json, _ := json.Marshal(node.Attributes)
			if err := ioutil.WriteFile(attrFilePath, attr_json, 0644); err != nil {
				return size, err
			}
			logger.Event(event.ATTR_READY, "workid="+work.Id+";path="+attrFilePath)
		}
	}
	return
}
Example 7: moveInputData
// fetch input data
func moveInputData(work *core.Workunit) (size int64, err error) {
	for inputname, io := range work.Inputs {
		var dataUrl string
		if work.Rank == 0 {
			dataUrl = io.DataUrl()
		} else {
			dataUrl = fmt.Sprintf("%s&index=%s&part=%s", io.DataUrl(), work.IndexType(), work.Part())
		}
		inputFilePath := path.Join(work.Path(), inputname)
		logger.Debug(2, "mover: fetching input from url:"+dataUrl)
		logger.Event(event.FILE_IN, "workid="+work.Id+" url="+dataUrl)
		if datamoved, err := shock.FetchFile(inputFilePath, dataUrl, work.Info.DataToken, io.Uncompress); err != nil {
			return size, err
		} else {
			size += datamoved
		}
		logger.Event(event.FILE_READY, "workid="+work.Id+";url="+dataUrl)
	}
	return
}
Example 8: runPreWorkExecutionScript
func runPreWorkExecutionScript(work *core.Workunit) (err error) {
	// conf.PreWorkScript is a string
	// conf.PreWorkScriptArgs is a string array
	args := conf.PRE_WORK_SCRIPT_ARGS
	commandName := conf.PRE_WORK_SCRIPT
	if commandName == "" {
		return nil
	}
	cmd := exec.Command(commandName, args...)
	msg := fmt.Sprintf("worker: start pre-work cmd=%s, args=%v", commandName, args)
	fmt.Println(msg)
	logger.Debug(1, msg)
	logger.Event(event.PRE_WORK_START, "workid="+work.Id,
		"pre-work cmd="+commandName,
		fmt.Sprintf("args=%v", args))
	var stdout, stderr io.ReadCloser
	if conf.PRINT_APP_MSG {
		stdout, err = cmd.StdoutPipe()
		if err != nil {
			return
		}
		stderr, err = cmd.StderrPipe()
		if err != nil {
			return
		}
	}
	stdoutFilePath := fmt.Sprintf("%s/%s", work.Path(), conf.STDOUT_FILENAME)
	stderrFilePath := fmt.Sprintf("%s/%s", work.Path(), conf.STDERR_FILENAME)
	outfile, err := os.Create(stdoutFilePath)
	defer outfile.Close()
	errfile, err := os.Create(stderrFilePath)
	defer errfile.Close()
	out_writer := bufio.NewWriter(outfile)
	defer out_writer.Flush()
	err_writer := bufio.NewWriter(errfile)
	defer err_writer.Flush()
	if conf.PRINT_APP_MSG {
		go io.Copy(out_writer, stdout)
		go io.Copy(err_writer, stderr)
	}
	if err := cmd.Start(); err != nil {
		msg := fmt.Sprintf("start pre-work cmd=%s, err=%s", commandName, err.Error())
		fmt.Println(msg)
		logger.Debug(1, msg)
		return errors.New(msg)
	}
	done := make(chan error)
	go func() {
		done <- cmd.Wait()
	}()
	select {
	case <-chankill:
		if err := cmd.Process.Kill(); err != nil {
			fmt.Println("failed to kill" + err.Error())
		}
		<-done // allow goroutine to exit
		fmt.Println("process killed")
		return errors.New("process killed")
	case err := <-done:
		if err != nil {
			return errors.New(fmt.Sprintf("wait on pre-work cmd=%s, err=%s", commandName, err.Error()))
		}
	}
	logger.Event(event.PRE_WORK_END, "workid="+work.Id)
	return
}
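The chankill/done select used above (and again in the next example) is a general pattern for aborting an external command from another goroutine. The reduced, self-contained sketch below shows only that pattern; runOrKill is a hypothetical helper, kill is a plain channel standing in for chankill, and the sleep command stands in for the pre-work script.
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// runOrKill starts cmd and waits until it either finishes or a signal
// arrives on kill. The done channel carries cmd.Wait's result so the
// waiting goroutine always exits, mirroring the worker code above.
func runOrKill(cmd *exec.Cmd, kill <-chan struct{}) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-kill:
		if err := cmd.Process.Kill(); err != nil {
			fmt.Println("failed to kill: " + err.Error())
		}
		<-done // let the Wait goroutine finish
		return fmt.Errorf("process killed")
	case err := <-done:
		return err
	}
}

func main() {
	kill := make(chan struct{})
	go func() {
		time.Sleep(500 * time.Millisecond)
		close(kill) // simulate an external kill request
	}()
	err := runOrKill(exec.Command("sleep", "10"), kill)
	fmt.Println("result:", err)
}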
Example 9: RunWorkunitDirect
func RunWorkunitDirect(work *core.Workunit) (pstats *core.WorkPerf, err error) {
	pstats = new(core.WorkPerf)
	args := work.Cmd.ParsedArgs
	// change cwd to the workunit's working directory
	if err := work.CDworkpath(); err != nil {
		return nil, err
	}
	commandName := work.Cmd.Name
	if commandName == "" {
		return nil, errors.New(fmt.Sprintf("error: command name is empty"))
	}
	cmd := exec.Command(commandName, args...)
	msg := fmt.Sprintf("worker: start cmd=%s, args=%v", commandName, args)
	fmt.Println(msg)
	logger.Debug(1, msg)
	logger.Event(event.WORK_START, "workid="+work.Id,
		"cmd="+commandName,
		fmt.Sprintf("args=%v", args))
	var stdout, stderr io.ReadCloser
	if conf.PRINT_APP_MSG {
		stdout, err = cmd.StdoutPipe()
		if err != nil {
			return nil, err
		}
		stderr, err = cmd.StderrPipe()
		if err != nil {
			return nil, err
		}
	}
	stdoutFilePath := fmt.Sprintf("%s/%s", work.Path(), conf.STDOUT_FILENAME)
	stderrFilePath := fmt.Sprintf("%s/%s", work.Path(), conf.STDERR_FILENAME)
	outfile, err := os.Create(stdoutFilePath)
	defer outfile.Close()
	errfile, err := os.Create(stderrFilePath)
	defer errfile.Close()
	out_writer := bufio.NewWriter(outfile)
	defer out_writer.Flush()
	err_writer := bufio.NewWriter(errfile)
	defer err_writer.Flush()
	if conf.PRINT_APP_MSG {
		go io.Copy(out_writer, stdout)
		go io.Copy(err_writer, stderr)
	}
	if err := cmd.Start(); err != nil {
		return nil, errors.New(fmt.Sprintf("start_cmd=%s, err=%s", commandName, err.Error()))
	}
	var MaxMem uint64 = 0
	done := make(chan error)
	memcheck_done := make(chan bool)
	go func() {
		done <- cmd.Wait()
		memcheck_done <- true
	}()
	mem_check_interval_here := conf.MEM_CHECK_INTERVAL
	if mem_check_interval_here == 0 {
		mem_check_interval_here = 10 * time.Second
	}
	go func() {
		mstats := new(runtime.MemStats)
		runtime.ReadMemStats(mstats)
		MaxMem = mstats.Alloc
		time.Sleep(2 * time.Second)
		for {
			select {
			default:
				mstats := new(runtime.MemStats)
				runtime.ReadMemStats(mstats)
				if mstats.Alloc > MaxMem {
					MaxMem = mstats.Alloc
				}
				time.Sleep(mem_check_interval_here)
			case <-memcheck_done:
				return
			}
		}
	}()
	select {
	case <-chankill:
		if err := cmd.Process.Kill(); err != nil {
			fmt.Println("failed to kill" + err.Error())
		}
		<-done // allow goroutine to exit
		fmt.Println("process killed")
		return nil, errors.New("process killed")
	case err := <-done:
		if err != nil {
			// ... (remainder of this function is omitted in the source listing) ...
Example 10: RunWorkunitDocker
func RunWorkunitDocker(work *core.Workunit) (pstats *core.WorkPerf, err error) {
	pstats = new(core.WorkPerf)
	pstats.MaxMemUsage = -1
	pstats.MaxMemoryTotalRss = -1
	pstats.MaxMemoryTotalSwap = -1
	args := work.Cmd.ParsedArgs
	// change cwd to the workunit's working directory
	if err := work.CDworkpath(); err != nil {
		return nil, err
	}
	docker_preparation_start := time.Now().Unix()
	commandName := work.Cmd.Name
	use_wrapper_script := false
	wrapper_script_filename := "awe_workunit_wrapper.sh"
	wrapper_script_filename_host := path.Join(work.Path(), wrapper_script_filename)
	wrapper_script_filename_docker := path.Join(conf.DOCKER_WORK_DIR, wrapper_script_filename)
	if len(work.Cmd.Cmd_script) > 0 {
		use_wrapper_script = true
		// create wrapper script
		//conf.DOCKER_WORK_DIR
		var wrapper_content_string = "#!/bin/bash\n" + strings.Join(work.Cmd.Cmd_script, "\n") + "\n"
		logger.Debug(1, fmt.Sprintf("write wrapper script: %s\n%s", wrapper_script_filename_host, strings.Join(work.Cmd.Cmd_script, ", ")))
		var wrapper_content_bytes = []byte(wrapper_content_string)
		err = ioutil.WriteFile(wrapper_script_filename_host, wrapper_content_bytes, 0755) // not executable: 0644
		if err != nil {
			return nil, errors.New(fmt.Sprintf("error writing wrapper script, err=%s", err.Error()))
		}
	}
	//cmd := exec.Command(commandName, args...)
	container_name := "AWE_workunit"
	Dockerimage := work.Cmd.Dockerimage
	if work.App != nil && work.App.Name != "" {
		Dockerimage = work.App.AppDef.Dockerimage
	}
	if Dockerimage == "" {
		return nil, errors.New(fmt.Sprintf("Error Dockerimage string empty"))
	}
	logger.Debug(1, fmt.Sprintf("Dockerimage: %s", Dockerimage))
	use_docker_api := true
	if conf.DOCKER_BINARY != "API" {
		use_docker_api = false
	}
	var client *docker.Client = nil
	if use_docker_api {
		logger.Debug(1, fmt.Sprintf("Using docker API..."))
		client, err = docker.NewClient(conf.DOCKER_SOCKET)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("error creating docker client: %s", err.Error()))
		}
	} else {
		logger.Debug(1, fmt.Sprintf("Using docker binary..."))
	}
	//imgs, _ := client.ListImages(false)
	//for _, img := range imgs {
	//	spew.Dump(img)
	//}
	// delete any old AWE_container
	err = RemoveOldAWEContainers(client, container_name)
	if err != nil {
		return nil, err
	}
	//var node *core.ShockNode = nil
	// find image in repo (e.g. extract docker image id)
	node, dockerimage_download_url, err := findDockerImageInShock(Dockerimage, work.Info.DataToken)
	if err != nil {
		return nil, errors.New(fmt.Sprintf("Error getting docker url, err=%s", err.Error()))
	}
	// TODO attr_json, _ := json.Marshal(node.Attributes) might be better
	node_attr_map, ok := node.Attributes.(map[string]interface{})
	if !ok {
		return nil, errors.New(fmt.Sprintf("(1) could not type assert Shock_Dockerimage_attributes, Dockerimage=%s", Dockerimage))
	}
	dockerimage_id, ok := node_attr_map["id"].(string)
	if !ok {
		return nil, errors.New(fmt.Sprintf("(2) could not type assert Shock_Dockerimage_attributes, Dockerimage=%s", Dockerimage))
		// ... (remainder of this function is omitted in the source listing) ...
Example 11: movePreData
// fetch prerequisite data (e.g. reference dbs)
func movePreData(workunit *core.Workunit) (size int64, err error) {
	for _, io := range workunit.Predata {
		name := io.FileName
		predata_directory := path.Join(conf.DATA_PATH, "predata")
		err = os.MkdirAll(predata_directory, 755) // note: 755 is a decimal literal; the octal permission 0755 is almost certainly intended
		if err != nil {
			return 0, errors.New("error creating predata_directory: " + err.Error())
		}
		file_path := path.Join(predata_directory, name)
		dataUrl, uerr := io.DataUrl()
		if uerr != nil {
			return 0, uerr
		}
		// get shock and local md5sums
		isShockPredata := true
		node_md5 := ""
		if io.Node == "-" {
			isShockPredata = false
		} else {
			node, err := shock.ShockGet(io.Host, io.Node, workunit.Info.DataToken)
			if err != nil {
				return 0, errors.New("error in ShockGet: " + err.Error())
			}
			// rename file to be md5sum
			node_md5 = node.File.Checksum["md5"]
			file_path = path.Join(predata_directory, node_md5)
		}
		// file does not exist or its md5sum is wrong
		if !isFileExisting(file_path) {
			logger.Debug(2, "mover: fetching predata from url: "+dataUrl)
			logger.Event(event.PRE_IN, "workid="+workunit.Id+" url="+dataUrl)
			var md5sum string
			file_path_part := file_path + ".part" // temporary name
			// this gets file from any downloadable url, not just shock
			size, md5sum, err = shock.FetchFile(file_path_part, dataUrl, workunit.Info.DataToken, io.Uncompress, isShockPredata)
			if err != nil {
				return 0, errors.New("error in fetchFile: " + err.Error())
			}
			err = os.Rename(file_path_part, file_path) // capture the rename error so the check below is meaningful
			if err != nil {
				return 0, errors.New("error renaming after download of preData: " + err.Error())
			}
			if isShockPredata {
				if node_md5 != md5sum {
					return 0, errors.New("error downloaded file md5 does not match shock md5, node: " + io.Node)
				} else {
					logger.Debug(2, "mover: predata "+name+" has md5sum "+md5sum)
				}
			}
		} else {
			logger.Debug(2, "mover: predata already exists: "+name)
		}
		// timestamp for last access - future caching
		accessfile, err := os.Create(file_path + ".access")
		if err != nil {
			return 0, errors.New("error creating predata access file: " + err.Error())
		}
		defer accessfile.Close()
		accessfile.WriteString(time.Now().String())
		// determine if running with docker
		wants_docker := false
		if workunit.Cmd.Dockerimage != "" || workunit.App != nil { // TODO need a safer way to detect use of docker
			wants_docker = true
		}
		if wants_docker && conf.USE_DOCKER == "no" {
			return 0, errors.New("error: use of docker images has been disabled by administrator")
		}
		if wants_docker == false && conf.USE_DOCKER == "only" {
			return 0, errors.New("error: use of docker images is enforced by administrator")
		}
		// copy or create symlink in work dir
		linkname := path.Join(workunit.Path(), name)
		if conf.NO_SYMLINK {
			// some programs do not accept symlinks (e.g. emirge), need to copy the file into the work directory
			logger.Debug(1, "copy predata: "+file_path+" -> "+linkname)
			_, err := shock.CopyFile(file_path, linkname)
			if err != nil {
				return 0, fmt.Errorf("error copying file from %s to %s: %s", file_path, linkname, err.Error())
			}
		} else {
			if wants_docker {
				// new filepath for predata dir in container
				var docker_file_path string
				if isShockPredata {
					docker_file_path = path.Join(conf.DOCKER_WORKUNIT_PREDATA_DIR, node_md5)
				} else {
					docker_file_path = path.Join(conf.DOCKER_WORKUNIT_PREDATA_DIR, name)
				}
				logger.Debug(1, "creating dangling symlink: "+linkname+" -> "+docker_file_path)
				// dangling link will give error, we ignore that here
				_ = os.Symlink(docker_file_path, linkname)
			} else {
				// ... (remainder of this function is omitted in the source listing) ...
Example 12: ParseWorkunitArgs
// parse workunit, fetch input data, compose command arguments
func ParseWorkunitArgs(work *core.Workunit) (err error) {
	args := []string{}
	argstr := work.Cmd.Args
	if argstr == "" {
		return
	}
	workpath := work.Path()
	if len(work.Cmd.Dockerimage) > 0 {
		workpath = conf.DOCKER_WORK_DIR
	}
	// use better file name replacement technique
	var virtual_cmd_script = []string{argstr}
	replace_filepath_with_full_filepath(work.Inputs, workpath, virtual_cmd_script)
	argstr = virtual_cmd_script[0]
	argList := parse_arg_string(argstr)
	for _, arg := range argList {
		match, err := regexp.Match(`\$\{\w+\}`, []byte(arg))
		if err == nil && match { // replace environment variable with its value
			reg := regexp.MustCompile(`\$\{\w+\}`)
			vabs := reg.FindAll([]byte(arg), -1)
			parsedArg := arg
			for _, vab := range vabs {
				vb := bytes.TrimPrefix(vab, []byte("${"))
				vb = bytes.TrimSuffix(vb, []byte("}"))
				envvalue := os.Getenv(string(vb))
				fmt.Printf("%s=%s\n", vb, envvalue)
				parsedArg = strings.Replace(parsedArg, string(vab), envvalue, 1)
			}
			args = append(args, parsedArg)
			continue
		}
		// this might be deprecated by replace_filepath_with_full_filepath
		if strings.Contains(arg, "@") { // parse input/output to accessible local file
			segs := strings.Split(arg, "@")
			if len(segs) > 2 {
				return errors.New("invalid format in command args, multiple @ within one arg")
			}
			inputname := segs[1]
			for _, io := range work.Inputs {
				if io.FileName == inputname {
					inputFilePath := path.Join(workpath, inputname)
					parsedArg := fmt.Sprintf("%s%s", segs[0], inputFilePath)
					args = append(args, parsedArg)
				}
			}
			continue
		}
		// no @ or $, append directly
		args = append(args, arg)
	}
	work.Cmd.ParsedArgs = args
	work.State = core.WORK_STAT_PREPARED
	return nil
}
Example 13: UploadOutputData
func UploadOutputData(work *core.Workunit) (size int64, err error) {
	for _, io := range work.Outputs {
		name := io.FileName
		var local_filepath string // local file name generated by the cmd
		var file_path string      // file name to be uploaded to shock
		if io.Directory != "" {
			local_filepath = fmt.Sprintf("%s/%s/%s", work.Path(), io.Directory, name)
			// if specified, rename the local file name to the specified shock node file name
			// otherwise use the local name as shock file name
			file_path = local_filepath
			if io.ShockFilename != "" {
				file_path = fmt.Sprintf("%s/%s/%s", work.Path(), io.Directory, io.ShockFilename)
				os.Rename(local_filepath, file_path)
			}
		} else {
			local_filepath = fmt.Sprintf("%s/%s", work.Path(), name)
			file_path = local_filepath
			if io.ShockFilename != "" {
				file_path = fmt.Sprintf("%s/%s", work.Path(), io.ShockFilename)
				os.Rename(local_filepath, file_path)
			}
		}
		if (io.Type == "copy") || (io.Type == "update") || io.NoFile {
			file_path = ""
		} else if fi, err := os.Stat(file_path); err != nil {
			// skip this output if missing file and optional
			if io.Optional {
				continue
			} else {
				return size, errors.New(fmt.Sprintf("output %s not generated for workunit %s", name, work.Id))
			}
		} else {
			if io.Nonzero && fi.Size() == 0 {
				return size, errors.New(fmt.Sprintf("workunit %s generated zero-sized output %s while non-zero-sized file required", work.Id, name))
			}
			size += fi.Size()
		}
		logger.Debug(1, "deliverer: push output to shock, filename="+name)
		logger.Event(event.FILE_OUT,
			"workid="+work.Id,
			"filename="+name,
			fmt.Sprintf("url=%s/node/%s", io.Host, io.Node))
		// upload attribute file to shock IF attribute file is specified in outputs AND it is found in local directory.
		var attrfile_path string = ""
		if io.AttrFile != "" {
			attrfile_path = fmt.Sprintf("%s/%s", work.Path(), io.AttrFile)
			if fi, err := os.Stat(attrfile_path); err != nil || fi.Size() == 0 {
				attrfile_path = ""
			}
		}
		// set io.FormOptions["parent_node"] if not present and io.FormOptions["parent_name"] exists
		if parent_name, ok := io.FormOptions["parent_name"]; ok {
			for _, in_io := range work.Inputs {
				if in_io.FileName == parent_name {
					io.FormOptions["parent_node"] = in_io.Node
				}
			}
		}
		logger.Debug(1, "UploadOutputData, core.PutFileToShock: "+file_path)
		if err := core.PutFileToShock(file_path, io.Host, io.Node, work.Rank, work.Info.DataToken, attrfile_path, io.Type, io.FormOptions, io.NodeAttr); err != nil {
			time.Sleep(3 * time.Second) // wait for 3 seconds and try again
			if err := core.PutFileToShock(file_path, io.Host, io.Node, work.Rank, work.Info.DataToken, attrfile_path, io.Type, io.FormOptions, io.NodeAttr); err != nil {
				fmt.Errorf("push file error\n") // note: this error value is discarded; only the logger.Error line below is recorded
				logger.Error("op=pushfile,err=" + err.Error())
				return size, err
			}
		}
		logger.Event(event.FILE_DONE,
			"workid="+work.Id,
			"filename="+name,
			fmt.Sprintf("url=%s/node/%s", io.Host, io.Node))
		if io.ShockIndex != "" {
			if err := core.ShockPutIndex(io.Host, io.Node, io.ShockIndex, work.Info.DataToken); err != nil {
				logger.Error("warning: fail to create index on shock for shock node: " + io.Node)
			}
		}
		if conf.CACHE_ENABLED {
			// move output files to cache
			cacheDir := getCacheDir(io.Node)
			if err := os.MkdirAll(cacheDir, 0777); err != nil {
				logger.Error("cache os.MkdirAll():" + err.Error())
			}
			cacheFilePath := getCacheFilePath(io.Node) // use the same naming mechanism used by shock server
			//fmt.Printf("moving file from %s to %s\n", file_path, cacheFilePath)
			if err := os.Rename(file_path, cacheFilePath); err != nil {
				logger.Error("cache os.Rename():" + err.Error())
			}
		}
	}
	return
}
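The upload above retries exactly once after a 3-second pause before giving up. Generalized slightly, the same idea looks like the sketch below; retryOnce is a hypothetical helper for illustration and is not part of AWE or Shock.
package main

import (
	"fmt"
	"time"
)

// retryOnce calls fn and, if it fails, waits delay and tries one more
// time, matching the PutFileToShock retry in UploadOutputData above.
func retryOnce(delay time.Duration, fn func() error) error {
	if err := fn(); err != nil {
		time.Sleep(delay)
		return fn()
	}
	return nil
}

func main() {
	attempt := 0
	err := retryOnce(3*time.Second, func() error {
		attempt++
		if attempt == 1 {
			return fmt.Errorf("transient upload error") // first attempt fails
		}
		return nil // second attempt succeeds
	})
	fmt.Println("upload result:", err, "attempts:", attempt)
}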
Example 14: RunWorkunitDocker
func RunWorkunitDocker(work *core.Workunit) (pstats *core.WorkPerf, err error) {
	pstats = new(core.WorkPerf)
	pstats.MaxMemUsage = -1
	pstats.MaxMemoryTotalRss = -1
	pstats.MaxMemoryTotalSwap = -1
	args := work.Cmd.ParsedArgs
	// change cwd to the workunit's working directory
	if err := work.CDworkpath(); err != nil {
		return nil, err
	}
	commandName := work.Cmd.Name
	use_wrapper_script := false
	wrapper_script_filename := "awe_workunit_wrapper.sh"
	wrapper_script_filename_host := path.Join(work.Path(), wrapper_script_filename)
	wrapper_script_filename_docker := path.Join(conf.DOCKER_WORK_DIR, wrapper_script_filename)
	if strings.HasPrefix(commandName, "app:") {
		if len(work.Cmd.ParsedArgs) > 0 {
			use_wrapper_script = true
			// create wrapper script
			//conf.DOCKER_WORK_DIR
			var wrapper_content_string = "#!/bin/bash\n" + strings.Join(work.Cmd.Cmd_script, "\n") + "\n"
			logger.Debug(1, fmt.Sprintf("write wrapper script: %s\n%s", wrapper_script_filename_host, strings.Join(work.Cmd.Cmd_script, ", ")))
			var wrapper_content_bytes = []byte(wrapper_content_string)
			err = ioutil.WriteFile(wrapper_script_filename_host, wrapper_content_bytes, 0644)
			if err != nil {
				return nil, errors.New(fmt.Sprintf("error writing wrapper script, err=%s", err.Error()))
			}
		}
	}
	//cmd := exec.Command(commandName, args...)
	container_name := "AWE_workunit"
	Dockerimage := work.Cmd.Dockerimage
	logger.Debug(1, fmt.Sprintf("Dockerimage: %s", Dockerimage))
	endpoint := "unix:///var/run/docker.sock"
	client, err := docker.NewClient(endpoint)
	if err != nil {
		return nil, errors.New(fmt.Sprintf("error creating docker client: %s", err.Error()))
	}
	//imgs, _ := client.ListImages(false)
	//for _, img := range imgs {
	//	spew.Dump(img)
	//}
	// delete any old AWE_container
	err = RemoveOldAWEContainers(client, container_name)
	if err != nil {
		return nil, err
	}
	//var node *core.ShockNode = nil
	// find image in repo (e.g. extract docker image id)
	node, dockerimage_download_url, err := findDockerImageInShock(Dockerimage)
	if err != nil {
		return nil, errors.New(fmt.Sprintf("Error getting docker url, err=%s", err.Error()))
	}
	// TODO attr_json, _ := json.Marshal(node.Attributes) might be better
	node_attr_map, ok := node.Attributes.(map[string]interface{})
	if !ok {
		return nil, errors.New(fmt.Sprintf("(1) could not type assert Shock_Dockerimage_attributes, Dockerimage=%s", Dockerimage))
	}
	dockerimage_id, ok := node_attr_map["id"].(string)
	if !ok {
		return nil, errors.New(fmt.Sprintf("(2) could not type assert Shock_Dockerimage_attributes, Dockerimage=%s", Dockerimage))
	}
	if dockerimage_id == "" {
		return nil, errors.New(fmt.Sprintf("Id of Dockerimage=%s not found", Dockerimage))
	}
	logger.Debug(1, fmt.Sprintf("using dockerimage id %s instead of name %s ", dockerimage_id, Dockerimage))
	// *** find/inspect image
	image, err := client.InspectImage(dockerimage_id)
	if err != nil {
		logger.Debug(1, fmt.Sprintf("docker image %s is not yet in local repository", Dockerimage))
		image_retrieval := "load" // TODO only load is guaranteed to work
		switch {
		case image_retrieval == "load":
			// ... (remainder of this function is omitted in the source listing) ...