This page collects typical usage examples of the Golang function Gunzip from github.com/juju/utils. If you are wondering what Gunzip does, how to call it, or where to find real-world usage, the curated code samples below should help.
Nine code examples of the Gunzip function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
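Before the examples, here is a minimal round-trip sketch based on the signatures used throughout this page (utils.Gzip(data []byte) []byte and utils.Gunzip(data []byte) ([]byte, error)); treat it as an illustration under those assumptions rather than authoritative API documentation.
package main

import (
    "bytes"
    "fmt"

    "github.com/juju/utils"
)

func main() {
    original := []byte("#cloud-config\npackages:\n  - curl\n")

    // Gzip compresses the payload; Gunzip reverses it and reports
    // malformed input through its error return.
    compressed := utils.Gzip(original)
    restored, err := utils.Gunzip(compressed)
    if err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(original, restored)) // true
}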
Example 1: decodeUserData
func decodeUserData(userData string) ([]byte, error) {
    data, err := base64.StdEncoding.DecodeString(userData)
    if err != nil {
        return []byte(""), err
    }
    return utils.Gunzip(data)
}
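For context, decodeUserData undoes a gzip-then-base64 encoding. A hypothetical counterpart, not part of the original code and assuming the same imports as the example above (encoding/base64 and github.com/juju/utils), could produce input in that format:
// encodeUserData is a hypothetical helper, shown only to illustrate the
// encoding order that decodeUserData reverses: gzip first, then base64.
func encodeUserData(plain []byte) string {
    return base64.StdEncoding.EncodeToString(utils.Gzip(plain))
}

// Round trip: decodeUserData(encodeUserData(b)) returns b with a nil error.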
Example 2: generateSigmaComponents
func (c *environClient) generateSigmaComponents(baseName string, constraints *sigmaConstraints, args environs.StartInstanceParams, drv gosigma.Drive, userData []byte) (cc gosigma.Components, err error) {
    cc.SetName(baseName)
    cc.SetDescription(baseName)
    cc.SetSMP(constraints.cores)
    cc.SetCPU(constraints.power)
    cc.SetMem(constraints.mem)
    vncpass, err := utils.RandomPassword()
    if err != nil {
        err = errors.Errorf("error generating password: %v", err)
        return
    }
    cc.SetVNCPassword(vncpass)
    logger.Debugf("Setting ssh key: %s end", c.config.AuthorizedKeys())
    cc.SetSSHPublicKey(c.config.AuthorizedKeys())
    cc.AttachDrive(1, "0:0", "virtio", drv.UUID())
    cc.NetworkDHCP4(gosigma.ModelVirtio)
    if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
        cc.SetMeta(jujuMetaInstance, jujuMetaInstanceStateServer)
    } else {
        cc.SetMeta(jujuMetaInstance, jujuMetaInstanceServer)
    }
    cc.SetMeta(jujuMetaEnvironment, c.uuid)
    data, err := utils.Gunzip(userData)
    if err != nil {
        return cc, errors.Trace(err)
    }
    cc.SetMeta(jujuMetaCoudInit, base64.StdEncoding.EncodeToString(data))
    cc.SetMeta(jujuMetaBase64, jujuMetaCoudInit)
    return cc, nil
}
Example 3: TestCompression
func (*utilsSuite) TestCompression(c *gc.C) {
    data := []byte(strings.Repeat("some data to be compressed\n", 100))
    compressedData := []byte{
        0x1f, 0x8b, 0x08, 0x00, 0x33, 0xb5, 0xf6, 0x50,
        0x00, 0x03, 0xed, 0xc9, 0xb1, 0x0d, 0x00, 0x20,
        0x08, 0x45, 0xc1, 0xde, 0x29, 0x58, 0x0d, 0xe5,
        0x97, 0x04, 0x23, 0xee, 0x1f, 0xa7, 0xb0, 0x7b,
        0xd7, 0x5e, 0x57, 0xca, 0xc2, 0xaf, 0xdb, 0x2d,
        0x9b, 0xb2, 0x55, 0xb9, 0x8f, 0xba, 0x15, 0xa3,
        0x29, 0x8a, 0xa2, 0x28, 0x8a, 0xa2, 0x28, 0xea,
        0x67, 0x3d, 0x71, 0x71, 0x6e, 0xbf, 0x8c, 0x0a,
        0x00, 0x00,
    }
    cdata := utils.Gzip(data)
    c.Assert(len(cdata) < len(data), gc.Equals, true)
    data1, err := utils.Gunzip(cdata)
    c.Assert(err, gc.IsNil)
    c.Assert(data1, gc.DeepEquals, data)
    data1, err = utils.Gunzip(compressedData)
    c.Assert(err, gc.IsNil)
    c.Assert(data1, gc.DeepEquals, data)
}
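The fixed compressedData bytes above begin with the gzip magic header 0x1f 0x8b. Presumably they were produced with the standard compress/gzip package; the standalone sketch below regenerates an equivalent stream (the exact bytes differ because the gzip header embeds a modification time).
package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "strings"
)

func main() {
    data := []byte(strings.Repeat("some data to be compressed\n", 100))

    var buf bytes.Buffer
    w := gzip.NewWriter(&buf)
    if _, err := w.Write(data); err != nil {
        panic(err)
    }
    if err := w.Close(); err != nil { // flushes the gzip footer
        panic(err)
    }

    // Like the fixture above, the stream starts with 0x1f 0x8b and is far
    // smaller than the repetitive input.
    fmt.Printf("magic: %#x %#x, %d -> %d bytes\n",
        buf.Bytes()[0], buf.Bytes()[1], len(data), buf.Len())
}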
Example 4: TestBootstrapInstanceUserDataAndState
func (t *localServerSuite) TestBootstrapInstanceUserDataAndState(c *gc.C) {
    env := t.Prepare(c)
    err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{})
    c.Assert(err, jc.ErrorIsNil)
    // check that StateServerInstances returns the id of the bootstrap machine.
    instanceIds, err := env.StateServerInstances()
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(instanceIds, gc.HasLen, 1)
    insts, err := env.AllInstances()
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(insts, gc.HasLen, 1)
    c.Check(insts[0].Id(), gc.Equals, instanceIds[0])
    // check that the user data is configured to start zookeeper
    // and the machine and provisioning agents.
    // check that the user data is configured to only configure
    // authorized SSH keys and set the log output; everything
    // else happens after the machine is brought up.
    inst := t.srv.ec2srv.Instance(string(insts[0].Id()))
    c.Assert(inst, gc.NotNil)
    addresses, err := insts[0].Addresses()
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(addresses, gc.Not(gc.HasLen), 0)
    userData, err := utils.Gunzip(inst.UserData)
    c.Assert(err, jc.ErrorIsNil)
    c.Logf("first instance: UserData: %q", userData)
    var userDataMap map[interface{}]interface{}
    err = goyaml.Unmarshal(userData, &userDataMap)
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(userDataMap, jc.DeepEquals, map[interface{}]interface{}{
        "output": map[interface{}]interface{}{
            "all": "| tee -a /var/log/cloud-init-output.log",
        },
        "ssh_authorized_keys": splitAuthKeys(env.Config().AuthorizedKeys()),
        "runcmd": []interface{}{
            "set -xe",
            "install -D -m 644 /dev/null '/etc/init/juju-clean-shutdown.conf'",
            "printf '%s\\n' '\nauthor \"Juju Team <[email protected]>\"\ndescription \"Stop all network interfaces on shutdown\"\nstart on runlevel [016]\ntask\nconsole output\n\nexec /sbin/ifdown -a -v --force\n' > '/etc/init/juju-clean-shutdown.conf'",
            "install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'",
            "printf '%s\\n' 'user-admin:bootstrap' > '/var/lib/juju/nonce.txt'",
        },
    })
    // check that a new instance will be started with a machine agent
    inst1, hc := testing.AssertStartInstance(c, env, "1")
    c.Check(*hc.Arch, gc.Equals, "amd64")
    c.Check(*hc.Mem, gc.Equals, uint64(1740))
    c.Check(*hc.CpuCores, gc.Equals, uint64(1))
    c.Assert(*hc.CpuPower, gc.Equals, uint64(100))
    inst = t.srv.ec2srv.Instance(string(inst1.Id()))
    c.Assert(inst, gc.NotNil)
    userData, err = utils.Gunzip(inst.UserData)
    c.Assert(err, jc.ErrorIsNil)
    c.Logf("second instance: UserData: %q", userData)
    userDataMap = nil
    err = goyaml.Unmarshal(userData, &userDataMap)
    c.Assert(err, jc.ErrorIsNil)
    CheckPackage(c, userDataMap, "curl", true)
    CheckPackage(c, userDataMap, "mongodb-server", false)
    CheckScripts(c, userDataMap, "jujud bootstrap-state", false)
    CheckScripts(c, userDataMap, "/var/lib/juju/agents/machine-1/agent.conf", true)
    // TODO check for provisioning agent
    err = env.Destroy()
    c.Assert(err, jc.ErrorIsNil)
    _, err = env.StateServerInstances()
    c.Assert(err, gc.Equals, environs.ErrNotBootstrapped)
}
Example 5: testUserData
func (*CloudInitSuite) testUserData(c *gc.C, bootstrap bool) {
    testJujuHome := c.MkDir()
    defer osenv.SetJujuHome(osenv.SetJujuHome(testJujuHome))
    tools := &tools.Tools{
        URL: "http://foo.com/tools/releases/juju1.2.3-linux-amd64.tgz",
        Version: version.MustParseBinary("1.2.3-linux-amd64"),
    }
    envConfig, err := config.New(config.NoDefaults, dummySampleConfig())
    c.Assert(err, gc.IsNil)
    allJobs := []params.MachineJob{
        params.JobManageEnviron,
        params.JobHostUnits,
    }
    cfg := &cloudinit.MachineConfig{
        MachineId: "10",
        MachineNonce: "5432",
        Tools: tools,
        StateInfo: &state.Info{
            Info: mongo.Info{
                Addrs: []string{"127.0.0.1:1234"},
                CACert: "CA CERT\n" + testing.CACert,
            },
            Password: "pw1",
            Tag: "machine-10",
        },
        APIInfo: &api.Info{
            Addrs: []string{"127.0.0.1:1234"},
            Password: "pw2",
            CACert: "CA CERT\n" + testing.CACert,
            Tag: "machine-10",
        },
        DataDir: environs.DataDir,
        LogDir: agent.DefaultLogDir,
        Jobs: allJobs,
        CloudInitOutputLog: environs.CloudInitOutputLog,
        Config: envConfig,
        AgentEnvironment: map[string]string{agent.ProviderType: "dummy"},
        AuthorizedKeys: "wheredidileavemykeys",
        MachineAgentServiceName: "jujud-machine-10",
    }
    if bootstrap {
        cfg.Bootstrap = true
        cfg.StateServingInfo = &params.StateServingInfo{
            StatePort: envConfig.StatePort(),
            APIPort: envConfig.APIPort(),
            Cert: testing.ServerCert,
            PrivateKey: testing.ServerKey,
        }
    }
    script1 := "script1"
    script2 := "script2"
    cloudcfg := coreCloudinit.New()
    cloudcfg.AddRunCmd(script1)
    cloudcfg.AddRunCmd(script2)
    result, err := environs.ComposeUserData(cfg, cloudcfg)
    c.Assert(err, gc.IsNil)
    unzipped, err := utils.Gunzip(result)
    c.Assert(err, gc.IsNil)
    config := make(map[interface{}]interface{})
    err = goyaml.Unmarshal(unzipped, &config)
    c.Assert(err, gc.IsNil)
    // The scripts given to userData were added as the first
    // commands to be run.
    runCmd := config["runcmd"].([]interface{})
    c.Check(runCmd[0], gc.Equals, script1)
    c.Check(runCmd[1], gc.Equals, script2)
    if bootstrap {
        // The cloudinit config should have nothing but the basics:
        // SSH authorized keys, the additional runcmds, and log output.
        //
        // Note: the additional runcmds *do* belong here, at least
        // for MAAS. MAAS needs to configure and then bounce the
        // network interfaces, which would sever the SSH connection
        // in the synchronous bootstrap phase.
        c.Check(config, gc.DeepEquals, map[interface{}]interface{}{
            "output": map[interface{}]interface{}{
                "all": "| tee -a /var/log/cloud-init-output.log",
            },
            "runcmd": []interface{}{
                "script1", "script2",
                "set -xe",
                "install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'",
                "printf '%s\\n' '5432' > '/var/lib/juju/nonce.txt'",
            },
            "ssh_authorized_keys": []interface{}{"wheredidileavemykeys"},
        })
    } else {
        // Just check that the cloudinit config looks good,
        // and that there are more runcmds than the additional
        // ones we passed into ComposeUserData.
        c.Check(config["apt_upgrade"], gc.Equals, true)
        c.Check(len(runCmd) > 2, jc.IsTrue)
    }
}
Example 6: testUserData
func (*CloudInitSuite) testUserData(c *gc.C, series string, bootstrap bool) {
    testJujuHome := c.MkDir()
    defer osenv.SetJujuHome(osenv.SetJujuHome(testJujuHome))
    // Use actual series paths instead of local defaults
    logDir := must(paths.LogDir(series))
    metricsSpoolDir := must(paths.MetricsSpoolDir(series))
    dataDir := must(paths.DataDir(series))
    tools := &tools.Tools{
        URL: "http://tools.testing/tools/released/juju.tgz",
        Version: version.Binary{version.MustParse("1.2.3"), "quantal", "amd64"},
    }
    envConfig, err := config.New(config.NoDefaults, dummySampleConfig())
    c.Assert(err, jc.ErrorIsNil)
    allJobs := []multiwatcher.MachineJob{
        multiwatcher.JobManageModel,
        multiwatcher.JobHostUnits,
        multiwatcher.JobManageNetworking,
    }
    cfg := &instancecfg.InstanceConfig{
        MachineId: "10",
        MachineNonce: "5432",
        Tools: tools,
        Series: series,
        MongoInfo: &mongo.MongoInfo{
            Info: mongo.Info{
                Addrs: []string{"127.0.0.1:1234"},
                CACert: "CA CERT\n" + testing.CACert,
            },
            Password: "pw1",
            Tag: names.NewMachineTag("10"),
        },
        APIInfo: &api.Info{
            Addrs: []string{"127.0.0.1:1234"},
            Password: "pw2",
            CACert: "CA CERT\n" + testing.CACert,
            Tag: names.NewMachineTag("10"),
            ModelTag: testing.ModelTag,
        },
        DataDir: dataDir,
        LogDir: path.Join(logDir, "juju"),
        MetricsSpoolDir: metricsSpoolDir,
        Jobs: allJobs,
        CloudInitOutputLog: path.Join(logDir, "cloud-init-output.log"),
        Config: envConfig,
        AgentEnvironment: map[string]string{agent.ProviderType: "dummy"},
        AuthorizedKeys: "wheredidileavemykeys",
        MachineAgentServiceName: "jujud-machine-10",
        EnableOSUpgrade: true,
    }
    if bootstrap {
        cfg.Bootstrap = true
        cfg.StateServingInfo = &params.StateServingInfo{
            StatePort: envConfig.StatePort(),
            APIPort: envConfig.APIPort(),
            Cert: testing.ServerCert,
            PrivateKey: testing.ServerKey,
            CAPrivateKey: testing.CAKey,
        }
    }
    script1 := "script1"
    script2 := "script2"
    cloudcfg, err := cloudinit.New(series)
    c.Assert(err, jc.ErrorIsNil)
    cloudcfg.AddRunCmd(script1)
    cloudcfg.AddRunCmd(script2)
    result, err := providerinit.ComposeUserData(cfg, cloudcfg, &openstack.OpenstackRenderer{})
    c.Assert(err, jc.ErrorIsNil)
    unzipped, err := utils.Gunzip(result)
    c.Assert(err, jc.ErrorIsNil)
    config := make(map[interface{}]interface{})
    err = goyaml.Unmarshal(unzipped, &config)
    c.Assert(err, jc.ErrorIsNil)
    // The scripts given to userData were added as the first
    // commands to be run.
    runCmd := config["runcmd"].([]interface{})
    c.Check(runCmd[0], gc.Equals, script1)
    c.Check(runCmd[1], gc.Equals, script2)
    if bootstrap {
        // The cloudinit config should have nothing but the basics:
        // SSH authorized keys, the additional runcmds, and log output.
        //
        // Note: the additional runcmds *do* belong here, at least
        // for MAAS. MAAS needs to configure and then bounce the
        // network interfaces, which would sever the SSH connection
        // in the synchronous bootstrap phase.
        expected := map[interface{}]interface{}{
            "output": map[interface{}]interface{}{
                "all": "| tee -a /var/log/cloud-init-output.log",
            },
            "runcmd": []interface{}{
                "script1", "script2",
                "set -xe",
                "install -D -m 644 /dev/null '/etc/init/juju-clean-shutdown.conf'",
                "printf '%s\\n' '\nauthor \"Juju Team <[email protected]>\"\ndescription \"Stop all network interfaces on shutdown\"\nstart on runlevel [016]\ntask\nconsole output\n\nexec /sbin/ifdown -a -v --force\n' > '/etc/init/juju-clean-shutdown.conf'",
                "install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'",
                //......... the rest of this example is omitted .........
Example 7: TestBootstrapInstanceUserDataAndState
func (t *localServerSuite) TestBootstrapInstanceUserDataAndState(c *gc.C) {
    env := t.Prepare(c)
    envtesting.UploadFakeTools(c, env.Storage())
    err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
    c.Assert(err, gc.IsNil)
    // check that the state holds the id of the bootstrap machine.
    bootstrapState, err := bootstrap.LoadState(env.Storage())
    c.Assert(err, gc.IsNil)
    c.Assert(bootstrapState.StateInstances, gc.HasLen, 1)
    insts, err := env.AllInstances()
    c.Assert(err, gc.IsNil)
    c.Assert(insts, gc.HasLen, 1)
    c.Check(insts[0].Id(), gc.Equals, bootstrapState.StateInstances[0])
    // check that the user data is configured to start zookeeper
    // and the machine and provisioning agents.
    // check that the user data is configured to only configure
    // authorized SSH keys and set the log output; everything
    // else happens after the machine is brought up.
    inst := t.srv.ec2srv.Instance(string(insts[0].Id()))
    c.Assert(inst, gc.NotNil)
    addresses, err := insts[0].Addresses()
    c.Assert(err, gc.IsNil)
    c.Assert(addresses, gc.Not(gc.HasLen), 0)
    userData, err := utils.Gunzip(inst.UserData)
    c.Assert(err, gc.IsNil)
    c.Logf("first instance: UserData: %q", userData)
    var userDataMap map[interface{}]interface{}
    err = goyaml.Unmarshal(userData, &userDataMap)
    c.Assert(err, gc.IsNil)
    c.Assert(userDataMap, jc.DeepEquals, map[interface{}]interface{}{
        "output": map[interface{}]interface{}{
            "all": "| tee -a /var/log/cloud-init-output.log",
        },
        "ssh_authorized_keys": splitAuthKeys(env.Config().AuthorizedKeys()),
        "runcmd": []interface{}{
            "set -xe",
            "install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'",
            "printf '%s\\n' 'user-admin:bootstrap' > '/var/lib/juju/nonce.txt'",
        },
    })
    // check that a new instance will be started with a machine agent
    inst1, hc := testing.AssertStartInstance(c, env, "1")
    c.Check(*hc.Arch, gc.Equals, "amd64")
    c.Check(*hc.Mem, gc.Equals, uint64(1740))
    c.Check(*hc.CpuCores, gc.Equals, uint64(1))
    c.Assert(*hc.CpuPower, gc.Equals, uint64(100))
    inst = t.srv.ec2srv.Instance(string(inst1.Id()))
    c.Assert(inst, gc.NotNil)
    userData, err = utils.Gunzip(inst.UserData)
    c.Assert(err, gc.IsNil)
    c.Logf("second instance: UserData: %q", userData)
    userDataMap = nil
    err = goyaml.Unmarshal(userData, &userDataMap)
    c.Assert(err, gc.IsNil)
    CheckPackage(c, userDataMap, "git", true)
    CheckPackage(c, userDataMap, "mongodb-server", false)
    CheckScripts(c, userDataMap, "jujud bootstrap-state", false)
    CheckScripts(c, userDataMap, "/var/lib/juju/agents/machine-1/agent.conf", true)
    // TODO check for provisioning agent
    err = env.Destroy()
    c.Assert(err, gc.IsNil)
    _, err = bootstrap.LoadState(env.Storage())
    c.Assert(err, gc.NotNil)
}
Example 8: newRawInstance
// newRawInstance is where the new physical instance is actually
// provisioned, relative to the provided args and spec. Info for that
// low-level instance is returned.
func (env *environ) newRawInstance(args environs.StartInstanceParams, img *OvaFileMetadata) (*mo.VirtualMachine, *instance.HardwareCharacteristics, error) {
    machineID := common.MachineFullName(env, args.InstanceConfig.MachineId)
    cloudcfg, err := cloudinit.New(args.Tools.OneSeries())
    if err != nil {
        return nil, nil, errors.Trace(err)
    }
    cloudcfg.AddPackage("open-vm-tools")
    cloudcfg.AddPackage("iptables-persistent")
    userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg)
    if err != nil {
        return nil, nil, errors.Annotate(err, "cannot make user data")
    }
    userData, err = utils.Gunzip(userData)
    if err != nil {
        return nil, nil, errors.Trace(err)
    }
    logger.Debugf("Vmware user data; %d bytes", len(userData))
    rootDisk := common.MinRootDiskSizeGiB * 1024
    if args.Constraints.RootDisk != nil && *args.Constraints.RootDisk > rootDisk {
        rootDisk = *args.Constraints.RootDisk
    }
    cpuCores := DefaultCpuCores
    if args.Constraints.CpuCores != nil {
        cpuCores = *args.Constraints.CpuCores
    }
    cpuPower := DefaultCpuPower
    if args.Constraints.CpuPower != nil {
        cpuPower = *args.Constraints.CpuPower
    }
    mem := DefaultMemMb
    if args.Constraints.Mem != nil {
        mem = *args.Constraints.Mem
    }
    hwc := &instance.HardwareCharacteristics{
        Arch: &img.Arch,
        Mem: &mem,
        CpuCores: &cpuCores,
        CpuPower: &cpuPower,
        RootDisk: &rootDisk,
    }
    zones, err := env.parseAvailabilityZones(args)
    if err != nil {
        return nil, nil, errors.Trace(err)
    }
    var inst *mo.VirtualMachine
    for _, zone := range zones {
        var availZone *vmwareAvailZone
        availZone, err = env.availZone(zone)
        if err != nil {
            logger.Warningf("Error while getting availability zone %s: %s", zone, err)
            continue
        }
        apiPort := 0
        if isStateServer(args.InstanceConfig) {
            apiPort = args.InstanceConfig.StateServingInfo.APIPort
        }
        spec := &instanceSpec{
            machineID: machineID,
            zone: availZone,
            hwc: hwc,
            img: img,
            userData: userData,
            sshKey: args.InstanceConfig.AuthorizedKeys,
            isState: isStateServer(args.InstanceConfig),
            apiPort: apiPort,
        }
        inst, err = env.client.CreateInstance(env.ecfg, spec)
        if err != nil {
            logger.Warningf("Error while trying to create instance in %s availability zone: %s", zone, err)
            continue
        }
        break
    }
    if err != nil {
        return nil, nil, errors.Annotate(err, "Can't create instance in any of availability zones, last error")
    }
    return inst, hwc, err
}
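Examples 8 and 9 both call utils.Gunzip immediately after ComposeUserData because the target APIs expect plain cloud-init text rather than the gzip-compressed form. If a caller cannot be sure whether the data is still compressed, a small check of the gzip magic bytes (visible in the fixture of Example 3) can guard the call; the helper below is hypothetical and not part of github.com/juju/utils.
// isGzipped reports whether data starts with the two gzip magic bytes
// (0x1f, 0x8b), so callers can decide whether utils.Gunzip is needed
// before handing user data to an API that expects plain text.
func isGzipped(data []byte) bool {
    return len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b
}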
Example 9: StartInstance
func (env *joyentEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
    if args.InstanceConfig.HasNetworks() {
        return nil, errors.New("starting instances with networks is not supported yet")
    }
    series := args.Tools.OneSeries()
    arches := args.Tools.Arches()
    spec, err := env.FindInstanceSpec(&instances.InstanceConstraint{
        Region: env.Ecfg().Region(),
        Series: series,
        Arches: arches,
        Constraints: args.Constraints,
    })
    if err != nil {
        return nil, err
    }
    tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
    if err != nil {
        return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
    }
    args.InstanceConfig.Tools = tools[0]
    if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
        return nil, err
    }
    // This is a hack that ensures that instances can communicate over
    // the internal network. Joyent sometimes gives instances
    // different 10.x.x.x/21 networks and adding this route allows
    // them to talk despite this. See:
    // https://bugs.launchpad.net/juju-core/+bug/1401130
    cloudcfg, err := cloudinit.New(args.InstanceConfig.Series)
    if err != nil {
        return nil, errors.Annotate(err, "cannot create cloudinit template")
    }
    ifupScript := `
#!/bin/bash
# These guards help to ensure that this hack only runs if Joyent's
# internal network still works as it does at time of writing.
[ "$IFACE" == "eth1" ] || [ "$IFACE" == "--all" ] || exit 0
/sbin/ip -4 --oneline addr show dev eth1 | fgrep --quiet " inet 10." || exit 0
/sbin/ip route add 10.0.0.0/8 dev eth1
`[1:]
    cloudcfg.AddBootTextFile("/etc/network/if-up.d/joyent", ifupScript, 0755)
    userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg)
    if err != nil {
        return nil, errors.Annotate(err, "cannot make user data")
    }
    // Unzipping as Joyent API expects it as string
    userData, err = utils.Gunzip(userData)
    if err != nil {
        return nil, errors.Annotate(err, "cannot make user data")
    }
    logger.Debugf("joyent user data: %d bytes", len(userData))
    var machine *cloudapi.Machine
    machine, err = env.compute.cloudapi.CreateMachine(cloudapi.CreateMachineOpts{
        //Name: env.machineFullName(machineConf.MachineId),
        Package: spec.InstanceType.Name,
        Image: spec.Image.Id,
        Metadata: map[string]string{"metadata.cloud-init:user-data": string(userData)},
        Tags: map[string]string{"tag.group": "juju", "tag.env": env.Config().Name()},
    })
    if err != nil {
        return nil, errors.Annotate(err, "cannot create instances")
    }
    machineId := machine.Id
    logger.Infof("provisioning instance %q", machineId)
    machine, err = env.compute.cloudapi.GetMachine(machineId)
    if err != nil {
        return nil, errors.Annotate(err, "cannot start instances")
    }
    // wait for machine to start
    for !strings.EqualFold(machine.State, "running") {
        time.Sleep(1 * time.Second)
        machine, err = env.compute.cloudapi.GetMachine(machineId)
        if err != nil {
            return nil, errors.Annotate(err, "cannot start instances")
        }
    }
    logger.Infof("started instance %q", machineId)
    inst := &joyentInstance{
        machine: machine,
        env: env,
    }
    if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
        if err := common.AddStateInstance(env.Storage(), inst.Id()); err != nil {
            //......... the rest of this example is omitted .........