This article collects typical usage examples of the NewForConfig function from the Golang package k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5. If you have been wondering what NewForConfig does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 15 NewForConfig code examples are shown, sorted by popularity by default.
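Before the examples, here is a minimal, self-contained sketch of the pattern they all share: build a restclient.Config, pass it to NewForConfig, and check the returned error. The host URL and the namespace queried at the end are placeholders, not values taken from any example below.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// Point the client at an API server; the URL is a placeholder.
	config := &restclient.Config{Host: "http://localhost:8080"}
	// NewForConfig returns (*release_1_5.Clientset, error). Unlike the
	// NewForConfigOrDie variant used in several examples below, it reports
	// a bad config as an error instead of panicking.
	clientSet, err := release_1_5.NewForConfig(config)
	if err != nil {
		fmt.Printf("failed to create clientset: %v\n", err)
		return
	}
	// The typed clientset exposes API groups, e.g. Core() for pods and nodes.
	pods, err := clientSet.Core().Pods("default").List(v1.ListOptions{})
	if err != nil {
		fmt.Printf("failed to list pods: %v\n", err)
		return
	}
	fmt.Printf("found %d pods in the default namespace\n", len(pods.Items))
}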
Example 1: setup
func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.EnableCoreControllers = false
masterConfig.GenericConfig.EnableGarbageCollection = true
_, s := framework.RunAMaster(masterConfig)
clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
preferredResources, err := clientSet.Discovery().ServerPreferredResources()
if err != nil {
t.Fatalf("Failed to get supported resources from server: %v", err)
}
deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
if err != nil {
t.Fatalf("Failed to parse supported resources from server: %v", err)
}
config := &restclient.Config{Host: s.URL}
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), deletableGroupVersionResources)
if err != nil {
t.Fatalf("Failed to create garbage collector")
}
return s, gc, clientSet
}
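A detail worth noting in this example: the garbage collector builds two dynamic client pools from the same base config. The first swaps in a metadata-only codec factory, so the collector can watch every deletable resource while decoding only object metadata; resetting NegotiatedSerializer to nil before building the second pool restores default serialization for full-object requests.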
Example 2: rmSetup
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
resyncPeriodFunc := func() time.Duration {
return resyncPeriod
}
podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
rm := replication.NewReplicationManager(
podInformer,
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
resyncPeriodFunc,
replication.BurstReplicas,
4096,
enableGarbageCollector,
)
return s, rm, podInformer, clientSet
}
Example 3: newKubeClient
// TODO: evaluate using pkg/client/clientcmd
func newKubeClient(dnsConfig *options.KubeDNSConfig) (clientset.Interface, error) {
var (
config *restclient.Config
err error
)
if dnsConfig.KubeMasterURL != "" && dnsConfig.KubeConfigFile == "" {
// Only --kube-master-url was provided.
config = &restclient.Config{
Host: dnsConfig.KubeMasterURL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: "v1"}},
}
} else {
// We either have:
// 1) --kube-master-url and --kubecfg-file
// 2) just --kubecfg-file
// 3) neither flag
// In any case, the logic is the same. If (3), this will automatically
// fall back on the service account token.
overrides := &kclientcmd.ConfigOverrides{}
overrides.ClusterInfo.Server = dnsConfig.KubeMasterURL // might be "", but that is OK
rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: dnsConfig.KubeConfigFile} // might be "", but that is OK
if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig(); err != nil {
return nil, err
}
}
glog.V(0).Infof("Using %v for kubernetes master, kubernetes API: %v",
config.Host, config.GroupVersion)
return clientset.NewForConfig(config)
}
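The practical upshot of this fallback chain: the same binary works both out-of-cluster (pointed at an explicit master URL or a kubeconfig file) and in-cluster, where the deferred-loading client config with empty rules ultimately falls back to the pod's service account token, as the comment in the code notes.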
Example 4: rmSetup
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
rm := replicaset.NewReplicaSetController(
informers.ReplicaSets(),
informers.Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
4096,
enableGarbageCollector,
)
return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet
}
Example 5: Client
func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
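For context, here is a hedged sketch of how a caller might use this builder. The package path k8s.io/kubernetes/pkg/controller and the ClientConfig field name are assumptions based on the client builder this method appears to belong to; treat them as illustrative rather than authoritative.

package main

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller" // assumed home of SimpleControllerClientBuilder
)

func main() {
	// Base config shared by all controllers; the host is a placeholder.
	baseConfig := &restclient.Config{Host: "http://localhost:8080"}
	builder := controller.SimpleControllerClientBuilder{ClientConfig: baseConfig}
	// Each controller gets a clientset whose user agent carries its name,
	// making its requests attributable in apiserver logs.
	client, err := builder.Client("replication-controller")
	if err != nil {
		glog.Fatalf("failed to build client: %v", err)
	}
	_ = client
}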
Example 6: getAPIServerClient
// getAPIServerClient gets an apiserver client.
func getAPIServerClient() (*clientset.Clientset, error) {
config, err := framework.LoadConfig()
if err != nil {
return nil, fmt.Errorf("failed to load config: %v", err)
}
client, err := clientset.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create client: %v", err)
}
return client, nil
}
Example 7: getKubeClient
func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) {
clientConfig, err := CreateAPIServerClientConfig(s)
if err == nil {
kubeClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
return kubeClient, nil
}
return nil, err
}
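Note the inverted error handling here: the success path lives inside the if err == nil block (where a new err shadows the outer one), and the trailing return nil, err surfaces the original config error when CreateAPIServerClientConfig fails.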
Example 8: Start
// Start starts the namespace controller.
func (n *NamespaceController) Start() error {
// Use the default QPS
config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, ncName)
client, err := clientset.NewForConfig(config)
if err != nil {
return err
}
clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
discoverResourcesFn := client.Discovery().ServerPreferredNamespacedResources
nc := namespacecontroller.NewNamespaceController(client, clientPool, discoverResourcesFn, ncResyncPeriod, v1.FinalizerKubernetes)
go nc.Run(ncConcurrency, n.stopCh)
return nil
}
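The dynamic client pool here serves a different purpose than the typed clientset: when tearing a namespace down, the namespace controller must delete resources of arbitrary, discovered group/versions, and dynamic.NewClientPool hands back a lazily-created dynamic client per GroupVersion for exactly that.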
Example 9: createClients
func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclientset.Clientset, error) {
clients := make([]*clientset.Clientset, numberOfClients)
internalClients := make([]*internalclientset.Clientset, numberOfClients)
for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred())
config.QPS = 100
config.Burst = 200
if framework.TestContext.KubeAPIContentType != "" {
config.ContentType = framework.TestContext.KubeAPIContentType
}
// For the purpose of this test, we want to force that clients
// do not share underlying transport (which is a default behavior
// in Kubernetes). Thus, we are explicitly creating transport for
// each client here.
transportConfig, err := config.TransportConfig()
if err != nil {
return nil, nil, err
}
tlsConfig, err := transport.TLSConfigFor(transportConfig)
if err != nil {
return nil, nil, err
}
config.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConnsPerHost: 100,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
})
// Overwrite TLS-related fields from config to avoid collision with
// Transport field.
config.TLSClientConfig = restclient.TLSClientConfig{}
c, err := clientset.NewForConfig(config)
if err != nil {
return nil, nil, err
}
clients[i] = c
internalClient, err := internalclientset.NewForConfig(config)
if err != nil {
return nil, nil, err
}
internalClients[i] = internalClient
}
return clients, internalClients, nil
}
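Clearing config.TLSClientConfig after installing the custom transport is not just cosmetic: the rest client machinery rejects a config that sets both a Transport and TLS certificate options, so the TLS settings must live only inside the transport built above.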
Example 10: createClient
func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
if err != nil {
return nil, fmt.Errorf("unable to build config from flags: %v", err)
}
kubeconfig.ContentType = s.ContentType
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = s.KubeAPIQPS
kubeconfig.Burst = int(s.KubeAPIBurst)
cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
if err != nil {
return nil, fmt.Errorf("invalid API configuration: %v", err)
}
return cli, nil
}
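restclient.AddUserAgent stamps the config's UserAgent field with the given component name on top of the default Kubernetes user agent, so the scheduler's leader-election traffic is identifiable in apiserver logs. Examples 2, 4, 8, 11, and 12 use the same pattern.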
Example 11: rmSetup
func rmSetup(t *testing.T) (*httptest.Server, *disruption.DisruptionController, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pdb-informers")), nil, resyncPeriod)
rm := disruption.NewDisruptionController(
informers.Pods().Informer(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "disruption-controller")),
)
return s, rm, informers.Pods().Informer(), clientSet
}
Example 12: Run
// Run runs the specified SchedulerServer. This should never exit.
func Run(s *options.SchedulerServer) error {
if c, err := configz.New("componentconfig"); err == nil {
c.Set(s.KubeSchedulerConfiguration)
} else {
glog.Errorf("unable to register configz: %s", err)
}
kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
if err != nil {
glog.Errorf("unable to build config from flags: %v", err)
return err
}
kubeconfig.ContentType = s.ContentType
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = s.KubeAPIQPS
kubeconfig.Burst = int(s.KubeAPIBurst)
leaderElectionClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
configz.InstallHandler(mux)
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}()
configFactory := factory.NewConfigFactory(leaderElectionClient, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
config, err := createConfig(s, configFactory)
if err != nil {
glog.Fatalf("Failed to create scheduler configuration: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")})
sched := scheduler.New(config)
run := func(_ <-chan struct{}) {
sched.Run()
select {}
}
if !s.LeaderElection.LeaderElect {
run(nil)
glog.Fatal("this statement is unreachable")
panic("unreachable")
}
id, err := os.Hostname()
if err != nil {
glog.Errorf("unable to get hostname: %v", err)
return err
}
// TODO: enable other lock types
rl := resourcelock.EndpointsLock{
EndpointsMeta: v1.ObjectMeta{
Namespace: "kube-system",
Name: "kube-scheduler",
},
Client: leaderElectionClient,
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: config.Recorder,
},
}
leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
Lock: &rl,
LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
RetryPeriod: s.LeaderElection.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: run,
OnStoppedLeading: func() {
glog.Fatalf("lost master")
},
},
})
//......... the rest of this function is omitted .........
Example 13: run
func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
// TODO: this should be replaced by a --standalone flag
standaloneMode := (len(s.APIServerList) == 0 && !s.RequireKubeConfig)
if s.ExitOnLockContention && s.LockFilePath == "" {
return errors.New("cannot exit on lock file contention: no lock file specified")
}
done := make(chan struct{})
if s.LockFilePath != "" {
glog.Infof("acquiring file lock on %q", s.LockFilePath)
if err := flock.Acquire(s.LockFilePath); err != nil {
return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
}
if s.ExitOnLockContention {
glog.Infof("watching for inotify events for: %v", s.LockFilePath)
if err := watchForLockfileContention(s.LockFilePath, done); err != nil {
return err
}
}
}
// Set feature gates based on the value in KubeletConfiguration
err = utilconfig.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates)
if err != nil {
return err
}
// Register current configuration with /configz endpoint
cfgz, cfgzErr := initConfigz(&s.KubeletConfiguration)
if utilconfig.DefaultFeatureGate.DynamicKubeletConfig() {
// Look for config on the API server. If it exists, replace s.KubeletConfiguration
// with it and continue. initKubeletConfigSync also starts the background thread that checks for new config.
// Don't do dynamic Kubelet configuration in runonce mode
if s.RunOnce == false {
remoteKC, err := initKubeletConfigSync(s)
if err == nil {
// Update s (KubeletServer) with new config from API server
s.KubeletConfiguration = *remoteKC
// Ensure that /configz is up to date with the new config
if cfgzErr != nil {
glog.Errorf("was unable to register configz before due to %s, will not be able to set now", cfgzErr)
} else {
setConfigz(cfgz, &s.KubeletConfiguration)
}
// Update feature gates from the new config
err = utilconfig.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates)
if err != nil {
return err
}
}
}
}
if kubeDeps == nil {
var kubeClient, eventClient *clientset.Clientset
var cloud cloudprovider.Interface
if s.CloudProvider != componentconfigv1alpha1.AutoDetectCloudProvider {
cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
return err
}
if cloud == nil {
glog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
} else {
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
}
}
if s.BootstrapKubeconfig != "" {
nodeName, err := getNodeName(cloud, nodeutil.GetHostname(s.HostnameOverride))
if err != nil {
return err
}
if err := bootstrapClientCert(s.KubeConfig.Value(), s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil {
return err
}
}
clientConfig, err := CreateAPIServerClientConfig(s)
if err == nil {
kubeClient, err = clientset.NewForConfig(clientConfig)
if err != nil {
glog.Warningf("New kubeClient from clientConfig error: %v", err)
}
// make a separate client for events
eventClientConfig := *clientConfig
eventClientConfig.QPS = float32(s.EventRecordQPS)
eventClientConfig.Burst = int(s.EventBurst)
eventClient, err = clientset.NewForConfig(&eventClientConfig)
} else {
if s.RequireKubeConfig {
return fmt.Errorf("invalid kubeconfig: %v", err)
}
if standaloneMode {
glog.Warningf("No API client: %v", err)
}
}
//......... the rest of this function is omitted .........
Example 14: main
func main() {
flag.Parse()
glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d",
*queriesAverage, *podsPerNode, *upTo)
var spec string
if *gke != "" {
spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig")
} else {
spec = filepath.Join(os.Getenv("HOME"), ".kube", "config")
}
settings, err := clientcmd.LoadFromFile(spec)
if err != nil {
glog.Fatalf("Error loading configuration: %v", err.Error())
}
if *gke != "" {
settings.CurrentContext = *gke
}
config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
if err != nil {
glog.Fatalf("Failed to construct config: %v", err)
}
client, err := clientset.NewForConfig(config)
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
}
var nodes *v1.NodeList
for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
nodes, err = client.Nodes().List(v1.ListOptions{})
if err == nil {
break
}
glog.Warningf("Failed to list nodes: %v", err)
}
if err != nil {
glog.Fatalf("Giving up trying to list nodes: %v", err)
}
if len(nodes.Items) == 0 {
glog.Fatalf("Failed to find any nodes.")
}
glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
for i, node := range nodes.Items {
glog.Infof("%d: %s", i, node.Name)
}
queries := *queriesAverage * len(nodes.Items) * *podsPerNode
// Create the namespace
got, err := client.Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "serve-hostnames-"}})
if err != nil {
glog.Fatalf("Failed to create namespace: %v", err)
}
ns := got.Name
defer func(ns string) {
if err := client.Core().Namespaces().Delete(ns, nil); err != nil {
glog.Warningf("Failed to delete namespace ns: %e", ns, err)
} else {
// wait until the namespace disappears
for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
if _, err := client.Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
return
}
}
time.Sleep(time.Second)
}
}
}(ns)
glog.Infof("Created namespace %s", ns)
// Create a service for these pods.
glog.Infof("Creating service %s/serve-hostnames", ns)
// Make several attempts to create a service.
var svc *v1.Service
for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
t := time.Now()
svc, err = client.Services(ns).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "serve-hostnames",
Labels: map[string]string{
"name": "serve-hostname",
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Protocol: "TCP",
Port: 9376,
TargetPort: intstr.FromInt(9376),
}},
Selector: map[string]string{
"name": "serve-hostname",
},
},
})
glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t))
//......... the rest of this function is omitted .........
Example 15: BeforeEach
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
// The fact that we need this feels like a bug in ginkgo.
// https://github.com/onsi/ginkgo/issues/222
f.cleanupHandle = AddCleanupAction(f.AfterEach)
if f.ClientSet == nil {
By("Creating a kubernetes client")
config, err := LoadConfig()
Expect(err).NotTo(HaveOccurred())
config.QPS = f.options.ClientQPS
config.Burst = f.options.ClientBurst
if f.options.GroupVersion != nil {
config.GroupVersion = f.options.GroupVersion
}
if TestContext.KubeAPIContentType != "" {
config.ContentType = TestContext.KubeAPIContentType
}
f.ClientSet, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
clientRepoConfig := getClientRepoConfig(config)
f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
Expect(err).NotTo(HaveOccurred())
f.ClientPool = dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
}
if f.federated {
if f.FederationClientset_1_5 == nil {
By("Creating a release 1.4 federation Clientset")
var err error
f.FederationClientset_1_5, err = LoadFederationClientset_1_5()
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for federation-apiserver to be ready")
err := WaitForFederationApiserverReady(f.FederationClientset_1_5)
Expect(err).NotTo(HaveOccurred())
By("federation-apiserver is ready")
By("Creating a federation namespace")
ns, err := f.createFederationNamespace(f.BaseName)
Expect(err).NotTo(HaveOccurred())
f.FederationNamespace = ns
By(fmt.Sprintf("Created federation namespace %s", ns.Name))
}
By("Building a namespace api object")
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
Expect(err).NotTo(HaveOccurred())
f.Namespace = namespace
if TestContext.VerifyServiceAccount {
By("Waiting for a default service account to be provisioned in namespace")
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
} else {
Logf("Skipping waiting for service account")
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
inKubemark: ProviderIs("kubemark"),
masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
})
if err != nil {
Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
go f.gatherer.startGatheringData()
}
}
if TestContext.GatherLogsSizes {
f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool)
f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
go func() {
f.logsSizeVerifier.Run()
f.logsSizeWaitGroup.Done()
}()
}
}