

Python cluster.Cluster Class Code Examples

This article collects typical usage examples of the Python cluster.Cluster class. If you are wondering how the Cluster class is used in practice, or are looking for concrete Cluster usage examples, the selected snippets below may help.


The following presents 15 code examples of the Cluster class, sorted by popularity by default.
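
Note that each example comes from a different open-source project, so the constructor signature and the available methods of Cluster vary from listing to listing. As a minimal, hypothetical sketch of the common pattern (the no-argument constructor and the init_from_file call mirror Example 13 below; they are not a universal API):

# Minimal usage sketch, not a universal API: each project below ships its own
# cluster.Cluster, so constructor arguments and method names differ per example.
from cluster import Cluster

cluster = Cluster()                    # e.g. Example 13: no-argument constructor
cluster.init_from_file('nodes.yaml')   # then drive it with project-specific methods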

Example 1: alloc_new

 def alloc_new(self,name, configspec, vmitype, chefmode=True):
     '''Allocates a new cluster.
     
     @param name - name for the cluster (ideally unique)
     @param configspec - a ConfigSpec instance
     @param vmitype - string specifying vmi type (supported: 'vagrant')
     '''
     cid = self._db.insert({})        
     try:
         configspec.validate()
         cluster = Cluster( name, configspec, cid, chefmode=chefmode )
         VMI.Create(vmitype, cluster)
         self._db.update( 
                         { '_id' : cluster.id() }, 
                         { u'name' : name,
                           u'config' : configspec.jsonmap, 
                           u'vmi' : cluster.get_vmi().jsonmap(), 
                           u'machines' : cluster.machines()} )
     except Exception as exc:
         import traceback
         Log.error('Cluster creation failed: %s' % exc)
         Log.error( traceback.format_exc() )
         self._db.remove( { '_id' : cid } )
         raise Exception('Failed to create cluster: %s' % name)
     
     return cluster
Developer: allfs, Project: autooam, Lines: 26, Source: clustermgr.py

Example 2: add_message_to_cluster

 def add_message_to_cluster(self, message):
     rep = message.get_tokenrepresentation()
     c = self.get_cluster(rep)
     if c is None:
         c = Cluster(rep, "initial")
         self.__cluster.append(c)
     c.add_messages([message])
Developer: tumi8, Project: Protocol-Informatics, Lines: 7, Source: clustercollection.py

Example 3: reduce_pareto_set

    def reduce_pareto_set(self, par_set):
        """
        Performs the clustering on the Pareto set.
        """
        lista_cluster = []
        for solucion in par_set.solutions:
            cluster = Cluster()
            cluster.agregar_solucion(solucion)
            lista_cluster.append(cluster)

        while len(lista_cluster) > self.max_pareto_points:
            min_distancia = sys.maxint
            for i in range(0, len(lista_cluster) - 1):
                for j in range(i + 1, len(lista_cluster)):
                    c = lista_cluster[i]
                    distancia = c.calcular_distancia(lista_cluster[j])
                    if distancia < min_distancia:
                        min_distancia = distancia
                        c1 = i
                        c2 = j

            cluster = lista_cluster[c1].unir(lista_cluster[c2])  # returns a new cluster
            # remove the merged clusters, higher index first so the lower one does not shift
            del lista_cluster[c2]
            del lista_cluster[c1]

            lista_cluster.append(cluster)

        par_set = []
        for cluster in lista_cluster:
            solucion = cluster.centroide()
            par_set.append(solucion)

        return par_set
Developer: jorgeramirez, Project: AE, Lines: 33, Source: spea.py

Example 4: request_cluster

def request_cluster(argv):
    """
    Only requests a cluster on GCE and outputs all configuration information.
    :param argv: sys.argv
    :return: None
    """
    if len(argv) < 7:
        print_help()
        exit(1)

    cluster_name = argv[2]
    ambari_agent_vm_num = int(argv[3])
    docker_num = int(argv[4])
    service_server_num = int(argv[5])
    with_ambari_server = False
    ambari_server_num = int(argv[6])
    if ambari_server_num > 0:
        with_ambari_server = True

    cluster = Cluster()
    cluster.request_gce_cluster(ambari_agent_vm_num, docker_num, service_server_num,
                                with_ambari_server, cluster_name)

    time_to_wait = Config.ATTRIBUTES["gce_boot_time"]
    print "wait ", str(time_to_wait), " seconds for the cluster to boot ... ..."
    time.sleep(int(time_to_wait))

    data = Data()
    data.add_new_cluster(cluster)

    print "complete"
Developer: pengchengxu, Project: ambari-agent-simulator, Lines: 31, Source: launcher_cluster.py
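
request_cluster pulls all of its parameters positionally from sys.argv. A hedged sketch of the expected argument layout follows; argv[1] (presumably a sub-command consumed by the surrounding launcher) does not appear in the excerpt and is assumed, and every value is illustrative:

# Hypothetical sys.argv layout for request_cluster(argv); values are illustrative only.
argv = ['launcher_cluster.py',  # argv[0]: script name
        'request',              # argv[1]: assumed sub-command, not read by this function
        'my-cluster',           # argv[2]: cluster name
        '3',                    # argv[3]: number of Ambari agent VMs
        '5',                    # argv[4]: number of Docker containers
        '1',                    # argv[5]: number of service servers
        '1']                    # argv[6]: number of Ambari servers (> 0 adds an Ambari server)
request_cluster(argv)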

Example 5: get_cluster_obs

def get_cluster_obs(clusters, r):	
	res = Cluster(-1, [])
	for c in clusters:
		for obs_row in c.observations:
			if obs_row == r:
				res.id = c.id
				res.centroide = c.centroide
				res.observations = c.observations
	return res
Developer: Kodstok, Project: IA, Lines: 9, Source: kmeans.py

Example 6: quit

def quit(host, port, cluster_id, quit_cluster):
    logging.info('Node %s:%d quit from cluster [ %d ]', host, port, cluster_id)
    instance = pick_by(host, port)
    if instance.assignee is None:
        return
    Cluster.lock_by_id(instance.assignee_id)
    quit_cluster(host, port)
    instance.assignee = None
    db.session.add(instance)
Developer: Bluelich, Project: redis-ctl, Lines: 9, Source: node.py

Example 7: exportgroup_create

    def exportgroup_create(self, name, project, tenant, varray, exportgrouptype, export_destination=None):
        '''
        Takes the export group name and project name as input and
        creates the export group with the given name.
        parameters:
           name : Name of the export group.
           project: Name of the project path.
           tenant: Container tenant name.
        return
            returns with status of creation. 
        '''
        # check for existence of export group.
        try:
            status = self.exportgroup_show(name, project, tenant)
        except SOSError as e:
            if(e.err_code == SOSError.NOT_FOUND_ERR):
                if(tenant is None):
                    tenant = ""
                    
                fullproj = tenant + "/" + project
                projuri = Project(self.__ipAddr, self.__port).project_query(fullproj) 
                nhuri = VirtualArray(self.__ipAddr, self.__port).varray_query(varray)
                
                parms = {
                'name' : name,
                'project' : projuri,
                'varray' : nhuri,
                'type' :exportgrouptype
                }
                if(exportgrouptype and export_destination):
                    if (exportgrouptype == 'Cluster'):
                        cluster_obj = Cluster(self.__ipAddr, self.__port)
                        try:
                            cluster_uri = cluster_obj.cluster_query(export_destination, fullproj)
                        except SOSError as e:
                            raise e
                        parms['clusters'] = [cluster_uri]
                    elif (exportgrouptype == 'Host'):
                        host_obj = Host(self.__ipAddr, self.__port)
                        try:
                            host_uri = host_obj.query_by_name(export_destination)
                        except SOSError as e:
                            raise e
                        parms['hosts'] = [host_uri]
                    # else:   # exportgrouptype == Exclusive
                        # TODO: add code for initiator                 
                body = json.dumps(parms)
                (s, h) = common.service_json_request(self.__ipAddr, self.__port, "POST", 
                                             self.URI_EXPORT_GROUP, body)

                o = common.json_decode(s)
                return o
            else:
                raise e
        if(status):
            raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
                           "Export group with name " + name + " already exists")
Developer: tylerbaker, Project: controller-openstack-cinder, Lines: 57, Source: exportgroup.py

Example 8: exportgroup_add_cluster

 def exportgroup_add_cluster(self, exportgroupname, tenantname, projectname, clusternames, sync):
     exportgroup_uri = self.exportgroup_query(exportgroupname, projectname, tenantname)
     cluster_uris = []
     clusterObject = Cluster(self.__ipAddr, self.__port)
     for clustername in clusternames:
         cluster_uris.append(clusterObject.cluster_query(clustername, tenantname))
     parms = {}
     parms["cluster_changes"] = self._add_list(cluster_uris)
     o = self.send_json_request(exportgroup_uri, parms)
     return self.check_for_sync(o, sync)
Developer: blueranger, Project: coprhd-controller, Lines: 10, Source: exportgroup.py

Example 9: __init__

    def __init__(self, cluster_document):
        Cluster.__init__(self, cluster_document)
        self._config_members = self._resolve_members("configServers")
        #self._shards = self._resolve_shard_members()
        self._shards = self._resolve_members("shards")

        # members list stores the mongos servers
        if not self._members or not self._config_members or not self._shards:
            raise Exception("Please specify config, shard, and mongos servers for cluster %s"
                            % self.get_cluster_name())
Developer: richardxx, Project: mongoctl-service, Lines: 10, Source: sharded_cluster.py

Example 10: exportgroup_create

    def exportgroup_create(self, name, project, tenant, varray, exportgrouptype, export_destination=None):
        """
        Takes the export group name and project name as input
        and creates the export group with the given name.
        parameters:
           name : Name of the export group.
           project: Name of the project path.
           tenant: Container tenant name.
        return
            returns with status of creation.
        """
        # check for existence of export group.
        try:
            status = self.exportgroup_show(name, project, tenant)
        except SOSError as e:
            if e.err_code == SOSError.NOT_FOUND_ERR:
                if tenant is None:
                    tenant = ""

                fullproj = tenant + "/" + project
                projObject = Project(self.__ipAddr, self.__port)
                projuri = projObject.project_query(fullproj)

                varrayObject = VirtualArray(self.__ipAddr, self.__port)
                nhuri = varrayObject.varray_query(varray)

                parms = {"name": name, "project": projuri, "varray": nhuri, "type": exportgrouptype}

                if exportgrouptype and export_destination:
                    if exportgrouptype == "Cluster":
                        cluster_obj = Cluster(self.__ipAddr, self.__port)
                        try:
                            cluster_uri = cluster_obj.cluster_query(export_destination, fullproj)
                        except SOSError as e:
                            raise e
                        parms["clusters"] = [cluster_uri]
                    elif exportgrouptype == "Host":
                        host_obj = Host(self.__ipAddr, self.__port)
                        try:
                            host_uri = host_obj.query_by_name(export_destination)
                        except SOSError as e:
                            raise e
                        parms["hosts"] = [host_uri]

                body = json.dumps(parms)
                (s, h) = common.service_json_request(self.__ipAddr, self.__port, "POST", self.URI_EXPORT_GROUP, body)

                o = common.json_decode(s)
                return o
            else:
                raise e

        if status:
            raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR, "Export group with name " + name + " already exists")
Developer: blueranger, Project: coprhd-controller, Lines: 54, Source: exportgroup.py
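
For orientation, here is a hedged sketch of how this method might be invoked. The excerpt only shows the method itself (from exportgroup.py), so the name of the wrapper object holding it is an assumption:

# Hypothetical call; 'export_group_client' stands for an instance of whatever class
# defines exportgroup_create() -- its real name is not shown in the excerpt.
result = export_group_client.exportgroup_create(
    name='eg-demo',
    project='project1',
    tenant='tenant1',
    varray='varray1',
    exportgrouptype='Cluster',       # takes the Cluster branch above
    export_destination='cluster1')   # resolved via Cluster.cluster_query()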

Example 11: addNewCluster

 def addNewCluster(self, tf, sentence):
     """
     Creates a new cluster and adds it to the clusters.
     tf - term frequency counts of the given sentence
     sentence - sentence to be added to the cluster
     """
     self.newCID += 1
     newCluster = Cluster(self.newCID)
     newCluster.tf = tf
     newCluster.addSentenceToCluster(sentence)
     self.clusters[self.newCID] = newCluster
     print "Added new cluster for cid: {}".format(self.newCID)
Developer: ddeeps2610, Project: Planner, Lines: 12, Source: sentenceClusterer.py

Example 12: train

def train(config):
  '''Train loop for the TDM algorithm'''

  train_rawdata_url = config["train_rawdata_url"]
  test_rawdata_url = config["test_rawdata_url"]
  data_dir = config['data_dir']
  raw_train_data = os.path.join(data_dir, train_rawdata_url.split('/')[-1])
  raw_test_data = os.path.join(data_dir, test_rawdata_url.split('/')[-1])
  tree_filename = os.path.join(data_dir, config['tree_filename'])
  train_sample = os.path.join(data_dir, config['train_sample'])
  test_sample = os.path.join(data_dir, config['test_sample'])
  stat_file = os.path.join(data_dir, config['stat_file'])

  print("Start to generating initialization data")
  # Download the raw data
  hdfs_download(train_rawdata_url, raw_train_data)
  hdfs_download(test_rawdata_url, raw_test_data)

  generator = Generator(raw_train_data,
                        raw_test_data,
                        tree_filename,
                        train_sample,
                        test_sample,
                        config['feature_conf'],
                        stat_file,
                        config['seq_len'],
                        config['min_seq_len'],
                        config['parall'],
                        config['train_id_label'],
                        config['test_id_label'])
  generator.generate()

  # Upload generating data to hdfs
  hdfs_upload(data_dir, config["upload_url"])

  # TDM train
  model_embed = os.path.join(data_dir, 'model.embed')
  tree_upload_dir = os.path.join(config['upload_url'], os.path.split(data_dir)[-1])
  for i in range(config['epocs']):
    print('Training, iteration: {iteration}'.format(iteration=i))

    # TODO(genbao.cgb): Train with xdl

    # Download the model file
    hdfs_download(config['model_url'], model_embed)

    # Tree clustering
    cluster = Cluster(model_embed, tree_filename,
                      parall=config['parall'], stat_file=stat_file)
    cluster.train()

    # Upload clustered tree to hdfs
    hdfs_upload(tree_filename, tree_upload_dir, over_write=True)
Developer: q64545, Project: x-deeplearning, Lines: 53, Source: tdm.py
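
The training loop above is driven entirely by a single config dictionary. The keys in the sketch below are the ones the code actually reads; every value is an illustrative placeholder, not taken from the original project:

# Illustrative config for train(); keys match those read above, values are placeholders.
config = {
    'train_rawdata_url': 'hdfs://namenode/path/train_raw.csv',
    'test_rawdata_url': 'hdfs://namenode/path/test_raw.csv',
    'data_dir': './data',
    'tree_filename': 'tree.pb',
    'train_sample': 'train_sample',
    'test_sample': 'test_sample',
    'stat_file': 'stat.file',
    'feature_conf': 'feature.conf',
    'seq_len': 70,
    'min_seq_len': 8,
    'parall': 16,
    'train_id_label': 'train_unit_id',
    'test_id_label': 'test_unit_id',
    'upload_url': 'hdfs://namenode/upload_dir',
    'model_url': 'hdfs://namenode/model_dir',
    'epocs': 1,          # spelled 'epocs' in the original code
}
train(config)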

Example 13: main

def main():
    cluster = Cluster()
    cluster.init_from_file('nodes.yaml')

    for n in cluster.nodes:
        n.execute('hostname; sleep 4')

    while any((n.is_busy() for n in cluster.nodes)):
        print 'waiting'
        sleep(0.5)

    for n in cluster.nodes:
        print n.last_result
Developer: jstutters, Project: distqueue, Lines: 13, Source: distqueue.py

Example 14: initialization

    def initialization(self):
        for i in xrange(0, self.clusterNumber):
            c = Cluster(i, len(self.observations[0]))
            self.clusters.append(c)

        i = 0
        for obs in self.observations:
            obs = np.append(obs, 0)
            self.clusters[i % self.clusterNumber].addObservation(obs, 0)
            i += 1

        for c in self.clusters:
            c.updateCentroid()
            c.updateDist()
Developer: btrd, Project: anomaly_detection, Lines: 14, Source: kMeanClusterer.py

Example 15: train

def train(config):
  '''Train loop for the TDM algorithm'''

  data_dir = os.path.join(DIR, config['data_dir'])
  tree_filename = os.path.join(data_dir, config['tree_filename'])
  stat_file = os.path.join(data_dir, config['stat_file'])

  print("Start to cluster tree")
  # Download item id
  upload_dir = os.path.join(config['upload_url'], os.path.split(data_dir)[-1])
  item_id_url = os.path.join(upload_dir, config['item_id_file'])
  item_id_file = os.path.join(data_dir, 'item.id')
  hdfs_download(item_id_url, item_id_file)
  model_embed_tmp = os.path.join(data_dir, 'model.embed.tmp')
  hdfs_download(config['model_url'] + '/item_emb', model_embed_tmp)

  # Read max item id from item id file
  max_item_id = 0
  with open(item_id_file) as f:
    for line in f:
      item_id = int(line)
      if item_id > max_item_id:
        max_item_id = item_id
  max_item_id += 1

  model_embed = os.path.join(data_dir, 'model.embed')
  item_count = 0
  id_set = set()
  with open(model_embed_tmp) as f:
    with open(model_embed, 'wb') as fo:
      for line in f:
        arr = line.split(",")
        item_id = int(arr[0])
        if (len(arr) > 2) and (item_id < max_item_id) and (item_id not in id_set):
          id_set.add(item_id)
          item_count += 1
          fo.write(line)

  os.remove(model_embed_tmp)
  print("Filer embedding done, records:{}, max_leaf_id: {}".format(
      item_count, max_item_id))

  # Tree clustering
  cluster = Cluster(model_embed, tree_filename,
                    parall=config['parall'], stat_file=stat_file)
  cluster.train()

  # Upload clustered tree to hdfs
  tree_upload_dir = os.path.join(config['upload_url'], os.path.split(data_dir)[-1])
  hdfs_upload(tree_filename, tree_upload_dir, over_write=True)
Developer: q64545, Project: x-deeplearning, Lines: 50, Source: tree_cluster.py


Note: The cluster.Cluster class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not republish without permission.