

Python client.Client Class Code Examples

This article collects typical usage examples of the Python class snakebite.client.Client, drawn from open-source code. If you are wondering what the Client class does, how to use it, or want to see it in practice, the curated examples below should help.


Fifteen code examples of the Client class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
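Before working through the examples, here is a minimal sketch of the pattern they all share: construct a Client pointed at a NameNode, call a method on it, and iterate over the generator it returns. The host and port below (localhost, 9000) are assumptions; substitute the address of your own NameNode.

from snakebite.client import Client

# Assumed NameNode address; adjust host/port for your cluster.
client = Client('localhost', 9000)

# Most Client methods take a list of paths and return a generator of dicts.
for entry in client.ls(['/']):
    print entry['path'], entry['file_type']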

Example 1: signature

def signature(self):
    client = Client(self._host, self._port, effective_user=self._user, use_trash=False)
    stats = client.stat([self._partial])
    if stats['file_type'] == 'f':
        return "modification_time:{}".format(stats['modification_time'])
    else:
        return stats['file_type']
Developer: lexman, Project: tuttle, Lines: 7, Source: hdfs.py

Example 2: EffectiveUserTest

class EffectiveUserTest(MiniClusterTestBase):
    ERR_MSG_TOUCH = "org.apache.hadoop.security.AccessControlException\nPermission denied: user=__foobar"
    ERR_MSG_STAT = "`/foobar2': No such file or directory"

    VALID_FILE = '/foobar'
    INVALID_FILE = '/foobar2'

    def setUp(self):
        self.custom_client = Client(self.cluster.host, self.cluster.port)
        self.custom_foobar_client = Client(host=self.cluster.host,
                                           port=self.cluster.port,
                                           effective_user='__foobar')

    def test_touch(self):
        print tuple(self.custom_client.touchz([self.VALID_FILE]))
        try:
            tuple(self.custom_foobar_client.touchz([self.INVALID_FILE]))
        except Exception, e:
            self.assertTrue(e.message.startswith(self.ERR_MSG_TOUCH))

        self.custom_client.stat([self.VALID_FILE])
        try:
            self.custom_client.stat([self.INVALID_FILE])
        except Exception, e:
            self.assertEquals(e.message, self.ERR_MSG_STAT)
Developer: 3rwww1, Project: snakebite, Lines: 25, Source: effective_user_test.py

Example 3: test

def test():
    """
    """
    client = Client("192.168.99.100", 9000)
    for f in client.ls(['/files']):
        print f
        for line in client.cat([f.get('path')]):
            for l in line:
                print l
Developer: alfonsokim, Project: scripts, Lines: 9, Source: test.py

Example 4: run

    def run(self):
        c = Client(self.host, self.port)

        listing = c.ls([self.log_path], recurse=True)
        events = []
        for f in listing:
            path = f['path']

            if not path.endswith('.jhist'):
                continue

            ts = arrow.get(f['modification_time']/1000)

            if ts <= self.checktime:
                continue

            job_id = job_pattern.match(path.split('/')[-1]).group(0)

            if job_id in self.jobs and self.jobs[job_id] >= ts.timestamp*1000:
                log.debug('Skipping processed job: ' + job_id)
                continue

            config_path = path[:path.rfind('/')]+'/'+job_id+'_conf.xml'

            event = {
                'inviso.type': 'mr2',
                'job.id': job_id,
                'application.id': job_id.replace('job_', 'application_'),
                'job.type': 'mr2',
                'file.type': ['history', 'config'],
                'jobflow': self.jobflow,
                'cluster.id': self.cluster_id,
                'cluster': self.cluster_name,
                'history.uri': 'hdfs://%s:%s%s' % (self.host, self.port, path),
                'config.uri': 'hdfs://%s:%s%s' % (self.host, self.port, config_path),
                'host': self.host,
                'port': self.port,
                'timestamp': str(ts),
                'epoch': f['modification_time'],
                'mapreduce.version': 'mr2'
            }

            log.info('Publishing event: (%s) %s %s' % (event['cluster'], event['job.id'], ts))
            events.append(event)
        for chunk in [events[i:i + self.chunk_size] for i in xrange(0, len(events), self.chunk_size)]:
            self.publisher.publish(chunk)
Developer: Netflix, Project: inviso, Lines: 46, Source: monitor.py

Example 5: delete_item

def delete_item(config, filepath='', localpath=''):

    if(config['BACKEND'] == 'hdfs'):
        client = Client(socket.gethostname(), config['HADOOP_RPC_PORT'], use_trash=False)
        del_gen = client.delete([filepath], recurse=True)
        for del_item in del_gen:
            pass
    elif(config['BACKEND'] == 'swift'):
        pass  # To be implemented

    # Deleting modules or datasets from local directories (will also suffice for nfs backend)
    if(os.path.isdir(localpath)):  # Check if it is a dataset
        shutil.rmtree(localpath)
    else:
        try:
            os.remove(localpath)
        except OSError:
            pass
Developer: CSC-IT-Center-for-Science, Project: spark-analysis, Lines: 18, Source: helper.py
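A hypothetical call to the delete_item function above; the config keys mirror the ones the function reads, and the paths are placeholders:

# Hypothetical config and paths; 'HADOOP_RPC_PORT' must match your cluster.
config = {'BACKEND': 'hdfs', 'HADOOP_RPC_PORT': 8020}
delete_item(config, filepath='/user/demo/old_dataset', localpath='/tmp/old_dataset')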

Example 6: crfalign

def crfalign(sc, inputFilename, outputDirectory, 
            limit=LIMIT, location='hdfs', outputFormat="text", partitions=None, deleteFirst=True):

    # crfConfigDir = os.path.join(os.path.dirname(__file__), "data/config")
    # def cpath(n):
    #     return os.path.join(crfConfigDir, n)

    # smEyeColor = HybridJaccard(ref_path=cpath("eyeColor_reference_wiki.txt"),
    #                            config_path=cpath("eyeColor_config.txt"))
    # smHairColor = HybridJaccard(ref_path=cpath("hairColor_reference_wiki.txt"),
    #                             config_path=cpath("hairColor_config.txt"))
    # print smEyeColor, smHairColor

    if location == "hdfs":
        if deleteFirst:
            namenode = "memex-nn1"
            port = 8020
            client = Client(namenode, 8020, use_trash=True)
            try:
                for deleted in client.delete([outputDirectory], recurse=True):
                    print deleted
            except FileNotFoundException as e:  # from snakebite.errors
                pass

    # hypothesis1: data fetched this way prompts the lzo compression error
    # hypothesis2: but it doesn't matter, error is just a warning
    rdd_crfl = sc.textFile(inputFilename)
    rdd_crfl.setName('rdd_crfl')

    if limit:
        rdd_crfl = sc.parallelize(rdd_crfl.take(limit))
    if partitions:
        rdd_crfl = rdd_crfl.repartition(partitions)

    rdd_final = rdd_crfl
    print outputFormat
    if outputFormat == "sequence":
        rdd_final.saveAsSequenceFile(outputDirectory)
    elif outputFormat == "text":
        print "saving to %s" % outputDirectory
        rdd_final.saveAsTextFile(outputDirectory)
    else:
        raise RuntimeError("Unrecognized output format: %s" % outputFormat)
Developer: cjsanjay, Project: dig-crf, Lines: 43, Source: crfminimal.py

Example 7: __init__

def __init__(self, workflow, **kwargs):
    super(HDFSTextLoader, self).__init__(workflow, **kwargs)
    self.file_name = kwargs["file"]
    self.chunk_lines_number = kwargs.get("chunk", 1000)
    client_kwargs = dict(kwargs)
    del client_kwargs["file"]
    if "chunk" in kwargs:
        del client_kwargs["chunk"]
    self.hdfs_client = Client(**client_kwargs)
    self.output = [""] * self.chunk_lines_number
    self.finished = Bool()
Developer: 2php, Project: veles, Lines: 11, Source: hdfs_loader.py

Example 8: getTrainedModel

def getTrainedModel(hdfsServer, modelFile):
    hdfsPort = int(os.environ.get('HDFS_NAME_PORT', 8020))
    modelSavePath = "/user/" + os.getenv('LOGNAME') + "/data/model/" + modelFile + '/'

    # Load the saved model data
    hdfs_client = Client(hdfsServer, hdfsPort)
    filesInfo = hdfs_client.ls([modelSavePath])

    # Copy HDFS files to local temp directory
    # First clean up and recreate the temp folder
    copyDir = tempfile.gettempdir() + "/" + modelFile
    shutil.rmtree(copyDir, ignore_errors=True)
    os.makedirs(copyDir)
    res = hdfs_client.copyToLocal([f['path'] for f in filesInfo], copyDir)
    for r in res:
        if not r['result']:
            print "Error: %s" % r

    modelFilePath = copyDir + '/' + modelFile
    print "Load model from  %s" % modelFilePath
    return joblib.load(modelFilePath)
Developer: patng323, Project: w205-course-project, Lines: 21, Source: processStream.py
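A hypothetical call to the function above, assuming a model named belt.model (the file produced by Example 14 below) has already been saved to the HDFS model directory; the server name is a placeholder:

# 'namenode.example.com' is a placeholder for your HDFS NameNode host.
clf = getTrainedModel('namenode.example.com', 'belt.model')
print clf.predict([[0.5, 1.2]])  # the feature vector shape is illustrative only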

Example 9: getObjsBackend

def getObjsBackend(objs, backend, config):

    if(backend == 'hdfs'):

        client = Client(socket.gethostname(), config['HADOOP_RPC_PORT'], use_trash=False)

        for obj in objs:
            try:
                copy_gen = client.copyToLocal([obj[0]], obj[1])
                for copy_item in copy_gen:
                    pass
            except Exception as e:
                print(e)
    elif(backend == 'swift'):

        options = {
            'os_auth_url': os.environ['OS_AUTH_URL'],
            'os_username': os.environ['OS_USERNAME'],
            'os_password': os.environ['OS_PASSWORD'],
            'os_tenant_id': os.environ['OS_TENANT_ID'],
            'os_tenant_name': os.environ['OS_TENANT_NAME'],
        }
        swiftService = SwiftService(options=options)

        for obj in objs:

            # Create the containers which are used in this application for Object Storage
            if(obj[0] == 'sqlite.db'):
                swiftService.post(container='containerFiles')
                swiftService.post(container='containerFeatures')
                swiftService.post(container='containerModules')

            out_file = obj[1]  # Get the output file location from runner
            localoptions = {'out_file': out_file}
            objects = [obj[0]]
            swiftDownload = swiftService.download(container='containerModules', objects=objects, options=localoptions)

            for downloaded in swiftDownload:
                if("error" in downloaded.keys()):
                    raise RuntimeError(downloaded['error'])
                # print(downloaded)

    elif(backend == 'nfs'):  # Every file is already in respective local dirs
        pass
Developer: CSC-IT-Center-for-Science, Project: spark-analysis, Lines: 39, Source: helper.py

Example 10: HDFSStat

class HDFSStat(object):

    cluster = 'hostname'
    port = 8020
    default_path = '/user/hive/warehouse'

    @staticmethod
    def build_path(table):
        nm = table.split('.')[0]
        tb = table.split('.')[1]
        return HDFSStat.default_path + '/' + nm + '.db/' + tb

    def __init__(self):
        self.client = Client(HDFSStat.cluster, HDFSStat.port, use_trash=False)

    def latest_partition(self, table_name, table_path=None):
        t_path = HDFSStat.build_path(table_name) if table_path is None else table_path
        latest_dir = list(self.client.ls([t_path])).pop()
        return path.basename(latest_dir['path']).split('=')[1]

    def poke_partition(self, table_name, partition_name, partition, table_path=None):
        t_path = HDFSStat.build_path(table_name) if table_path is None else table_path
        partition_path = t_path + '/' + partition_name + '=' + partition
        return self.client.test(partition_path, exists=True, directory=True, zero_length=False)
Developer: imsid, Project: kickstarter, Lines: 24, Source: hdfsstat.py
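A hypothetical usage sketch of the HDFSStat class above; the table name events.clicks and the partition column dt are made-up placeholders, and the class attributes cluster and port must point at a real NameNode:

# 'events.clicks' and the 'dt' partition column are placeholders.
stat = HDFSStat()
latest = stat.latest_partition('events.clicks')
print stat.poke_partition('events.clicks', 'dt', latest)  # True if the partition dir exists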

Example 11: HDFSTextLoader

class HDFSTextLoader(Unit, TriviallyDistributable):
    def __init__(self, workflow, **kwargs):
        super(HDFSTextLoader, self).__init__(workflow, **kwargs)
        self.file_name = kwargs["file"]
        self.chunk_lines_number = kwargs.get("chunk", 1000)
        client_kwargs = dict(kwargs)
        del client_kwargs["file"]
        if "chunk" in kwargs:
            del client_kwargs["chunk"]
        self.hdfs_client = Client(**client_kwargs)
        self.output = [""] * self.chunk_lines_number
        self.finished = Bool()

    def initialize(self):
        self.debug("Opened %s", self.hdfs_client.stat([self.file_name]))
        self._generator = self.hdfs_client.text([self.file_name])

    def run(self):
        assert not self.finished
        try:
            for i in range(self.chunk_lines_number):
                self.output[i] = next(self._generator)
        except StopIteration:
            self.finished <<= True
Developer: 2php, Project: veles, Lines: 24, Source: hdfs_loader.py

Example 12: __init__

    def __init__(self, path, name_node, hive_server,
                 user="root", hive_db="default", password=None, nn_port=8020, hive_port=10000):

        # HDFS Connection
        self._client = Client(name_node, nn_port)

        self._db = hive_db

        # Hive Connection
        self._hive = pyhs2.connect(host=hive_server,
                                   port=hive_port,
                                   authMechanism="PLAIN",
                                   database=hive_db,
                                   user=user,
                                   password=password)
        self._path = path
Developer: grundprinzip, Project: pyxplorer, Lines: 16, Source: loader.py

Example 13: __init__

def __init__(self, topic, user, server, port, web_port, base, hdfs_tmp):
    self.topic = topic
    self.username = user
    self.server = server
    self.port = port
    self.base = base
    self.path = ["%s/%s" % (base, topic)]
    self.hdfs_tmp = hdfs_tmp

    try:
        self.client = Client(server, port, effective_user=user)
        self.hdfsclient = hdfs.client.InsecureClient(
            "http://%s:%d" % (server, web_port), user=user)
        self.daylist = self.check()
    except:
        print "Base path %s does not contain valid structure" % (base)
        raise
Developer: agrebin, Project: snappymerge, Lines: 17, Source: snappymerge.py

Example 14: int

import argparse
import os
import subprocess

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib  # sklearn < 0.23; newer code imports joblib directly
from snakebite.client import Client

parser = argparse.ArgumentParser()
parser.add_argument("--hdfs", help="HDFS FS name", default = 'localhost')
parser.add_argument("--model", help="Name of model file", default = 'belt.model')
args = parser.parse_args()


hdfsServer = args.hdfs
hdfsPort = int(os.environ.get('HDFS_NAME_PORT', 8020))
hdfsHost = "hdfs://" + hdfsServer + ":" + str(hdfsPort)
modelSavePath = "/user/" + os.getenv('LOGNAME') + "/data/model/" + args.model + "/"
print "hdfs=%s, savePath=%s, hdfsHost=%s" % (hdfsServer, modelSavePath, hdfsHost)

hdfs_client = Client(hdfsServer, hdfsPort)

X_train_file = hdfs_client.text(["/user/" + os.getenv('LOGNAME') + "/data/X_train.txt"]).next()
y_train_file = hdfs_client.text(["/user/" + os.getenv('LOGNAME') + "/data/y_train.txt"]).next()

X_train = np.genfromtxt(str.splitlines(X_train_file))
y_train = np.genfromtxt(str.splitlines(y_train_file))

clf = LogisticRegression()
clf = clf.fit(X_train, y_train)

files = joblib.dump(clf, "belt.model")

subprocess.check_call(['hdfs', 'dfs', '-rm', '-r', '-f', modelSavePath], shell=False)
subprocess.check_call(['hdfs', 'dfs', '-mkdir', '-p', modelSavePath], shell=False)
Developer: patng323, Project: w205-course-project, Lines: 30, Source: trainModel.py

Example 15: Client

from snakebite.client import Client

client = Client('localhost', 9000)
for f in client.copyToLocal(['/input/input.txt'], '/tmp'):
    print f
Developer: CedarLogic, Project: HadoopWithPython, Lines: 5, Source: copy_to_local.py


Note: The snakebite.client.Client class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's license before distributing or using the code, and do not republish without permission.