This article collects and summarizes typical usage examples of the Algorithmia class in Python. If you are wondering what the Python Algorithmia class is for, how to use it, or what real code that calls it looks like, the curated class code examples here may help.
The following presents 15 code examples of the Algorithmia class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
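Most of the examples below follow the same basic pattern: create a client with an API key, look up an algorithm by its user/name/version path, and call pipe() on the input. A minimal sketch of that pattern (the API key is a placeholder; the algorithm path is the summarizer used in Example 6; depending on the client version, pipe() returns either the raw result or a response object with a .result attribute, and both styles appear below):

import Algorithmia

# Placeholder API key; substitute your own.
client = Algorithmia.client('YOUR_ALGORITHMIA_API_KEY')
algo = client.algo('nlp/Summarizer/0.1.2')
response = algo.pipe('Text to summarize.')
print(response.result)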
Example 1: abstractToKeyword
import subprocess
import Algorithmia

def abstractToKeyword(title):
    # Query scholar.py for the top result matching the title, in CSV format.
    process = subprocess.Popen('python scholar.py -c %(count)d -A %(text)s --csv' \
        % {"count": 1, "text": title}, shell=True, stdout=subprocess.PIPE)
    output = process.communicate()[0]
    elements = output.rsplit('|')
    lastIndex = len(elements) - 1
    # Pair the first and last CSV fields as the document passed to the LDA algorithm.
    LDAinput = [[elements[0], elements[lastIndex]], 1]
    client = Algorithmia.client('simfAKlzXJA516uRJm37b8tT9b31')
    algo = client.algo('kenny/LDA/0.1.3')
    results = algo.pipe(LDAinput)
    return list(results[0].keys())
Example 2: generate_sentence
import Algorithmia
import api_key  # local module that exposes the key as api_key.key

def generate_sentence(filepath):
    '''
    Generates a sentence given a trained trigram model
    PARAMETERS:
        <str> filepath: location of the trained model in the
                        Algorithmia Data API
    RETURNS:
        <str> output: a randomly generated sentence
    '''
    client = Algorithmia.client(api_key.key)
    input = [filepath, "xxBeGiN142xx", "xxEnD142xx"]
    algo = client.algo('ngram/RandomTextFromTrigram/0.1.1')
    print(algo.pipe(input))
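A hypothetical call, assuming a trigram model has already been written to the Data API path used later in Examples 11 and 12:

generate_sentence("data://.algo/ngram/GenerateTrigramFrequencies/temp/trigrams.txt")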
Example 3: test_create_acl
def test_create_acl(self):
    c = Algorithmia.client(os.environ['ALGORITHMIA_API_KEY'])
    dd = DataDirectory(c, 'data://.my/privatePermissions')
    if dd.exists():
        dd.delete(True)
    dd.create(ReadAcl.private)
    dd_perms = DataDirectory(c, 'data://.my/privatePermissions').get_permissions()
    self.assertEquals(dd_perms.read_acl, AclType.private)
    dd.update_permissions(ReadAcl.public)
    dd_perms = DataDirectory(c, 'data://.my/privatePermissions').get_permissions()
    self.assertEquals(dd_perms.read_acl, AclType.public)
Example 4: get_faces
import base64
import Algorithmia

def get_faces(path):
    with open(settings.MEDIA_ROOT + "/" + path, "rb") as img:
        bimage = base64.b64encode(img.read())
    Algorithmia.apiKey = "Simple simivSeptsC+ZsLks5ia0wXmFbC1"
    result = Algorithmia.algo("/ANaimi/FaceDetection").pipe(bimage)
    faces = []
    for rect in result:
        face = Face()
        face.name = "Petter Rabbit"
        face.x = rect["x"]
        face.y = rect["y"]
        face.width = rect["width"]
        face.height = rect["height"]
        faces.append(face)
    for face in faces:
        face.save()
    return faces
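The example above sets the module-level Algorithmia.apiKey and calls Algorithmia.algo directly, an older interface that several later examples also use. With the client object used elsewhere in this article, a roughly equivalent call might look like this sketch (the key and image path are placeholders):

import base64
import Algorithmia

client = Algorithmia.client('YOUR_ALGORITHMIA_API_KEY')
with open('photo.jpg', 'rb') as img:
    b64 = base64.b64encode(img.read())
# Same face-detection algorithm, called through the client object.
rects = client.algo('ANaimi/FaceDetection/0.1.2').pipe(b64)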
Example 5: get_faces
import base64
import Algorithmia

def get_faces(path):
    with open(path, 'rb') as img:
        bimage = base64.b64encode(img.read())
    Algorithmia.apiKey = 'API_KEY'
    result = Algorithmia.algo('/algo/ANaimi/FaceDetection/0.1.0').pipe(bimage)
    faces = []
    for rect in result:
        face = Face()
        face.name = person_name()
        face.x = rect['x']
        face.y = rect['y']
        face.width = rect['width']
        face.height = rect['height']
        faces.append(face)
    return faces
Example 6: random
import json
import wikipedia
import Algorithmia

def random():
    try:
        query = wikipedia.random(pages=1)
        input = wikipedia.WikipediaPage(title=query).summary
        title = wikipedia.WikipediaPage(title=query).title
        image = wikipedia.WikipediaPage(title=query).images[0]
        client = Algorithmia.client('Simple simR+{}'.format(api_key))
        algo = client.algo('nlp/Summarizer/0.1.2')
        contents = {
            'image': image,
            'title': title,
            'summary': algo.pipe(input),
            'link': 'https://en.wikipedia.org/wiki/{}'.format(title)
        }
    except:
        return json.dumps({
            'msg': "Sorry, we couldn't find a Wikipedia article matching your search."
        })
    return json.dumps(contents)
Example 7: summarise_img
def summarise_img(src, options=False):
    '''
    Retrieve meta-data for an image web resource.
    Uses Algorithmia, OpenShift or a similar cloud service.
    '''
    import Algorithmia
    client = Algorithmia.client(config.ALGORITHMIA['api_key'])
    algo = client.algo('deeplearning/IllustrationTagger/0.2.3')
    input = {"image": src}
    if options:
        # Pass extra options through to the algorithm,
        # e.g. tags (optional) or a probability threshold such as 0.3.
        for opt, value in options.items():
            input[opt] = value
    result = algo.pipe(input)
    return result
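A hypothetical call (the image URL is a placeholder; the threshold option follows the comment in the code above):

print(summarise_img('https://example.com/picture.jpg', options={'threshold': 0.3}))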
Example 8: pull_tweets
import Algorithmia

def pull_tweets():
    """Pull tweets from the Twitter API via Algorithmia."""
    input = {
        "query": q_input,
        "numTweets": "700",
        "auth": {
            "app_key": 'your_consumer_key',
            "app_secret": 'your_consumer_secret_key',
            "oauth_token": 'your_access_token',
            "oauth_token_secret": 'your_access_token_secret'
        }
    }
    client = Algorithmia.client('your_algorithmia_api_key')
    algo = client.algo('twitter/RetrieveTweetsWithKeyword/0.1.3')
    tweet_list = [{'user_id': record['user']['id'],
                   'retweet_count': record['retweet_count'],
                   'text': record['text']}
                  for record in algo.pipe(input).result]
    return tweet_list
Example 9: get_faces
import base64
import Algorithmia

def get_faces(path):
    print("getting faces")
    path = MEDIA_ROOT + "/" + path
    with open(path, 'rb') as img:
        bimage = base64.b64encode(img.read())
    Algorithmia.apiKey = 'Simple totally_real_api_key'
    result = Algorithmia.algo('/ANaimi/FaceDetection').pipe(bimage)
    faces = []
    for rect in result:
        print("found face")
        face = Face()
        face.name = "Anon"
        face.x = rect['x']
        face.y = rect['y']
        face.width = rect['width']
        face.height = rect['height']
        faces.append(face)
        Face.save(face)
    return faces
Example 10: get_faces
def get_faces(photo):
    import os
    import Algorithmia
    import base64
    Algorithmia.apiKey = os.environ.get('ALGORITHMIA_KEY')
    with default_storage.open(photo.img.name, 'rb') as img:
        b64 = base64.b64encode(img.read())
    rectangles = Algorithmia.algo("/ANaimi/FaceDetection/0.1.2").pipe(b64)
    faces = []
    for rect in rectangles:
        face = Face()
        face.photo = photo
        face.name = '?'
        face.x = rect['x']
        face.y = rect['y']
        face.width = rect['width']
        face.height = rect['height']
        face.save()
        faces.append(face)
    return faces
Example 11: generate_trigrams
import Algorithmia
import api_key  # local module that exposes the key as api_key.key

def generate_trigrams(corpus, filepath):
    '''
    Generates a trained trigram model
    PARAMETERS:
        str[] corpus: array of strings generated from splitting
                      the original corpus. Needs beginning and
                      end tags in the data
        <str> filepath: location where the data is stored in the
                        Algorithmia Data API
    RETURNS:
        filepath: location where the data is stored in the Algorithmia
                  Data API (as confirmation)
    '''
    with open(corpus, 'r') as myfile:
        data = myfile.read().replace('\n', '')
    data = data.replace("xxEnD142xx", "xxEnD142xx qq")
    data = data.split(" qq ")
    input = [data, "xxBeGiN142xx", "xxEnD142xx", filepath]
    client = Algorithmia.client(api_key.key)
    algo = client.algo('ngram/GenerateTrigramFrequencies/0.1.1')
    print("Trigram frequency txt in the Data API, filepath is:")
    print(algo.pipe(input))
Example 12: main
import re
from random import randint
import Algorithmia
import api_key  # local module that exposes the key as api_key.key

def main(filepath, outpath, length):
    story = ''
    client = Algorithmia.client(api_key.key)
    alg_path = "data://.algo/ngram/GenerateTrigramFrequencies/temp/trigrams.txt"
    generate_trigrams(filepath, alg_path)
    while len(re.findall(r'\w+', story)) < length:
        print("Generating new paragraph...")
        input = ["data://.algo/ngram/GenerateTrigramFrequencies/temp/trigrams.txt",
                 "xxBeGiN142xx", "xxEnD142xx", (randint(1, 9))]
        new_par = client.algo('/lizmrush/GenerateParagraphFromTrigram/0.1.2').pipe(input)
        if len(re.findall(r'\w+', story)) + len(re.findall(r'\w+', new_par)) > length:
            break
        story += new_par.strip()
        story += '\n\n'
        print("Word count:")
        print(len(re.findall(r'\w+', story)))
    with open(outpath, 'w') as f:
        f.write(story.encode('utf8'))
    print("Complete! Story written to " + outpath)
Example 13: get_faces
def get_faces(photo):
    import Algorithmia
    import base64
    Algorithmia.apiKey = "Simple simWy1EsBB4ZucRa4q8DiPocne11"
    with open(photo.image.path, "rb") as img:
        b64 = base64.b64encode(img.read())
    result = Algorithmia.algo("/ANaimi/FaceDetection").pipe(b64)
    faces = []
    for rect in result:
        face = Face()
        face.photo = photo
        face.name = '?'
        face.x = rect['x']
        face.y = rect['y']
        face.width = rect['width']
        face.height = rect['height']
        face.save()
        faces.append(face)
    return faces
Example 14: main
import argparse
import Algorithmia

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--api-key', required=True, help='algorithmia api key')
    parser.add_argument('--connector-path', required=True, help='s3 or dropbox path for the directory to scan')
    parser.add_argument('--recursive', action='store_true', help='continue scanning all sub-directories of the connector path')
    args = parser.parse_args()
    # Initialize the Algorithmia Python client
    client = Algorithmia.client(args.api_key)
    # Get the algorithm we plan to use on each picture
    algo = client.algo('deeplearning/ColorfulImageColorization/1.0.1')
    algo.set_options(timeout=600)  # This is a slow algorithm, so bump the timeout up to 10 minutes
    # The root-level directory that we will traverse
    top_level_dir = client.dir(args.connector_path)
    # Colorize the files
    if args.recursive:
        recursivelyColorize(algo, args.connector_path, top_level_dir)
    else:
        colorizeFilesInDirectory(algo, args.connector_path, top_level_dir)
    print('Done processing!')
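The helper functions recursivelyColorize and colorizeFilesInDirectory are referenced but not shown in this listing. A minimal sketch of what colorizeFilesInDirectory might look like, assuming the directory object returned by client.dir() exposes a files() iterator whose items carry a path attribute (check the client version you are using):

def colorizeFilesInDirectory(algo, dir_path, dir_obj):
    # Hypothetical helper: run the colorization algorithm on every file in the directory.
    for f in dir_obj.files():
        response = algo.pipe({"image": "data://" + f.path})
        print("Colorized {}: {}".format(f.path, response.result))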
Example 15: cleanupDoc
from nltk import word_tokenize
from nltk.corpus import stopwords
import string

def cleanupDoc(s):
    stopset = set(stopwords.words('english') + list(string.punctuation) + ['http', 'https'])
    tokens = word_tokenize(s)
    cleanup = [token.lower() for token in tokens if token.lower()
               not in stopset and len(token) > 2]
    return cleanup
# data = load_json('summer_transfer.json')['tweets']
data = load_json('transfer.json')['tweets']
langs = []
text = '\n'.join([(d['text']) for d in data.values()])
# fdist = FreqDist(cleanupDoc(text))
# freq = pd.DataFrame(dict(fdist), index=['freq']).T
# freq.sort_values('freq', ascending=False, inplace=True)
# # freq.to_csv('freq_winter')
# print(freq.head(100))
import Algorithmia
input = [
text,
2,
5,
False,
True
]
input = text # "An engineer is trying to design a faster submarine. \nWould she prefer to study a fish or a flock of birds?"
client = Algorithmia.client('simkxwJR9Pt23FxpLaN6755Gq4U1')
algo = client.algo('dbgannon/KeyPhrases/0.1.1')
print(algo.pipe(input))