当前位置: 首页>>代码示例>>Python>>正文


Python FlickrAPI.photos_search方法代码示例

本文整理汇总了Python中flickrapi.FlickrAPI.photos_search方法的典型用法代码示例。如果您正苦于以下问题:Python FlickrAPI.photos_search方法的具体用法?Python FlickrAPI.photos_search怎么用?Python FlickrAPI.photos_search使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在flickrapi.FlickrAPI的用法示例。


在下文中一共展示了FlickrAPI.photos_search方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: fetch_from_flickr

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
    def fetch_from_flickr(self, keyword, api_key, api_secret, number_links=50):
        """Fetch image URLs from the Flickr search API.

        Args:
            keyword: search text passed to Flickr.
            api_key: Flickr API key.
            api_secret: Flickr API secret.
            number_links: maximum number of image links to return.

        Returns:
            A list of at most ``number_links`` static-Flickr photo URLs.
        """
        from flickrapi import FlickrAPI  # imported lazily: only needed for this backend

        # Flickr caps per_page at 200; use the smallest page size that covers the request.
        items_per_page = min(number_links, 200)
        pages_nbr = int(ceil(number_links / items_per_page))

        def _photo_url(photo):
            # Build the direct static-image URL from the photo's metadata attributes.
            return "https://farm{0}.staticflickr.com/{1}/{2}_{3}.jpg".format(
                photo.get('farm'), photo.get('server'), photo.get('id'), photo.get('secret'))

        print("Crawling Flickr Search...")
        flickr = FlickrAPI(api_key, api_secret)
        links = []
        for page in range(1, pages_nbr + 1):
            response = flickr.photos_search(api_key=api_key,
                                            text=keyword,
                                            per_page=items_per_page,
                                            media='photos',
                                            page=page,
                                            sort='relevance')
            photos = [el for el in response.iter() if el.tag == 'photo']
            links.extend(_photo_url(p) for p in photos)
            print("\r >> ", len(links), " links extracted...", end="")

        # Trim in case the last page pushed us past the requested count.
        return links[:number_links]
开发者ID:amineHorseman,项目名称:images-web-crawler,代码行数:46,代码来源:web_crawler.py

示例2: FwiktrFlickrRetriever

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
class FwiktrFlickrRetriever(FwiktrServiceManager):
    """Picks a random Flickr photo matching a tag list.

    Tries a strict all-tags (AND) search first; if it returns nothing,
    falls back to an any-tag (OR) search.
    """

    # Selection transforms available to this retriever; GetNewPicture
    # instantiates them directly rather than reading this list.
    transformList = [FwiktrFlickrFuckItSelectionTransform(), FwiktrFlickrFullANDSelectionTransform()]
    def __init__(self):
        self._pic_info = []  # metadata of the most recently selected photo
        FwiktrServiceManager.__init__(self)
        self.name = "Flickr"

    def _SetupService(self):
        # Build the FlickrAPI client from configured credentials.
        self._fapi = FlickrAPI(self._GetOption('flickr_api_key'), self._GetOption('flickr_api_secret'))

    def GetPictureXML(self):
        # NOTE(review): returns the module-level template itself, not a
        # rendered result for the current picture -- confirm intended.
        return flickr_info_xml

    def GetPictureData(self):
        # HTML-escape the title since it is embedded into markup downstream.
        return {'picture_title':cgi.escape(self._pic_info['title']), 'picture_info':self._GetPictureSpecificData()}

    def _GetPictureSpecificData(self):
        # Fill the XML template with the selected photo's identifying fields.
        return flickr_info_xml % {'flickr_server':self._pic_info['server'], 'flickr_farm':self._pic_info['farm'], 'flickr_photo_id':self._pic_info['id'], 'flickr_secret':self._pic_info['secret'], 'flickr_owner_id':self._pic_info['owner']}

    def GetNewPicture(self, tag_list):
        """Select a random photo matching tag_list; return True on success.

        Culls oversized tag lists, searches with tag_mode='all', then
        retries with tag_mode='any' when the strict search finds nothing.
        """
        try:
            if len(tag_list) > 20:
                culler = FwiktrFlickrTagCullTransform()
                tag_list = culler.RunTransform(tag_list)
            tag_string = ','.join(tag_list)
            if(tag_string == ""): return False
            pic = FwiktrFlickrFullANDSelectionTransform()
            rsp = self._fapi.photos_search(api_key=self._GetOption('flickr_api_key'),tags=tag_string,tag_mode='all')
            self._fapi.testFailure(rsp)
            print rsp.photos[0]['total']
            if(int(rsp.photos[0]['total']) == 0):
                # Strict AND search found nothing: retry matching any tag.
                pic = FwiktrFlickrFuckItSelectionTransform()
                rsp = self._fapi.photos_search(api_key=self._GetOption('flickr_api_key'),tags=tag_string,tag_mode='any')
                print rsp.photos[0]['total']
                self._fapi.testFailure(rsp)
            if(int(rsp.photos[0]['total']) == 0): 
                return False
            # NOTE(review): randint is inclusive at both ends, so this can
            # pick an index one past the last photo on the page -- confirm.
            rand_index = random.randint(0, min(int(rsp.photos[0]['perpage']), int(rsp.photos[0]['total'])))
            self._pic_info = rsp.photos[0].photo[rand_index]            
            pic.RunTransform({'total':rsp.photos[0]['total'],'picked':rand_index})
            return True 
        except:
            # NOTE(review): bare except treats ANY error (including coding
            # errors) as "no picture found"; consider narrowing.
            return False
开发者ID:qdot,项目名称:fwiktr,代码行数:45,代码来源:fwiktr_web.py

示例3: str

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
        
        successfulQuery = True
    return response


if __name__ == "__main__":
    # Fetch one page of the most "interesting" public photos uploaded in a
    # fixed time window, print each photo's URL, and count the valid ones.
    api_key = config.flickrAPIKey
    page_size = str(config.maxPhotosPerPage)
    client = FlickrAPI(config.flickrAPIKey, config.flickrSecret)
    search_rsp = client.photos_search(
        api_key=api_key,
        sort='interestingness-desc',
        ispublic='1',
        media='photos',
        per_page=page_size,
        page='1',
        text='love',
        extras = config.photo_extras,
        min_upload_date='1300822535',
        max_upload_date='1300882535')
    valid_count = 0
    for photo_node in search_rsp[0]:
        attribs = photo_node.attrib
        print (extract_url(attribs))
        if (is_valid_photo(attribs)):
            valid_count = valid_count + 1
    print (valid_count)


开发者ID:Asperado,项目名称:iconic,代码行数:30,代码来源:photos_search.py

示例4: __init__

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
class Offlickr:

    def __init__(
        self,
        key,
        secret,
        httplib=None,
        dryrun=False,
        verbose=False,
        ):
        """Instantiates an Offlickr object
        An API key is needed, as well as an API secret"""

        self.__flickrAPIKey = key
        self.__flickrSecret = secret
        # NOTE(review): stored but not used by any method visible here.
        self.__httplib = httplib

        # Get authentication token
        # note we must explicitly select the xmlnode parser to be compatible with FlickrAPI 1.2

        self.fapi = FlickrAPI(self.__flickrAPIKey, self.__flickrSecret,
                              format='xmlnode')
        # Two-phase desktop auth: get a frob, let the user authorize in a
        # browser, then exchange (token, frob) for the real auth token.
        (token, frob) = self.fapi.get_token_part_one()
        if not token:
            raw_input('Press ENTER after you authorized this program')
        self.fapi.get_token_part_two((token, frob))
        self.token = token
        # Confirm the login and remember the authenticated user's id (NSID).
        test_login = self.fapi.test_login()
        uid = test_login.user[0]['id']
        self.flickrUserId = uid
        self.dryrun = dryrun    # when True, callers should skip destructive actions
        self.verbose = verbose  # when True, methods print progress details

    def __testFailure(self, rsp):
        """Report whether the previous API call failed, printing on failure."""

        failed = rsp['stat'] == 'fail'
        if failed:
            print 'Error!'
        return failed

    def getPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""
        # Pages through photos_search 500 at a time (Flickr's cap) until all
        # photos uploaded in [dateLo, dateHi] are collected.
        # Returns None on API failure or when the user has no matching photos.

        n = 0                # page number of the next request
        flickr_max = 500     # Flickr's maximum per_page
        photos = []

        print 'Retrieving list of photos'
        while True:
            if self.verbose:
                print 'Requesting a page...'
            n = n + 1
            rsp = self.fapi.photos_search(
                api_key=self.__flickrAPIKey,
                auth_token=self.token,
                user_id=self.flickrUserId,
                per_page=str(flickr_max),
                page=str(n),
                min_upload_date=dateLo,
                max_upload_date=dateHi,
                )
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print ' %d photos so far' % len(photos)
            # Stop once we've accumulated every photo the API reports.
            if len(photos) >= int(rsp.photos[0]['total']):
                break

        return photos

    def getGeotaggedPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""

        n = 0
        flickr_max = 500
        photos = []

        print 'Retrieving list of photos'
        while True:
            if self.verbose:
                print 'Requesting a page...'
            n = n + 1
            rsp = \
                self.fapi.photos_getWithGeoData(api_key=self.__flickrAPIKey,
                    auth_token=self.token, user_id=self.flickrUserId,
                    per_page=str(flickr_max), page=str(n))
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print ' %d photos so far' % len(photos)
            if len(photos) >= int(rsp.photos[0]['total']):
                break
#.........这里部分代码省略.........
开发者ID:skvidal,项目名称:offlickr,代码行数:103,代码来源:offlickr.py

示例5: __init__

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]

#.........这里部分代码省略.........
          len(rsp.photosets[0].photoset), self.user_id)
    return rsp.photosets[0].photoset

  def parseInfoFromPhoto(self, photo, perms=None):
    """Flatten a photo node's attributes into a plain info dict.

    Absent optional attributes fall back to defaults: format 'jpg',
    upload and last-update dates '0'. Slashes in the title are replaced
    so the title is safe to use in a filesystem path.
    """
    def attr_or(key, fallback):
      # Photo nodes raise KeyError for attributes they don't carry.
      try:
        return photo[key]
      except KeyError:
        return fallback

    return {
        'id': photo['id'],
        'title': photo['title'].replace('/', '_'),
        # Some pics don't contain originalformat attribute, so default to jpg.
        'format': attr_or('originalformat', 'jpg'),
        'dupload': attr_or('dateupload', '0'),
        'dupdate': attr_or('lastupdate', '0'),
        'perms': perms,
    }

  def parseInfoFromFullInfo(self, id, fullInfo):
    """Build the standard photo-info dict from a cached fullInfo sequence.

    fullInfo layout (by index): 0=format, 1=mode, 4=title,
    10=upload date, 11=last-update date.
    """
    return {
        'id': id,
        'title': fullInfo[4],
        'format': fullInfo[0],
        'dupload': fullInfo[10],
        'dupdate': fullInfo[11],
        'mode': fullInfo[1],
    }

  def getPhotosFromPhotoset(self, photoset_id):
    """Return a dict mapping each photo node in the set to the privacy
    filter string ('0'..'2') under which it was found; tries successive
    privacy filters until one yields photos."""
    log.debug("set id: %s", photoset_id)
    photosPermsMap = {}
    # I'm not utilizing the value part of this dictionary. Its arbitrarily
    # set to i.
    # NOTE(review): Flickr documents privacy_filter values as 1-5; this
    # loop sends 0-2 -- confirm the intended range.
    for i in range(0,3):
      page = 1
      while True:
        rsp = self.fapi.photosets_getPhotos(auth_token=self.authtoken,
                                            photoset_id=photoset_id, 
                                            extras=self.extras, 
                                            page=str(page),
                                            privacy_filter=str(i))
        if not rsp:
          break
        if not hasattr(rsp.photoset[0], 'photo'):
          log.error("photoset %s doesn't have attribute photo", rsp.photoset[0]['id'])
          break
        for p in rsp.photoset[0].photo:
          photosPermsMap[p] = str(i)
        page += 1
        # Stop after the last page reported by the API.
        if page > int(rsp.photoset[0]['pages']): break
      # First privacy filter that produced any photos wins.
      if photosPermsMap: break
    return photosPermsMap
            
  def getPhotoStream(self, user_id):
    """Return every photo node in user_id's stream, paging 500 at a time.

    On an error response or a malformed page, returns whatever was
    collected so far."""
    log.debug("userid: %s", user_id)
    retList = []
    pageNo = 1
    maxPage = 1   # corrected from the first response's reported page count
    while pageNo<=maxPage:
      log.info("retreiving page number %s of %s", pageNo, maxPage) 
      rsp = self.fapi.photos_search(auth_token=self.authtoken, 
                                    user_id=user_id, per_page="500", 
                                    page=str(pageNo), extras=self.extras)
      if not rsp:
        # NOTE(review): reads rsp.errormsg from a falsy response object --
        # presumably the response type exposes it even then; confirm.
        log.error("can't retrive photos from your stream; got error %s",
            rsp.errormsg)
        return retList
      if not hasattr(rsp.photos[0], 'photo'):
        log.error("photos.search response doesn't have attribute photos; "
            "returning list acquired so far")
        return retList
      for a in rsp.photos[0].photo:
        retList.append(a)
      maxPage = int(rsp.photos[0]['pages'])
      pageNo = pageNo + 1
    return retList
 
  def getTaggedPhotos(self, tags, user_id=None):
    """Return up to 500 photos matching ALL of the given tags, optionally
    restricted to one user; [] when none match, None on API error."""
    log.debug("tags: %s user_id: %s", tags, user_id)
    kw = kwdict(auth_token=self.authtoken, tags=tags, tag_mode="all", 
                extras=self.extras, per_page="500")
    # Only add user_id when given, so the search stays global otherwise.
    if user_id is not None: 
      kw = kwdict(user_id=user_id, **kw)
    rsp = self.fapi.photos_search(**kw)
    log.debug("search for photos with tags %s has been"
              " successfully finished" % tags)
    if not rsp:
      # NOTE(review): reads rsp.errormsg from a falsy response -- confirm
      # the response type exposes it in that state.
      log.error("couldn't search for the photos; got error %s", rsp.errormsg)
      return
    if not hasattr(rsp.photos[0], 'photo'):
      return []
    return rsp.photos[0].photo
开发者ID:Xirg,项目名称:flickrfs-old,代码行数:104,代码来源:transactions.py

示例6: importFromFlickr

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]

#.........这里部分代码省略.........

        # Photo tags are easy
        tags = []
        for tag in photo_info.find('tags'):
            if tag.attrib['machine_tag'] != '1':  # Ignore ugly automatically created inivisible-to-users tags
                tags.append(tag.attrib['raw'])

        # Import comments - needs its own Flickr API call
        comments = []
        if int(photo_info.find('comments').text) > 0:
            comment_rsp = flickr.photos_comments_getList(photo_id = photoID).find('comments')
            for comment in comment_rsp:
                comments.append({
                    'user_id': comment.attrib.get('author'),
                    'screen_name': comment.attrib.get('authorname'),
                    'timestamp': comment.attrib.get('datecreate'),
                    'iconfarm': comment.attrib.get('iconfarm'),
                    'iconserver': comment.attrib.get('iconserver'),
                    'text': comment.text
                })

        # Import Favorites.  These come in at most 50 per request. Another dedicated Flickr API call
        favorites = []
        favorite_rsp = flickr.photos_getFavorites(photo_id = photoID, per_page = '50').find('photo')
        for fav in favorite_rsp:
            favorites.append({
                'user_id': fav.attrib.get('nsid'),
                'screen_name': fav.attrib.get('username'),
                'timestamp': fav.attrib.get('favedate'),
                'iconfarm': comment.attrib.get('iconfarm'),
                'iconserver': comment.attrib.get('iconserver')
            })

        fav_page_count = int(favorite_rsp.attrib['pages'])
    
        if fav_page_count > 1:
            for i in range(2, fav_page_count + 1):
                favorite_rsp = flickr.photos_getFavorites(photo_id = photoID, page = str(i), per_page = '50').find('photo')
                for fav in favorite_rsp:
                    favorites.append({
                        'user_id': fav.attrib['nsid'],
                        'screen_name': fav.attrib.get('username'),
                        'timestamp': fav.attrib.get('favedate'),
                        'iconfarm': comment.attrib.get('iconfarm'),
                        'iconserver': comment.attrib.get('iconserver')
                    })

        # View count
        # There's no direct flickr API to get a photo's view count (weird)
        # But we can add 'views' to the list of extra info returned by photo.search... (weird)
        # Can't search by photo ID (not weird), but can search by min & max upload time... set those to the photo's upload time, and we find the exact photo... (lucky)
        views = flickr.photos_search(user_id = flickr_owner_id, min_upload_date = time_posted, max_upload_date = time_posted, extras = 'views')
        views = views.find('photos')[0].attrib['views']

    except Exception as e:
        return jsonify(result = False, error = "Fuck me.  Flickr Import went horribly awry.  Send this message to Remi:\n\nPhoto: %s - %s" % (photoID, e.__repr__()))

    try:
        # So, we've pulled absolutely everything about this one photo out of Flickr.
        # Now dump it all into Brickr. You're welcome.
        photo = Photo(photo_url, g.user, title, desc)
        file_object = urllib2.urlopen(photo_url)  # Download photo from Flickr
        fp = StringIO(file_object.read())
        if not photo.save_file(fp):
            return jsonify(result = False, error = "Well shit. So, everything exported FROM Flickr just fine.  But we failed to save the exported photo file.  Send this message to Remi:\n\nPhoto: %s - Flickr Export - %s" % (photoID, photo_url))

            
        # Flickr buddy icon URL:
        # http://farm{icon-farm}.staticflickr.com/{icon-server}/buddyicons/{nsid}.jpg
        # http://farm4.staticflickr.com/3692/buddyicons/[email protected]
        photo.views = views
        db.session.add(photo)
        db.session.commit()  # Shit, should do everything in one commit, but we need a photo ID before adding things to the photo...

        for c in comments:
            user = User.get_user_or_placeholder(c['screen_name'], c['user_id'])
            comment = Comment(user, photo, c['text'], datetime.date.fromtimestamp(float(c['timestamp'])))
            db.session.add(comment)

        for n in notes:
            user = User.get_user_or_placeholder(n['screen_name'], n['user_id'])
            note = Note(user, photo, n['text'], n['x'], n['y'], n['w'], n['h'])
            db.session.add(note)

        for t in tags:
            tag = Tag.get_or_create(t)
            photo.tags.extend([tag])
            db.session.add(tag)

        for f in favorites:
            user = User.get_user_or_placeholder(f['screen_name'], f['user_id'])
            fav = Favorite(user, photo)
            db.session.add(fav)

        db.session.commit()

        return jsonify(result = True, url = url_for('photos.photo', user_url = g.user.url, photoID = photo.id))

    except Exception as e:
        return jsonify(result = False, error = "Well shit. So, everything exported FROM flickr just fine.  But dumping it INTO Brickr is apparently too much to ask.  Send this message to Remi:\n\nPhoto: %s - Brickr Import - %s" % (photoID, e.__repr__()))
开发者ID:remig,项目名称:brickr,代码行数:104,代码来源:photos.py

示例7: __init__

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
class Fwiktr:
    """Pairs public Twitter messages with Flickr photos and publishes the
    result as HTML pages uploaded over FTP.

    NOTE(review): FTP host and credentials are hard-coded below -- they
    should live in configuration, and these should be rotated.
    """
    def __init__(self):
        self.ftp_socket = ftplib.FTP('30helensagree.com')
        self.ftp_socket.login('thirtyhelens', 'ZHXK7tzL')
        self.ftp_socket.cwd('30helensagree.com')
        self.SetupFlickr()
        self.SetupTwitter()

    def SetupFlickr(self):
        # make a new FlickrAPI instance
        self.fapi = FlickrAPI(flickrAPIKey, flickrAPISecret)

    def SetupTwitter(self):
        self.tapi = twitter.Api()        

    def RunPOSTagger(self):
        """POS-tag each public-timeline tweet, pick Flickr photos for its
        nouns, and upload one HTML page per tweet (30s apart)."""
        twitter_messages = self.tapi.GetPublicTimeline()
        for message in twitter_messages:
            try:

                # NOTE(review): message.text is interpolated into a shell
                # command -- a tweet containing quotes or metacharacters can
                # inject arbitrary commands. Use subprocess with an arg list.
                cmd = 'echo "' + message.text + '" | treetagger/cmd/tree-tagger-english > ./twitter_message_output.txt'
                os.system(cmd)
                self.pos_file = open('twitter_message_output.txt', 'r')
                tokens = []
                self.parse_string = ""
                # Each tagger output line becomes a list of its whitespace-
                # separated fields (word / POS tag / lemma).
                for line in self.pos_file:
                    current_line = []
                    self.parse_string += line + "<BR>"
                    for value in tokenize.whitespace(line):
                        current_line.append(value)
                    tokens.append(current_line)

                # Random page name so successive uploads never collide.
                filename = uuid.uuid4()                
                self.output_file = open(str(filename)+".html", 'w')
                self.output_file.write(file_header % (message.text))
                self.output_file.write(message.text + "<BR>")
                
                self.RetreiveFlickrURLs(tokens)

                self.output_file.write(file_footer)
                self.output_file.close()
                # Reopen for reading and push the finished page over FTP.
                self.output_file = open(str(filename)+".html", 'r')
                self.ftp_socket.storlines("STOR "+str(filename)+".html", self.output_file)
                self.output_file.close()
                self.pos_file.close()
                time.sleep(30)  # throttle: one page per 30 seconds
            except UnicodeEncodeError:
                print "Twitter Message not ascii, skipping"        
            except AttributeError:
                print "Weird XML error. I wish it'd stop doing that"

    def CullAndFormatTagList(self, tagList):
        """Keep only noun tokens (NP/NN/NNS) and join them with commas."""
        #Start by culling everything that's not a noun
        tags_culled = "";
        for tag_tuple in tagList:
            if tag_tuple[1] == "NP" or tag_tuple[1] == "NN" or tag_tuple[1] == "NNS":
                tags_culled += tag_tuple[0] + ","
        return tags_culled

    def RetreiveFlickrURLs(self, tagList):
        """Search Flickr for the tweet's nouns, embed one randomly chosen
        photo in the page, and append the full candidate URL list."""
        tag_string = self.CullAndFormatTagList(tagList)

        rsp = self.fapi.photos_search(api_key=flickrAPIKey,tags=tag_string)

        # NOTE(review): 'total' is compared against int 0 here but is used
        # as a string below -- if it is a string, this guard never fires.
        if(rsp.photos[0]['total'] == 0): return 
        # NOTE(review): randint is inclusive, so rand_index can land one
        # past the last photo, in which case no image is embedded below.
        rand_index = random.randint(0, min(int(rsp.photos[0]['perpage']), int(rsp.photos[0]['total'])))
        i = 0        

        urls = "<UL>"
	for a in rsp.photos[0].photo:            
            photo_url = "http://farm%s.static.flickr.com/%s/%s_%s.jpg" % (a['farm'], a['server'], a['id'], a['secret'])
            flickr_url = "http://www.flickr.com/photos/%s/%s" % (a['owner'], a['id'])
            urls += "<LI><A HREF='"+photo_url+"'>"+photo_url+"</A> - <A HREF='"+ flickr_url+"'>"+flickr_url+"</A></LI>"
            if i == rand_index:
                self.output_file.write("<A HREF='"+flickr_url+"'><IMG SRC='"+photo_url+"' border=0></A>")
            i = i + 1
        urls += "</UL>"
        self.output_file.write("<HR>")
        self.output_file.write(self.parse_string)
        self.output_file.write("<HR>")
        self.output_file.write("Tag String for Flickr Search: " + tag_string)
        self.output_file.write("<HR>")
        self.output_file.write("Using photo " + str(rand_index) + " of " + rsp.photos[0]['total']  + "<BR>")
        self.output_file.write("Selection method: RANDOM CHOICE<BR>")
        self.output_file.write(urls)
开发者ID:qdot,项目名称:fwiktr,代码行数:87,代码来源:fwiktr.py

示例8: Importer

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
class Importer(object):

    def __init__(self):
        # FLICKR_KEY is a module-level constant defined elsewhere in this file.
        self.flickr = FlickrAPI(FLICKR_KEY)

    def get_photosets(self, username, filename=None):
        filename = filename or username+'.json'
        if os.path.exists(filename):
            print "Looks like we already have information about your photos."
            if raw_input("Refresh? (y/n): ").lower().startswith('n'):
                return deserialize(open(filename).read())

        print "Downloading information about your photos."
        if '@' in username:
            response = self.flickr.people_findByEmail(find_email=username)
        else:
            response = self.flickr.people_findByUsername(username=username)
        nsid = response[0].get('nsid')

        response = self.flickr.photosets_getList(user_id=nsid)
        photosets = []
        photo_ids = []
        for ps in response[0]:
            photoset = {'id': ps.get('id'),
                        'title': ps[0].text,
                        'description': ps[1].text,
                        'photos':[]}
            photos_response = self.flickr.photosets_getPhotos(photoset_id=photoset['id'],
                                                              extras='url_o')
            for pxml in photos_response[0]:
                photo = {'id':pxml.get('id'),
                         'title':pxml.get('title')}
                photoset['photos'].append(photo)
                photo_ids.append(photo['id'])
            print photoset['title'],'-',len(photoset['photos']),'photos'
            photosets.append(photoset)

        # get photos not in photosets
        photos_response = self.flickr.photos_search(user_id=nsid, per_page=500)
        photoset = {'id':'stream',
                    'title':'Flickr Stream',
                    'description':'Photos from my flickr stream',
                    'photos':[]}
        for pxml in response[0]:
            photo = {'id':pxml.get('id'),
                     'title':pxml.get('title')}
            if photo['id'] not in photo_ids:
                photoset['photos'].append(photo)
                photo_ids.append(photo['id'])
        if photoset['photos']:
            print photoset['title'],'-',len(photoset['photos']),'photos'
            photosets.append(photoset)

        f = open(filename, "w")
        f.write(serialize(photosets))
        f.close()
        return photosets

    def download_images(self, photosets, directory):
        print "Downloading your photos"
        if not os.path.exists(directory):
            os.mkdir(directory)
        default = None
        for photoset in photosets:
            dirpath = os.path.join(directory, photoset['id']+' - '+photoset['title'])
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)
            for photo in photoset['photos']:
                filename = os.path.join(dirpath, photo['id']+'.jpg')
                if os.path.exists(filename):
                    if default is None:
                        print "Photo", photo['id'], "has already been downloaded."
                        default = raw_input("Download again? (y/n/Y/N) (capital to not ask again): ")
                    if default == 'n':
                        default = None
                        continue
                    elif default == 'N':
                        continue
                    elif default == 'y':
                        default = None

                f = open(filename, 'w')
                if not photo.get('url'):
                    try:
                        sizes_response = self.flickr.photos_getSizes(photo_id=photo['id'])
                    except:
                        print "Failed to download photo:", photo['id'], '... sorry!'
                    else:
                        photo['url'] = sizes_response[0][-1].get('source')
                if photo.get('url'):
                    print "Downloading", photo['title'], 'from', photo['url']
                    remote = urllib2.urlopen(photo['url'])
                    f.write(remote.read())
                    f.close()
                    remote.close()

    def upload_images(self, photosets, directory):
        client = DivvyshotClient()
        for photoset in photosets:
            event_data = client.create_event(name=photoset['title'],
#.........这里部分代码省略.........
开发者ID:pcardune,项目名称:paulo.flickrimport,代码行数:103,代码来源:shell.py

示例9: FlickrAPI

# 需要导入模块: from flickrapi import FlickrAPI [as 别名]
# 或者: from flickrapi.FlickrAPI import photos_search [as 别名]
# NOTE(review): this API key is hard-coded and published -- it should be
# loaded from configuration and rotated.
flickr = FlickrAPI('ecbe2e529de5b65c68fb66470c587423')

n = 0        # result counter within the current search window
pagenr = 1   # next results page to request
ids = []     # photo ids already seen (the search can return duplicates)
maxDate = int(time.time())   # upper bound on upload date for the next window
tempMaxDate = maxDate        # oldest upload date seen in the current window

totalPics = 0
newestDate = datetime.fromtimestamp(0)   # most recent upload seen so far

try:
    while True:
        while n < 4000:
            try:
                result = flickr.photos_search(bbox="2.570801,49.475263,6.437988,51.512161", max_upload_date=maxDate, per_page=250, extras="geo,date_upload", page=pagenr)
                pagenr = pagenr + 1
                photos = result.find('photos').findall('photo')
                
                if photos:
                    for photo in photos:
                        photoid = int(photo.attrib['id'])
                        if photoid not in ids:
                            ids.append(photoid)
                            #output.write("%s %s\n" %(photo.attrib['longitude'], photo.attrib['latitude']))
                            dateupload = int(photo.attrib['dateupload'])
                            if dateupload<tempMaxDate:
                                tempMaxDate = dateupload
                                
                            utcdate = datetime.utcfromtimestamp(dateupload)
                            if utcdate > newestDate:
开发者ID:asrointernship,项目名称:jasper-scripts,代码行数:33,代码来源:flickrsearch.py


注:本文中的flickrapi.FlickrAPI.photos_search方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。