

Python request.GET Attribute Code Examples

This article compiles typical usage examples of the request.GET attribute in Python, indexed under urllib.request. If you have been asking how request.GET works, how to use it, or what real examples of it look like, the selected code examples here may help. You can also explore further usage examples from urllib.request, the module under which this attribute is listed.


The following shows 14 code examples of the request.GET attribute, sorted by popularity by default.
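
Before diving into the examples, a minimal sketch of the shared pattern may be useful. In every example below, request is a Django view's HttpRequest object and GET is its QueryDict of query-string parameters (the aggregator files the attribute under urllib.request). The view and parameter names in this sketch are hypothetical and not taken from any of the projects cited; it only illustrates the three access patterns the examples rely on: a membership test, direct indexing, and .get() with a default (Example 9 additionally binds request.GET to a Django form for validation).

# Minimal sketch; view and parameter names are hypothetical, not from the examples below.
from django.http import HttpResponse, HttpResponseBadRequest

def search_view(request):
    # request.GET is a QueryDict holding the query-string parameters
    if 'q' not in request.GET:               # membership test
        return HttpResponseBadRequest('must give q in query')
    q = request.GET['q']                     # direct indexing; raises MultiValueDictKeyError if absent
    page = request.GET.get('page', '1')      # .get() with a default value
    return HttpResponse('searching %r, page %s' % (q, page), content_type='text/plain')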

Example 1: found

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def found(request):
    """
    View to handle the enter-search/press-enter behaviour in the autocomplete box
    """
    if 'search' not in request.GET:
        return ForbiddenResponse(request, 'must give search in query')
    search = request.GET['search']
    studentQuery = get_query(search, ['userid', 'emplid', 'first_name', 'last_name'])
    people = Person.objects.filter(studentQuery)[:200]
    for p in people:
        # decorate with RAAppointment count
        p.ras = RAAppointment.objects.filter(unit__in=request.units, person=p, deleted=False).count()

    context = {'people': people}
    return render(request, 'ra/found.html', context)


# This is an index of all RA Appointments belonging to a given person.
Developer ID: sfu-fas, Project: coursys, Lines of code: 20, Source: views.py

Example 2: browse

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def browse(request):
    if 'tabledata' in request.GET:
        return RADataJson.as_view()(request)

    form = RABrowseForm()
    context = {'form': form, 'supervisor_only': not request.units}
    return render(request, 'ra/browse.html', context) 
Developer ID: sfu-fas, Project: coursys, Lines of code: 9, Source: views.py

Example 3: filter_queryset

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def filter_queryset(self, qs):
        GET = self.request.GET

        # limit to those visible to this user
        qs = qs.filter(
            Q(unit__in=self.request.units)
            | Q(hiring_faculty__userid=self.request.user.username)
        )
        qs = qs.exclude(deleted=True)

        # "current" contracts filter
        if 'current' in GET and GET['current'] == 'yes':
            today = datetime.date.today()
            slack = 14 # number of days to fudge the start/end
            qs = qs.filter(start_date__lte=today + datetime.timedelta(days=slack),
                           end_date__gte=today - datetime.timedelta(days=slack))

        # search box
        srch = GET.get('sSearch', None)
        if srch:
            # get RA set from haystack, and use it to limit our query.
            ra_qs = SearchQuerySet().models(RAAppointment).filter(text__fuzzy=srch)[:500]
            ra_qs = [r for r in ra_qs if r is not None]
            if ra_qs:
                # ignore very low scores: elasticsearch grabs too much sometimes
                max_score = max(r.score for r in ra_qs)
                ra_pks = (r.pk for r in ra_qs if r.score > max_score/5)
                qs = qs.filter(pk__in=ra_pks)
            else:
                qs = qs.none()

        return qs 
Developer ID: sfu-fas, Project: coursys, Lines of code: 34, Source: views.py

Example 4: person_info

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def person_info(request):
    """
    Get more info about this person, for AJAX updates on new RA form
    """
    result = {'programs': []}
    emplid = request.GET.get('emplid', None)
    if not emplid or not emplid.isdigit() or len(emplid) != 9:
        pass
    else:
        programs = []
        
        # GradPrograms
        emplid = request.GET['emplid']
        grads = GradStudent.objects.filter(person__emplid=emplid, program__unit__in=request.units)
        for gs in grads:
            pdata = {
                     'program': gs.program.label,
                     'unit': gs.program.unit.name,
                     'status': gs.get_current_status_display(),
                     }
            programs.append(pdata)

        result['programs'] = programs
        
        # other SIMS info
        try:
            otherinfo = more_personal_info(emplid, needed=['citizen', 'visa'])
            result.update(otherinfo)
        except SIMSProblem as e:
            result['error'] = str(e)

    return HttpResponse(json.dumps(result), content_type='application/json;charset=utf-8') 
Developer ID: sfu-fas, Project: coursys, Lines of code: 34, Source: views.py

Example 5: quick_search

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def quick_search(request):
    if 'term' in request.GET:
        term = request.GET['term']
        grads = GradStudent.objects.filter(program__unit__in=request.units) \
                .filter(_get_query(term)) \
                .select_related('person', 'program')[:500] 
                # take more here so the sorting gets more useful students: trim to 50 top later
        
        # sort according to ACTIVE_STATUS_ORDER to get useful students at the top: 
        #   decorate with order, sort, and build jquery response
        grads_sort = [(ACTIVE_STATUS_ORDER[gs.current_status], gs) for gs in grads]
        grads_sort.sort()
        grads_sort = grads_sort[:50]
        
        data = [{'value': str(g.slug), 'label': "%s, %s, %s" % 
                 (g.person.name(), g.program.label, g.get_current_status_display())} 
                 for _,g in grads_sort]
        response = HttpResponse(content_type='application/json')
        json.dump(data, response, indent=1)
        return response
    elif 'search' in request.GET:
        grad_slug = request.GET['search']
        try:
            grad = GradStudent.objects.get(slug=grad_slug, program__unit__in=request.units)
            return HttpResponseRedirect(reverse('grad:view', kwargs={'grad_slug':grad.slug}))
        except GradStudent.DoesNotExist:
            return HttpResponseRedirect(reverse('grad:not_found') + "?search=" + urllib.parse.quote_plus(grad_slug.encode('utf8')))
    else:
        return ForbiddenResponse(request, 'must send term') 
Developer ID: sfu-fas, Project: coursys, Lines of code: 31, Source: quick_search.py

Example 6: view

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def view(request, pathway_id):
    print(pathway_id)
    
#    pathway_data = kegg_rest_request('get/hsa%s' % (pathway_id))
    pathway = Pathway.objects.get(kegg=pathway_id)

     
    pathway.genes = pathway.genes.split(',')
    print(pathway.genes)
    
#    genes = parse_genes(pathway_data)
#    pathway = {}
#    pathway['name'] = pathway_data.split('\n')[1].replace('NAME', '') 
#    #get gene_ids
#    genelist = []
#    for gene in genes:
#        
#        genelist.append('hsa:%s' % gene['id'])
##        print gene['id']
#    gene_url = '+'.join(genelist)
#    url = '/conv/ncbi-geneid/%s' % (gene_url)
#    results = kegg_rest_request(url)
    #print results
    
    
    #if request.method == 'GET':
    return render_to_response('pathway_analysis/view.html', {'pathway':pathway}, context_instance=RequestContext(request)) 
Developer ID: raonyguimaraes, Project: mendelmd, Lines of code: 29, Source: views.py

Example 7: dicopinionResult

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def dicopinionResult(request):
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
    stock_name = get_stock_name(dicStockNum)

    for pageNum in range(1, 10):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(dicStockNum)+',f_'+str(pageNum)+'.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            r'<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        print(type(gotTitle))
        for i in range(len(gotTitle)):
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    segList = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    # print(tx_npl(gotTitle[i][1]))
                    for eachItem in segList:
                        if eachItem != ' ':
                            if eachItem in positiveWord:
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)}) 
Developer ID: LinLidi, Project: StockSensation, Lines of code: 33, Source: views.py

Example 8: nbopinionResult

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def nbopinionResult(request):
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()

    clf = joblib.load(homedir+'/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir+'/StockVisualData/Vect')
    transformer = joblib.load(homedir+'/StockVisualData/Tfidf')

    for pageNum in range(1, 21):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(Nb_stock_number)+'_'+str(pageNum)+'.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            r'<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        for i in range(len(gotTitle)):
            text_predict = []
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_list = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    seg_text = " ".join(seg_list)
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    if predicted == '積極':
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消極':
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':
                        dateCount[j][4] += 1
    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})

# Set up the date array
Developer ID: LinLidi, Project: StockSensation, Lines of code: 43, Source: views.py

Example 9: dispatch

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def dispatch(self, request, *args, **kwargs):
        if kwargs['frequency'] not in constants.FREQUENCIES:
            raise Http404()
        self.payment_frequency = kwargs['frequency']

        # Ensure that the donation amount, currency and source page are legit
        start_form = StartCardPaymentForm(request.GET)
        if not start_form.is_valid():
            return HttpResponseRedirect('/')

        self.amount = start_form.cleaned_data['amount']
        self.currency = start_form.cleaned_data['currency']
        self.source_page = Page.objects.get(pk=start_form.cleaned_data['source_page_id']).specific
        return super().dispatch(request, *args, **kwargs) 
Developer ID: mozilla, Project: donate-wagtail, Lines of code: 16, Source: views.py

Example 10: pay_periods

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def pay_periods(request):
    """
    Calculate number of pay periods between contract start and end dates.
    i.e. number of work days in period / 10
    
    I swear this was easier than doing it in JS, okay?
    """
    day = datetime.timedelta(days=1)
    week = datetime.timedelta(days=7)
    if 'start' not in request.GET or 'end' not in request.GET:
        result = ''
    else:
        st = request.GET['start']
        en = request.GET['end']
        try:
            st = datetime.datetime.strptime(st, "%Y-%m-%d").date()
            en = datetime.datetime.strptime(en, "%Y-%m-%d").date()
        except ValueError:
            result = ''
        else:
            # move start/end into Mon-Fri work week
            if st.weekday() == 5:
                st += 2*day
            elif st.weekday() == 6:
                st += day
            if en.weekday() == 5:
                en -= day
            elif en.weekday() == 6:
                en -= 2*day

            # number of full weeks (until sameday: last same weekday before end date)
            weeks = ((en-st)/7).days
            sameday = st + weeks*week
            assert sameday <= en < sameday + week
            
            # number of days remaining
            days = (en - sameday).days
            if sameday.weekday() > en.weekday():
                # don't count weekend days in between
                days -= 2
            
            days += 1 # count both start and end days
            result = "%.1f" % ((weeks*5 + days)/10.0)
    
    return HttpResponse(result, content_type='text/plain;charset=utf-8') 
Developer ID: sfu-fas, Project: coursys, Lines of code: 47, Source: views.py
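
The docstring of pay_periods states that the result is the number of work days in the period divided by 10. A quick way to sanity-check that arithmetic is to call the view with Django's RequestFactory. The sketch below is a hedged usage example: it assumes Django settings are already configured and that the view is importable (the module path ra.views is an assumption based on the source file and template names, not confirmed by the article).

# Hedged usage sketch: exercise pay_periods with a fabricated GET request.
from django.test import RequestFactory
from ra.views import pay_periods  # assumed import path

rf = RequestFactory()
req = rf.get('/ra/pay_periods', {'start': '2024-01-01', 'end': '2024-01-12'})
# 2024-01-01 (Mon) to 2024-01-12 (Fri) covers 10 work days, i.e. one pay period
print(pay_periods(req).content)  # expected: b'1.0'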

Example 11: _course_info_staff

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def _course_info_staff(request, course_slug):
    """
    Course front page
    """
    course = get_object_or_404(CourseOffering, slug=course_slug)
    member = Member.objects.get(offering=course, person__userid=request.user.username, role__in=['INST','TA','APPR'])
    activities = all_activities_filter(offering=course)
    any_group = True in [a.group for a in activities]
    
    # Non-AJAX way to reorder an activity; see the reorder_activity view function for the AJAX way.
    order = None  
    act = None  
    if 'order' in request.GET:  
        order = request.GET['order']  
    if 'act' in request.GET:  
        act = request.GET['act']  
    if order and act:  
        reorder_course_activities(activities, act, order)  
        return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))  


    # Todo: is the activity type necessary?
    activities_info = []
    total_percent = 0
    for activity in activities:
        if activity.percent:
            total_percent += activity.percent

        if isinstance(activity, NumericActivity):
            activities_info.append({'activity':activity, 'type':ACTIVITY_TYPE['NG']})            
        elif isinstance(activity, LetterActivity):
            activities_info.append({'activity':activity, 'type':ACTIVITY_TYPE['LG']})

    if len(activities) == 0:
        num_pages = Page.objects.filter(offering=course)
        if num_pages == 0:
            messages.info(request, "Students won't see this course in their menu on the front page. As soon as some activities or pages have been added, they will see a link to the course info page.")
    
    discussion_activity = False
    if course.discussion:
        discussion_activity = discuss_activity.recent_activity(member)

    # advertise combined offering if applicable.
    offer_combined = course.joint_with() and len(activities) == 0
    
    context = {'course': course, 'member': member, 'activities_info': activities_info, 'from_page': FROMPAGE['course'],
               'order_type': ORDER_TYPE, 'any_group': any_group, 'total_percent': total_percent, 'discussion_activity': discussion_activity,
               'offer_combined': offer_combined}
    return render(request, "grades/course_info_staff.html", context) 
Developer ID: sfu-fas, Project: coursys, Lines of code: 51, Source: views.py

Example 12: formula_tester

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def formula_tester(request, course_slug):
    course = get_object_or_404(CourseOffering, slug=course_slug)
    numeric_activities = NumericActivity.objects.filter(offering=course, deleted=False)
    result = ""
    
    if 'formula' in request.GET: # If the form has been submitted...
        activity_entries = []
        faked_activities = [] # used to evaluate the formula
        has_error = False
        for numeric_activity in numeric_activities:
            activity_form_entry = ActivityFormEntry(request.GET, prefix=numeric_activity.slug)
            if not activity_form_entry.is_valid():
                has_error = True
            else:
                value = activity_form_entry.cleaned_data['value']
                if not value:
                    value = 0
                faked_activities.append(FakeActivity(numeric_activity.name, numeric_activity.short_name,
                                                     activity_form_entry.cleaned_data['status'],
                                                     numeric_activity.max_grade, numeric_activity.percent,
                                                     value))
            activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
            

        formula_form_entry = FormulaFormEntry(request.GET)
        formula_form_entry.activate_form_entry_validation(course_slug, None)
        
        if not formula_form_entry.is_valid():
            has_error = True
        if has_error:
            messages.error(request, "Please correct the error below")
        else:
            parsed_expr = pickle.loads(formula_form_entry.pickled_formula)
            act_dict = activities_dictionary(faked_activities)
            try:
                result = eval_parse(parsed_expr, FakeEvalActivity(course), act_dict, None, True)
            except EvalException:
                messages.error(request,  "Can not evaluate formula")
    else:
        activity_entries = []
        for numeric_activity in numeric_activities:
            activity_form_entry = ActivityFormEntry(prefix=numeric_activity.slug)
            activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
        formula_form_entry = FormulaFormEntry()
    context = {'course': course, 'activity_entries': activity_entries,
               'formula_form_entry': formula_form_entry, 'result': result}
    return render(request, 'grades/formula_tester.html', context) 
Developer ID: sfu-fas, Project: coursys, Lines of code: 49, Source: views.py

Example 13: dicopinionResult

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def dicopinionResult(request):
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
    stock_name = get_stock_name(dicStockNum)

    # Crawl 10 pages; switch to asynchronous crawling later
    for pageNum in range(1, 10):
        print(f'page:{pageNum}')
        urlPage = 'http://guba.eastmoney.com/list,' + \
                  str(dicStockNum) + ',f_' + str(pageNum) + '.html'
        stockPageRequest = requests.get(urlPage, headers=headers)
        htmlTitleContent = stockPageRequest.text

        resp = Selector(text=htmlTitleContent)
        nodes = resp.xpath(
            '//div[contains(@class,"articleh normal_post") or contains(@class,"articleh normal_post odd")]')

        for index, item in enumerate(nodes):
            view = item.xpath('./span[@class="l1 a1"]/text()').extract_first()
            comment_count = item.xpath('./span[@class="l2 a2"]/text()').extract_first()
            title = item.xpath('./span[@class="l3 a3"]/a/text()').extract_first()
            author = item.xpath('./span[@class="l4 a4"]/a/text()').extract_first()
            create_time = item.xpath('./span[@class="l5 a5"]/text()').extract_first()
            # parse the date (month-day)
            date_pattern = re.search(r'(\d+)-(\d+)', create_time)

            month = sub_zero(date_pattern.group(1))

            day = sub_zero(date_pattern.group(2))

            for j in range(len(dateCount)):  # 5 days

                if int(month) == dateCount[j][0] and int(day) == dateCount[j][1]:
                    dateCount[j][5] += 1  # increment the last element: one more title counted for this day
                    segList = list(jieba.cut(title, cut_all=True))  # keep the segmented words
                    # print(tx_npl(gotTitle[i][1]))
                    for eachItem in segList:
                        if eachItem != ' ':
                            if eachItem in positiveWord:  # crude but simple
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1

                # print(f'month {month}, day {day}: {len(segList)} segments')

    # data for the most recent 5 days
    print(dateCount)
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)}) 
Developer ID: Rockyzsu, Project: StockPredict, Lines of code: 54, Source: views.py

Example 14: nbopinionResult

# Required module import: from urllib import request [as alias]
# Or alternatively: from urllib.request import GET [as alias]
def nbopinionResult(request):
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()

    clf = joblib.load(homedir + '/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir + '/StockVisualData/Vect')
    transformer = joblib.load(homedir + '/StockVisualData/Tfidf')

    for pageNum in range(1, 10):

        urlPage = 'http://guba.eastmoney.com/list,' + \
                  str(Nb_stock_number) + '_' + str(pageNum) + '.html'
        stockPageRequest = requests.get(urlPage, headers=headers)
        htmlTitleContent = stockPageRequest.text

        resp = Selector(text=htmlTitleContent)
        nodes = resp.xpath(
            '//div[contains(@class,"articleh normal_post") or contains(@class,"articleh normal_post odd")]')

        for index, item in enumerate(nodes):
            view = item.xpath('./span[@class="l1 a1"]/text()').extract_first()
            comment_count = item.xpath('./span[@class="l2 a2"]/text()').extract_first()
            title = item.xpath('./span[@class="l3 a3"]/a/text()').extract_first()
            author = item.xpath('./span[@class="l4 a4"]/a/text()').extract_first()
            create_time = item.xpath('./span[@class="l5 a5"]/text()').extract_first()
            # parse the date (month-day)
            date_pattern = re.search(r'(\d+)-(\d+)', create_time)

            month = sub_zero(date_pattern.group(1))

            day = sub_zero(date_pattern.group(2))

            text_predict = []
            for j in range(len(dateCount)):
                if int(month) == dateCount[j][0] and int(day) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_list = list(jieba.cut(title, cut_all=True))
                    seg_text = " ".join(seg_list)
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    if predicted == '積極':
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消極':
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':
                        dateCount[j][4] += 1
                    # no score is returned

    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})


# Set up the date array
Developer ID: Rockyzsu, Project: StockPredict, Lines of code: 61, Source: views.py


Note: The urllib.request.GET attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.