

Python request.GET attribute code examples

This article collects typical usage examples of the request.GET attribute in Python. In the snippets below, request.GET is Django's HttpRequest.GET, a dictionary-like QueryDict holding the parsed query-string parameters of the request; it is not an attribute of the urllib.request module. If you are wondering what request.GET does, how to use it, or what real code that uses it looks like, the selected examples here should help; the source project for each snippet is listed beneath it.


The following shows 14 code examples of the request.GET attribute, sorted by popularity by default.
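
Before the individual examples, here is a minimal sketch of the common request.GET access patterns; the view name and parameter names (q, page, tag) are invented for illustration, and note that all raw values come back as strings:

from django.http import HttpResponse, HttpResponseBadRequest

def search(request):
    # Membership test: was the parameter supplied at all?
    if 'q' not in request.GET:
        return HttpResponseBadRequest('must give q in query string')

    q = request.GET['q']                  # raises MultiValueDictKeyError (a KeyError) if missing
    page = request.GET.get('page', '1')   # .get() with a default never raises
    tags = request.GET.getlist('tag')     # repeated ?tag=a&tag=b parameters as a list

    return HttpResponse('q=%s page=%s tags=%s' % (q, page, tags),
                        content_type='text/plain')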

Example 1: found

def found(request):
    """
    View to handle the enter-search/press-enter behaviour in the autocomplete box
    """
    if 'search' not in request.GET:
        return ForbiddenResponse(request, 'must give search in query')
    search = request.GET['search']
    studentQuery = get_query(search, ['userid', 'emplid', 'first_name', 'last_name'])
    people = Person.objects.filter(studentQuery)[:200]
    for p in people:
        # decorate with RAAppointment count
        p.ras = RAAppointment.objects.filter(unit__in=request.units, person=p, deleted=False).count()

    context = {'people': people}
    return render(request, 'ra/found.html', context)


#This is an index of all RA Appointments belonging to a given person. 
Developer: sfu-fas, Project: coursys, Lines of code: 20, Source: views.py

Example 2: browse

def browse(request):
    if 'tabledata' in request.GET:
        return RADataJson.as_view()(request)

    form = RABrowseForm()
    context = {'form': form, 'supervisor_only': not request.units}
    return render(request, 'ra/browse.html', context) 
Developer: sfu-fas, Project: coursys, Lines of code: 9, Source: views.py

Example 3: filter_queryset

def filter_queryset(self, qs):
        GET = self.request.GET

        # limit to those visible to this user
        qs = qs.filter(
            Q(unit__in=self.request.units)
            | Q(hiring_faculty__userid=self.request.user.username)
        )
        qs = qs.exclude(deleted=True)

        # "current" contracts filter
        if 'current' in GET and GET['current'] == 'yes':
            today = datetime.date.today()
            slack = 14 # number of days to fudge the start/end
            qs = qs.filter(start_date__lte=today + datetime.timedelta(days=slack),
                           end_date__gte=today - datetime.timedelta(days=slack))

        # search box
        srch = GET.get('sSearch', None)
        if srch:
            # get RA set from haystack, and use it to limit our query.
            ra_qs = SearchQuerySet().models(RAAppointment).filter(text__fuzzy=srch)[:500]
            ra_qs = [r for r in ra_qs if r is not None]
            if ra_qs:
                # ignore very low scores: elasticsearch grabs too much sometimes
                max_score = max(r.score for r in ra_qs)
                ra_pks = (r.pk for r in ra_qs if r.score > max_score/5)
                qs = qs.filter(pk__in=ra_pks)
            else:
                qs = qs.none()

        return qs 
Developer: sfu-fas, Project: coursys, Lines of code: 34, Source: views.py

Example 4: person_info

def person_info(request):
    """
    Get more info about this person, for AJAX updates on new RA form
    """
    result = {'programs': []}
    emplid = request.GET.get('emplid', None)
    if not emplid or not emplid.isdigit() or len(emplid) != 9:
        pass
    else:
        programs = []
        
        # GradPrograms
        emplid = request.GET['emplid']
        grads = GradStudent.objects.filter(person__emplid=emplid, program__unit__in=request.units)
        for gs in grads:
            pdata = {
                     'program': gs.program.label,
                     'unit': gs.program.unit.name,
                     'status': gs.get_current_status_display(),
                     }
            programs.append(pdata)

        result['programs'] = programs
        
        # other SIMS info
        try:
            otherinfo = more_personal_info(emplid, needed=['citizen', 'visa'])
            result.update(otherinfo)
        except SIMSProblem as e:
            result['error'] = str(e)

    return HttpResponse(json.dumps(result), content_type='application/json;charset=utf-8') 
Developer: sfu-fas, Project: coursys, Lines of code: 34, Source: views.py
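
Example 4 assembles the JSON response by hand with json.dumps; on Django 1.7 and later the same result can be had with django.http.JsonResponse. A small sketch, not part of the original project:

from django.http import JsonResponse

def person_info(request):
    result = {'programs': []}
    # ... populate result exactly as in the example above ...
    return JsonResponse(result)   # serializes the dict and sets Content-Type: application/json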

Example 5: quick_search

def quick_search(request):
    if 'term' in request.GET:
        term = request.GET['term']
        grads = GradStudent.objects.filter(program__unit__in=request.units) \
                .filter(_get_query(term)) \
                .select_related('person', 'program')[:500] 
                # take more here so the sorting gets more useful students: trim to 50 top later
        
        # sort according to ACTIVE_STATUS_ORDER to get useful students at the top: 
        #   decorate with order, sort, and build jquery response
        grads_sort = [(ACTIVE_STATUS_ORDER[gs.current_status], gs) for gs in grads]
        grads_sort.sort()
        grads_sort = grads_sort[:50]
        
        data = [{'value': str(g.slug), 'label': "%s, %s, %s" % 
                 (g.person.name(), g.program.label, g.get_current_status_display())} 
                 for _,g in grads_sort]
        response = HttpResponse(content_type='application/json')
        json.dump(data, response, indent=1)
        return response
    elif 'search' in request.GET:
        grad_slug = request.GET['search']
        try:
            grad = GradStudent.objects.get(slug=grad_slug, program__unit__in=request.units)
            return HttpResponseRedirect(reverse('grad:view', kwargs={'grad_slug':grad.slug}))
        except GradStudent.DoesNotExist:
            return HttpResponseRedirect(reverse('grad:not_found') + "?search=" + urllib.parse.quote_plus(grad_slug.encode('utf8')))
    else:
        return ForbiddenResponse(request, 'must send term') 
Developer: sfu-fas, Project: coursys, Lines of code: 31, Source: quick_search.py
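
The redirect in Example 5 URL-encodes the search term with urllib.parse.quote_plus, which is the only place the urllib package actually appears in these examples. A quick illustration of what it does:

from urllib.parse import quote_plus

# Spaces become '+'; non-ASCII text is percent-encoded as UTF-8 bytes.
print(quote_plus('grad student 2024'))    # grad+student+2024
print(quote_plus('张三'.encode('utf8')))   # %E5%BC%A0%E4%B8%89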

Example 6: view

def view(request, pathway_id):
    print(pathway_id)
    
#    pathway_data = kegg_rest_request('get/hsa%s' % (pathway_id))
    pathway = Pathway.objects.get(kegg=pathway_id)

     
    pathway.genes = pathway.genes.split(',')
    print(pathway.genes)
    
#    genes = parse_genes(pathway_data)
#    pathway = {}
#    pathway['name'] = pathway_data.split('\n')[1].replace('NAME', '') 
#    #get gene_ids
#    genelist = []
#    for gene in genes:
#        
#        genelist.append('hsa:%s' % gene['id'])
##        print gene['id']
#    gene_url = '+'.join(genelist)
#    url = '/conv/ncbi-geneid/%s' % (gene_url)
#    results = kegg_rest_request(url)
    #print results
    
    
    #if request.method == 'GET':
    return render_to_response('pathway_analysis/view.html', {'pathway':pathway}, context_instance=RequestContext(request)) 
Developer: raonyguimaraes, Project: mendelmd, Lines of code: 29, Source: views.py

Example 7: dicopinionResult

def dicopinionResult(request):
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
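    # Inferred from the indexing below: each dateCount row appears to hold
    # [month, day, positive, negative, neutral, title_count] for one recent day.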
    stock_name = get_stock_name(dicStockNum)

    for pageNum in range(1, 10):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(dicStockNum)+',f_'+str(pageNum)+'.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            '<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        print(type(gotTitle))
        for i in range(len(gotTitle)):
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    segList = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    # print(tx_npl(gotTitle[i][1]))
                    for eachItem in segList:
                        if eachItem != ' ':
                            if eachItem in positiveWord:
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)}) 
Developer: LinLidi, Project: StockSensation, Lines of code: 33, Source: views.py

Example 8: nbopinionResult

def nbopinionResult(request):
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()

    clf = joblib.load(homedir+'/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir+'/StockVisualData/Vect')
    transformer = joblib.load(homedir+'/StockVisualData/Tfidf')

    for pageNum in range(1, 21):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(Nb_stock_number)+'_'+str(pageNum)+'.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            '<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        for i in range(len(gotTitle)):
            text_predict = []
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_list = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    seg_text = " ".join(seg_list)
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    if predicted == '积极':
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消极':
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':
                        dateCount[j][4] += 1
    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})

# set up the date array
Developer: LinLidi, Project: StockSensation, Lines of code: 43, Source: views.py

Example 9: dispatch

def dispatch(self, request, *args, **kwargs):
        if kwargs['frequency'] not in constants.FREQUENCIES:
            raise Http404()
        self.payment_frequency = kwargs['frequency']

        # Ensure that the donation amount, currency and source page are legit
        start_form = StartCardPaymentForm(request.GET)
        if not start_form.is_valid():
            return HttpResponseRedirect('/')

        self.amount = start_form.cleaned_data['amount']
        self.currency = start_form.cleaned_data['currency']
        self.source_page = Page.objects.get(pk=start_form.cleaned_data['source_page_id']).specific
        return super().dispatch(request, *args, **kwargs) 
Developer: mozilla, Project: donate-wagtail, Lines of code: 16, Source: views.py
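
Binding request.GET to a Django form, as dispatch() does with StartCardPaymentForm above, is the usual way to validate query-string input. A minimal sketch with a hypothetical form; SearchForm and its fields are invented for illustration:

from django import forms
from django.http import HttpResponse, HttpResponseRedirect

class SearchForm(forms.Form):
    q = forms.CharField(max_length=100)
    page = forms.IntegerField(min_value=1, required=False)

def search(request):
    form = SearchForm(request.GET)          # bind the query string to the form
    if not form.is_valid():                 # type and range checks happen here
        return HttpResponseRedirect('/')
    page = form.cleaned_data['page'] or 1   # cleaned_data holds converted values
    return HttpResponse('q=%s page=%d' % (form.cleaned_data['q'], page))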

Example 10: pay_periods

def pay_periods(request):
    """
    Calculate number of pay periods between contract start and end dates.
    i.e. number of work days in period / 10
    
    I swear this was easier than doing it in JS, okay?
    """
    day = datetime.timedelta(days=1)
    week = datetime.timedelta(days=7)
    if 'start' not in request.GET or 'end' not in request.GET:
        result = ''
    else:
        st = request.GET['start']
        en = request.GET['end']
        try:
            st = datetime.datetime.strptime(st, "%Y-%m-%d").date()
            en = datetime.datetime.strptime(en, "%Y-%m-%d").date()
        except ValueError:
            result = ''
        else:
            # move start/end into Mon-Fri work week
            if st.weekday() == 5:
                st += 2*day
            elif st.weekday() == 6:
                st += day
            if en.weekday() == 5:
                en -= day
            elif en.weekday() == 6:
                en -= 2*day

            # number of full weeks (until sameday: last same weekday before end date)
            weeks = ((en-st)/7).days
            sameday = st + weeks*week
            assert sameday <= en < sameday + week
            
            # number of days remaining
            days = (en - sameday).days
            if sameday.weekday() > en.weekday():
                # don't count weekend days in between
                days -= 2
            
            days += 1 # count both start and end days
            result = "%.1f" % ((weeks*5 + days)/10.0)
    
    return HttpResponse(result, content_type='text/plain;charset=utf-8') 
Developer: sfu-fas, Project: coursys, Lines of code: 47, Source: views.py
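
The weekday arithmetic in pay_periods can be cross-checked with numpy's business-day counter; a rough sketch, assuming numpy is available. Because busday_count only counts Mon-Fri, the original's weekend-snapping of the endpoints is handled implicitly, so the results should agree:

import datetime
import numpy as np

def pay_periods_np(start, end):
    # busday_count counts Mon-Fri days in [start, end), so add one day to make
    # the end date inclusive, then divide by 10 working days per pay period.
    workdays = np.busday_count(start, end + datetime.timedelta(days=1))
    return '%.1f' % (workdays / 10.0)

print(pay_periods_np(datetime.date(2024, 1, 1), datetime.date(2024, 1, 12)))  # 1.0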

Example 11: _course_info_staff

def _course_info_staff(request, course_slug):
    """
    Course front page
    """
    course = get_object_or_404(CourseOffering, slug=course_slug)
    member = Member.objects.get(offering=course, person__userid=request.user.username, role__in=['INST','TA','APPR'])
    activities = all_activities_filter(offering=course)
    any_group = True in [a.group for a in activities]
    
    # Non-AJAX way to reorder an activity; see the reorder_activity view function for the AJAX way
    order = None  
    act = None  
    if 'order' in request.GET:  
        order = request.GET['order']  
    if 'act' in request.GET:  
        act = request.GET['act']  
    if order and act:  
        reorder_course_activities(activities, act, order)  
        return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))  


    # Todo: is the activity type necessary?
    activities_info = []
    total_percent = 0
    for activity in activities:
        if activity.percent:
            total_percent += activity.percent

        if isinstance(activity, NumericActivity):
            activities_info.append({'activity':activity, 'type':ACTIVITY_TYPE['NG']})            
        elif isinstance(activity, LetterActivity):
            activities_info.append({'activity':activity, 'type':ACTIVITY_TYPE['LG']})

    if len(activities) == 0:
        num_pages = Page.objects.filter(offering=course)
        if num_pages == 0:
            messages.info(request, "Students won't see this course in their menu on the front page. As soon as some activities or pages have been added, they will see a link to the course info page.")
    
    discussion_activity = False
    if course.discussion:
        discussion_activity = discuss_activity.recent_activity(member)

    # advertise combined offering if applicable.
    offer_combined = course.joint_with() and len(activities) == 0
    
    context = {'course': course, 'member': member, 'activities_info': activities_info, 'from_page': FROMPAGE['course'],
               'order_type': ORDER_TYPE, 'any_group': any_group, 'total_percent': total_percent, 'discussion_activity': discussion_activity,
               'offer_combined': offer_combined}
    return render(request, "grades/course_info_staff.html", context) 
Developer: sfu-fas, Project: coursys, Lines of code: 51, Source: views.py

Example 12: formula_tester

def formula_tester(request, course_slug):
    course = get_object_or_404(CourseOffering, slug=course_slug)
    numeric_activities = NumericActivity.objects.filter(offering=course, deleted=False)
    result = ""
    
    if 'formula' in request.GET: # If the form has been submitted...
        activity_entries = []
        faked_activities = [] # used to evaluate the formula
        has_error = False
        for numeric_activity in numeric_activities:
            activity_form_entry = ActivityFormEntry(request.GET, prefix=numeric_activity.slug)
            if not activity_form_entry.is_valid():
                has_error = True
            else:
                value = activity_form_entry.cleaned_data['value']
                if not value:
                    value = 0
                faked_activities.append(FakeActivity(numeric_activity.name, numeric_activity.short_name,
                                                     activity_form_entry.cleaned_data['status'],
                                                     numeric_activity.max_grade, numeric_activity.percent,
                                                     value))
            activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
            

        formula_form_entry = FormulaFormEntry(request.GET)
        formula_form_entry.activate_form_entry_validation(course_slug, None)
        
        if not formula_form_entry.is_valid():
            has_error = True
        if has_error:
            messages.error(request, "Please correct the error below")
        else:
            parsed_expr = pickle.loads(formula_form_entry.pickled_formula)
            act_dict = activities_dictionary(faked_activities)
            try:
                result = eval_parse(parsed_expr, FakeEvalActivity(course), act_dict, None, True)
            except EvalException:
                messages.error(request,  "Can not evaluate formula")
    else:
        activity_entries = []
        for numeric_activity in numeric_activities:
            activity_form_entry = ActivityFormEntry(prefix=numeric_activity.slug)
            activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
        formula_form_entry = FormulaFormEntry()
    context = {'course': course, 'activity_entries': activity_entries,
               'formula_form_entry': formula_form_entry, 'result': result}
    return render(request, 'grades/formula_tester.html', context) 
Developer: sfu-fas, Project: coursys, Lines of code: 49, Source: views.py

Example 13: dicopinionResult

def dicopinionResult(request):
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
    stock_name = get_stock_name(dicStockNum)

    # crawl the first list pages (pages 1-9 here); switch to asynchronous crawling later
    for pageNum in range(1, 10):
        print(f'page:{pageNum}')
        urlPage = 'http://guba.eastmoney.com/list,' + \
                  str(dicStockNum) + ',f_' + str(pageNum) + '.html'
        stockPageRequest = requests.get(urlPage, headers=headers)
        htmlTitleContent = stockPageRequest.text

        resp = Selector(text=htmlTitleContent)
        nodes = resp.xpath(
            '//div[contains(@class,"articleh normal_post") or contains(@class,"articleh normal_post odd")]')

        for index, item in enumerate(nodes):
            view = item.xpath('./span[@class="l1 a1"]/text()').extract_first()
            comment_count = item.xpath('./span[@class="l2 a2"]/text()').extract_first()
            title = item.xpath('./span[@class="l3 a3"]/a/text()').extract_first()
            author = item.xpath('./span[@class="l4 a4"]/a/text()').extract_first()
            create_time = item.xpath('./span[@class="l5 a5"]/text()').extract_first()
            # parse the month/day from the post date
            date_pattern = re.search('(\d+)-(\d+)', create_time)

            month = sub_zero(date_pattern.group(1))

            day = sub_zero(date_pattern.group(2))

            for j in range(len(dateCount)):  # the most recent 5 days

                if int(month) == dateCount[j][0] and int(day) == dateCount[j][1]:
                    dateCount[j][5] += 1  # one more title seen on this day
                    segList = list(jieba.cut(title, cut_all=True))  # segment the title with jieba
                    # print(tx_npl(gotTitle[i][1]))
                    for eachItem in segList:
                        if eachItem != ' ':
                            if eachItem in positiveWord:  # crude but simple dictionary lookup
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1

                # print(f'{month}-{day}: {len(segList)} segments')

    # data for the most recent 5 days
    print(dateCount)
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)}) 
Developer: Rockyzsu, Project: StockPredict, Lines of code: 54, Source: views.py

Example 14: nbopinionResult

def nbopinionResult(request):
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()

    clf = joblib.load(homedir + '/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir + '/StockVisualData/Vect')
    transformer = joblib.load(homedir + '/StockVisualData/Tfidf')

    for pageNum in range(1, 10):

        urlPage = 'http://guba.eastmoney.com/list,' + \
                  str(Nb_stock_number) + '_' + str(pageNum) + '.html'
        stockPageRequest = requests.get(urlPage, headers=headers)
        htmlTitleContent = stockPageRequest.text

        resp = Selector(text=htmlTitleContent)
        nodes = resp.xpath(
            '//div[contains(@class,"articleh normal_post") or contains(@class,"articleh normal_post odd")]')

        for index, item in enumerate(nodes):
            view = item.xpath('./span[@class="l1 a1"]/text()').extract_first()
            comment_count = item.xpath('./span[@class="l2 a2"]/text()').extract_first()
            title = item.xpath('./span[@class="l3 a3"]/a/text()').extract_first()
            author = item.xpath('./span[@class="l4 a4"]/a/text()').extract_first()
            create_time = item.xpath('./span[@class="l5 a5"]/text()').extract_first()
            # parse the month/day from the post date
            date_pattern = re.search('(\d+)-(\d+)', create_time)

            month = sub_zero(date_pattern.group(1))

            day = sub_zero(date_pattern.group(2))

            text_predict = []
            for j in range(len(dateCount)):
                if int(month) == dateCount[j][0] and int(day) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_list = list(jieba.cut(title, cut_all=True))
                    seg_text = " ".join(seg_list)
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    if predicted == '积极':
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消极':
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':
                        dateCount[j][4] += 1
                    # predict() returns only the label, no confidence score

    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})


# set up the date array
Developer: Rockyzsu, Project: StockPredict, Lines of code: 61, Source: views.py


Note: the request.GET examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.