seq_id
string | text
string | repo_name
string | sub_path
string | file_name
string | file_ext
string | file_size_in_byte
int64 | program_lang
string | lang
string | doc_type
string | stars
int64 | dataset
string | pt
string | api
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
251738639
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from .models import Port, Category, BuildHistory, Maintainer, Dependency, Builder, User
from bs4 import BeautifulSoup
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
import requests
import json
def index(request):
    """Render the landing page with an A-Z letter index and every category."""
    # Uppercase A-Z; the template turns these into per-letter links.
    letters = [chr(code) for code in range(ord('A'), ord('Z') + 1)]
    context = {
        'alphabet': letters,
        'categories': Category.objects.order_by('name'),
    }
    return render(request, 'ports/index.html', context)
def categorylist(request, cat):
    """Render a paginated (100 per page) list of ports in category *cat*.

    ``Paginator.get_page`` already falls back to page 1 for non-integer
    page numbers and to the last page for out-of-range ones, so the old
    try/except for PageNotAnInteger/EmptyPage was dead code and has been
    removed.
    """
    all_ports = Port.objects.filter(categories__name=cat).order_by('id')
    paginated_ports = Paginator(all_ports, 100)
    ports = paginated_ports.get_page(request.GET.get('page', 1))
    return render(request, 'ports/categorylist.html',
                  {
                      'ports': ports,
                      'portscount': all_ports.count(),
                      'category': cat
                  })
def letterlist(request, letter):
    """Render every port whose name starts with *letter* (case-insensitive).

    Filters in the database via ``istartswith`` instead of the old
    approach of loading every Port row and comparing the first character
    in Python — which was O(all ports) per request and crashed with
    IndexError (``list(port.name)[0]``) on an empty port name.
    """
    ports = Port.objects.filter(name__istartswith=letter)
    return render(request, 'ports/letterlist.html',
                  {
                      'ports': ports,
                      'letter': letter.upper(),
                      'portscount': ports.count()
                  })
def portdetail(request, name):
    """Render the detail page for one port: maintainers, dependencies,
    and the build history grouped per builder (most recent first)."""
    port = Port.objects.get(name=name)
    builders = Builder.objects.values_list('name', flat=True)
    build_history = {
        builder: BuildHistory.objects.filter(
            builder_name__name=builder, port_name=name
        ).order_by('-time_start')
        for builder in builders
    }
    context = {
        'port': port,
        'build_history': build_history,
        'maintainers': Maintainer.objects.filter(ports__name=name),
        'dependencies': Dependency.objects.filter(port_name_id=port.id),
        'builders_list': builders,
    }
    return render(request, 'ports/portdetail.html', context)
def all_builds_view(request):
    """Render the paginated build history, optionally status-filtered.

    GET shows all builds.  POST with a ``status-filter`` value narrows
    the result (the literal value "All Builds" means no narrowing but is
    still reported back to the template via ``filter_applied``).
    """
    filter_applied = False
    if request.method == 'POST':
        # .get() avoids a MultiValueDictKeyError (HTTP 500) when the
        # form field is missing from the POST body; the old code indexed
        # request.POST directly inside the truthiness check.
        filter_by = request.POST.get('status-filter')
        if filter_by:
            if filter_by == "All Builds":
                all_builds = BuildHistory.objects.all().order_by('-time_start')
            else:
                all_builds = BuildHistory.objects.filter(status=filter_by).order_by('-time_start')
            filter_applied = filter_by
        else:
            return HttpResponse("Something went wrong")
    else:
        all_builds = BuildHistory.objects.all().order_by('-time_start')
    # get_page handles invalid and out-of-range page numbers itself, so
    # no exception handling is needed around it.
    paginated_builds = Paginator(all_builds, 100)
    builds = paginated_builds.get_page(request.GET.get('page', 1))
    return render(request, 'ports/all_builds.html', {
        'all_builds': builds,
        'filter_applied': filter_applied,
    })
def stats(request):
    """Render the statistics landing page (no context needed)."""
    template = 'ports/stats.html'
    return render(request, template)
def stats_portdetail(request, name):
    """Render the statistics page for a single port.

    Answers with a plain response instead of an HTTP 500 when *name*
    matches no port (``Port.objects.get`` raises ``DoesNotExist``
    otherwise; ``ObjectDoesNotExist`` is already imported at the top of
    this module but was previously unused).
    """
    try:
        port = Port.objects.get(name=name)
    except ObjectDoesNotExist:
        return HttpResponse("Port not found")
    return render(request, 'ports/stats_portdetail.html', {
        'port': port,
    })
def get_ports_of_maintainers(maintainers, request):
    """Return ``(page_of_ports, total_count)`` for every port maintained
    by any maintainer in *maintainers*.

    The per-maintainer querysets are OR-ed together.  Starting from an
    empty queryset fixes the UnboundLocalError the previous counter-based
    loop raised when *maintainers* was empty.
    """
    all_ports = Port.objects.none()
    for maintainer in maintainers:
        all_ports = all_ports | maintainer.ports.all()
    all_ports = all_ports.order_by('id')
    all_ports_num = all_ports.count()
    # get_page copes with non-integer and out-of-range page numbers, so
    # the old PageNotAnInteger/EmptyPage handlers were dead code.
    paginated_ports = Paginator(all_ports, 100)
    ports = paginated_ports.get_page(request.GET.get('page', 1))
    return ports, all_ports_num
def maintainer_detail_github(request, github_handle):
    """Render the maintainer page addressed by GitHub handle."""
    maintainers = Maintainer.objects.filter(github=github_handle)
    ports, all_ports_num = get_ports_of_maintainers(maintainers, request)
    context = {
        'maintainers': maintainers,
        'maintainer': github_handle,
        'all_ports_num': all_ports_num,
        'ports': ports,
        'github': True,
    }
    return render(request, 'ports/maintainerdetail.html', context)
def maintainer_detail_email(request, name, domain):
    """Render the maintainer page addressed by e-mail (name@domain)."""
    maintainers = Maintainer.objects.filter(name=name, domain=domain)
    ports, all_ports_num = get_ports_of_maintainers(maintainers, request)
    context = {
        'maintainers': maintainers,
        'maintainer': name,
        'ports': ports,
        'all_ports_num': all_ports_num,
        'github': False,
    }
    return render(request, 'ports/maintainerdetail.html', context)
# Respond to ajax-call triggered by the search box
def search(request):
    """Handle the search-box ajax call: search ports by name or by
    description and render the matching rows (max 50)."""
    if request.method != 'POST':
        # The old code fell through and returned None here, which makes
        # Django raise an error (HTTP 500); answer explicitly instead.
        return HttpResponse("Method Not Allowed")
    search_text = request.POST['search_text']
    search_by = request.POST['search_by']
    if search_by == "search-by-port-name":
        results = Port.objects.filter(name__icontains=search_text)[:50]
    elif search_by == "search-by-description":
        results = Port.objects.filter(description__search=search_text)[:50]
    else:
        # Unknown search mode previously left `results` unbound and
        # crashed with UnboundLocalError (HTTP 500).
        results = Port.objects.none()
    return render(request, 'ports/search.html', {
        'results': results,
        'search_text': search_text,
        'search_by': search_by
    })
# Respond to ajax call for loading tickets
def tickets(request):
    """Fetch open Trac tickets for a port (ajax) and render them.

    Scrapes the MacPorts Trac query page and extracts the summary link
    of each ticket row.
    """
    if request.method != 'POST':
        # Returning None (the old fall-through) makes Django error out.
        return HttpResponse("Method Not Allowed")
    port_name = request.POST['portname']
    url = "https://trac.macports.org/query?status=!closed&port=~{}".format(port_name)
    # A timeout keeps a slow or unreachable Trac from hanging this
    # worker forever (requests has no default timeout).
    r = requests.get(url, timeout=10)
    soup = BeautifulSoup(r.content, 'html5lib')
    all_tickets = []
    # NOTE(review): only rows with class 'prio2' are scraped; confirm
    # tickets of other priorities should really be skipped.
    for row in soup.findAll('tr', attrs={'class': 'prio2'}):
        summary_cell = row.find('td', attrs={'class': 'summary'})
        all_tickets.append({
            'url': summary_cell.a['href'],
            'title': summary_cell.a.text,
        })
    return render(request, 'ports/tickets.html', {
        'portname': port_name,
        'tickets': all_tickets,
    })
# Respond to ajax calls for searching within a category
def category_filter(request):
    """Handle ajax filtering of the port table on a category page or a
    maintainer page; the two cases differ only in the lookup field."""
    if request.method == 'POST':
        content = request.POST['content']
        lookup_field_by_content = {
            "Category": 'categories__name',
            "Maintainer": 'maintainers__name',
        }
        if content in lookup_field_by_content:
            query = request.POST['query']
            search_in = request.POST['search_in']
            lookups = {
                lookup_field_by_content[content]: search_in,
                'name__icontains': query,
            }
            filtered_ports = Port.objects.filter(**lookups)
            return render(request, 'ports/filtered_table.html', {
                'ports': filtered_ports,
                'search_in': search_in,
                'query': query,
                'content': content,
            })
# Accept submissions from mpstats and update the users table
@csrf_exempt
def stats_submit(request):
    """Accept a form-encoded JSON stats submission from mpstats and
    store it as a new User row.

    The body looks like ``<formkey>=<json>``; only the part after the
    FIRST '=' is the JSON payload.
    """
    if request.method != "POST":
        return HttpResponse("Method Not Allowed")
    try:
        submitted = request.body.decode("utf-8")
        # Split only on the first '=' (the form key separator): the old
        # split('=')[1] truncated any JSON payload that itself
        # contained a '=' character.
        received_json = json.loads(submitted.split('=', 1)[1])
        user = User()
        user.uuid = received_json['id']
        user.osx_version = received_json['os']['osx_version']
        user.macports_version = received_json['os']['macports_version']
        user.os_arch = received_json['os']['os_arch']
        user.xcode_version = received_json['os']['xcode_version']
        user.active_ports = received_json['active_ports']
        user.save()
        return HttpResponse("Success")
    except Exception:
        # Deliberate best-effort: any malformed body or save failure
        # yields a plain error response (the old bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit).
        return HttpResponse("Something went wrong")
| null |
app/ports/views.py
|
views.py
|
py
| 8,739 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Category.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.filter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.all",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "models.Maintainer.objects.filter",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.Maintainer.objects",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "models.Maintainer",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "models.Dependency.objects.filter",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "models.Dependency.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "models.Dependency",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "models.Builder.objects.values_list",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.Builder.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Builder",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "models.BuildHistory.objects.filter",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "models.BuildHistory.objects",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "models.BuildHistory",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "models.BuildHistory.objects.all",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "models.BuildHistory.objects",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "models.BuildHistory",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "models.BuildHistory.objects.filter",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.BuildHistory.objects",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "models.BuildHistory",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "models.BuildHistory.objects.all",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "models.BuildHistory.objects",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "models.BuildHistory",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.get",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.PageNotAnInteger",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "models.Maintainer.objects.filter",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "models.Maintainer.objects",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "models.Maintainer",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "models.Maintainer.objects.filter",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "models.Maintainer.objects",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "models.Maintainer",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.filter",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "models.Port.objects.filter",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.filter",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "models.Port.objects.filter",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "models.Port.objects",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "models.Port",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "models.User",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 231,
"usage_type": "name"
}
] |
100532373
|
#!/usr/bin/env python3
import datetime
import json
import os
import re
import fnmatch
import numpy as np
import cv2
from natsort import natsorted
import random
import sys
# Dataset root; images and VGG (via) annotation json files live beneath it.
ROOT_DIR = '/media/juan/Data/retail_products/ceiling/shoot1_one_person_vivoteks'
# Directory containing the .png frames from all cameras.
IMAGE_DIR = os.path.join(ROOT_DIR, "all_cams")
# Directory containing the VGG Image Annotator export files.
VGG_ANNOTATIONS_DIR = os.path.join(ROOT_DIR, "vgg_annotations")
# When True, images are randomly split into train/test; otherwise every
# image goes into the training set.
SPLIT_TRAIN_TEST = False
# COCO "info" section written verbatim into the output annotation files.
INFO = {
    "description": "P5-real Dataset 2",
    "url": "",
    "version": "0.1.0",
    "year": 2019,
    "contributor": "Juan Terven",
    "date_created": datetime.datetime.utcnow().isoformat(' ')
}
# COCO "licenses" section (a single placeholder license, id 1).
LICENSES = [
    {
        "id": 1,
        "name": "Attribution-NonCommercial-ShareAlike License",
        "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
    }
]
# COCO "categories" section: one entry per SKU class, ids 1..30 in the
# order the classes were labeled.
_SKU_NAMES = [
    'ritz_medium',
    'angies_boom_pop_chip',
    'red_bull_red',
    'ivory_concentrated_dishwashing',
    'terra_chips',
    'lays_potato_chips',
    'dawn_ultra_dishwashing',
    'equate_cotton_bandage',
    'equate_exam_gloves',
    'frosted_flakes',
    'red_bull_sugar_free',
    'nutter_butter_cookies',
    'lysol_disinfecting',
    'salted_cashew_halves',
    'dawn_simply_clean',
    'dawn_ultra_platinum',
    'oreo_cookies',
    'ritz_small',
    'chips_ahoy',
    'vita_coconut_water',
    'red_bull_blue',
    'bounty_napkins',
    'ritz_large',
    'red_bull_yellow',
    'tostitos_scoops',
    'veggie_straws',
    'lays_stax_chips',
    'tostitos_salsa',
    'tide_detergent',
    'equate_wound_dressing',
]
CATEGORIES = [
    {'id': idx, 'name': sku, 'supercategory': 'sku'}
    for idx, sku in enumerate(_SKU_NAMES, start=1)
]
def main():
    """Convert VGG (via) annotation json files into COCO-format files.

    Scans IMAGE_DIR for .png images and VGG_ANNOTATIONS_DIR for .json
    annotation exports, builds COCO 'images' and 'annotations' records,
    and writes them to annotations_train_cam29_crop.json (plus a test
    counterpart when SPLIT_TRAIN_TEST is on) under ROOT_DIR.

    Fix: COCO bboxes are [x, y, width, height]; the old code wrote
    [x, y, x+w, y+h] (corner coordinates), producing invalid annotations.
    """
    coco_output_train = {
        "info": INFO,
        "licenses": LICENSES,
        "categories": CATEGORIES,
        "images": [],
        "annotations": []
    }
    coco_output_test = {
        "info": INFO,
        "licenses": LICENSES,
        "categories": CATEGORIES,
        "images": [],
        "annotations": []
    }
    image_id = 1          # next COCO image id
    product_id = 1        # next COCO annotation id
    no_regions_count = 0  # images whose VGG entry carried no regions
    annotations_train = []
    annotations_test = []
    missing_annotations = []  # images with a region lacking a 'Class' attribute
    # Collect candidate image files ending with ".png".
    image_names = []
    images_processed = []  # guards against duplicates across json files
    for image_name in os.listdir(IMAGE_DIR):
        if image_name.endswith(".png"):
            image_names.append(image_name)
    print(len(image_names), image_names)
    # Collect the VGG annotation files in natural sort order.
    vgg_jsons = []
    for json_file in os.listdir(VGG_ANNOTATIONS_DIR):
        if json_file.endswith(".json"):
            vgg_jsons.append(json_file)
    vgg_jsons = natsorted(vgg_jsons)
    print(vgg_jsons)
    if SPLIT_TRAIN_TEST:
        indices = list(range(0, len(image_names)))
        random.seed(a=1, version=2)  # deterministic split
        # NOTE(review): sampling from range(1, ...) means index 0 can
        # never land in the training set -- confirm this is intended.
        training_indices = random.sample(range(1, len(image_names)), 4000)
        testing_indices = list(set(indices) - set(training_indices))
    else:
        training_indices = list(range(0, len(image_names)))
        testing_indices = []
    # go through each vgg json file
    for vgg_json in vgg_jsons:
        print(vgg_json)
        vgg_json_path = os.path.join(VGG_ANNOTATIONS_DIR, vgg_json)
        with open(vgg_json_path) as json_file:
            data = json.load(json_file)
        keys = list(data['_via_img_metadata'].keys())
        print('num keys:', len(keys))
        for key in keys:
            image_name = data['_via_img_metadata'][key]['filename']
            # only process images that exist on disk and were not seen yet
            if image_name in image_names and not(image_name in images_processed):
                image_filename = os.path.join(IMAGE_DIR, image_name)
                # the image is read only to obtain its width/height
                image = cv2.imread(image_filename)
                regions = data['_via_img_metadata'][key]['regions']
                if len(regions) > 0:
                    # save image info
                    image_info = {'id': image_id, 'file_name': image_name, 'width': image.shape[1],
                                  'height': image.shape[0], 'licence': 1, 'coco_url': ""}
                    img_idx = image_names.index(image_name)
                    if img_idx in training_indices:
                        coco_output_train["images"].append(image_info)
                        print('Training:', image_name)
                    elif img_idx in testing_indices:
                        coco_output_test["images"].append(image_info)
                        print('Testing:', image_name)
                    # get annotations
                    for region in regions:
                        print(region)
                        if 'Class' in region['region_attributes']:
                            class_name = region['region_attributes']['Class']
                            x = region['shape_attributes']['x']
                            y = region['shape_attributes']['y']
                            w = region['shape_attributes']['width']
                            h = region['shape_attributes']['height']
                            # map class name -> category id (0 = not found)
                            cat_id = 0
                            for d in CATEGORIES:
                                if d["name"] == class_name:
                                    cat_id = d["id"]
                            if cat_id != 0:
                                # COCO bbox format is [x, y, width, height].
                                annotation = {"image_id": image_id, "iscrowd": 0, "area": int(w*h),
                                              "bbox": [x, y, w, h], "segmentation": [],
                                              "id": product_id, "category_id": cat_id}
                                if img_idx in training_indices:
                                    annotations_train.append(annotation)
                                elif img_idx in testing_indices:
                                    annotations_test.append(annotation)
                                product_id += 1
                            else:
                                print("CATEGORY NOT FOUND:", class_name)
                        else:
                            missing_annotations.append(image_name)
                    image_id += 1
                    images_processed.append(image_name)
                else:
                    no_regions_count += 1
    coco_output_train["annotations"] = annotations_train
    coco_output_test["annotations"] = annotations_test
    with open('{}/annotations_train_cam29_crop.json'.format(ROOT_DIR), 'w') as output_json_file:
        json.dump(coco_output_train, output_json_file)
    if SPLIT_TRAIN_TEST:
        with open('{}/annotations_test_cam29_crop.json'.format(ROOT_DIR), 'w') as output_json_file:
            json.dump(coco_output_test, output_json_file)
    print(missing_annotations)
    print('image_id:', image_id)
    print('no regions count:', no_regions_count)
# Run the conversion when executed as a script.
if __name__ == "__main__":
    main()
| null |
tools_vgg_annotation_to_coco.py
|
tools_vgg_annotation_to_coco.py
|
py
| 9,190 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "natsort.natsorted",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 314,
"usage_type": "call"
}
] |
369097868
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: redefine DCP.subtitle_type as an
    optional CharField with fixed burn-in / SMPTE / Interop choices."""
    dependencies = [
        ('vault', '0002_bluray_dvd'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dcp',
            name='subtitle_type',
            # blank=True keeps the field optional in forms; the choices
            # are bytestrings (Python 2-era migration file).
            field=models.CharField(blank=True, max_length=50, choices=[(b'burnin', b'Burn-in'), (b'smpte', b'SMPTE'), (b'interop', b'Interop')]),
            preserve_default=True,
        ),
    ]
| null |
vault/migrations/0003_auto_20150512_1942.py
|
0003_auto_20150512_1942.py
|
py
| 526 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
}
] |
483082673
|
'''
Evaluate performance of a single trained classifier on multiple patient-stay-slices
'''
import os
import numpy as np
import pandas as pd
from joblib import dump, load
import sys
sys.path.append(os.path.join(os.path.abspath('../'), 'src'))
DEFAULT_PROJECT_REPO = os.path.sep.join(__file__.split(os.path.sep)[:-2])
PROJECT_REPO_DIR = os.path.abspath(
os.environ.get('PROJECT_REPO_DIR', DEFAULT_PROJECT_REPO))
sys.path.append(os.path.join(PROJECT_REPO_DIR, 'src'))
sys.path.append(os.path.join(PROJECT_REPO_DIR, 'src', 'SkorchLogisticRegression'))
from sklearn.preprocessing import StandardScaler
from SkorchLogisticRegression import SkorchLogisticRegression
#import LR model before importing other packages because joblib files act weird when certain packages are loaded
from feature_transformation import *
import matplotlib.pyplot as plt
from sklearn.metrics import (accuracy_score, balanced_accuracy_score, f1_score,
average_precision_score, confusion_matrix, log_loss,
roc_auc_score, roc_curve, precision_recall_curve, precision_score, recall_score)
from utils import load_data_dict_json
import ast
import random
import pickle
import glob
import seaborn as sns
from split_dataset import Splitter
def get_best_model(clf_models_dir, filename_aka):
    """Return the training-history file (matching *filename_aka* under
    *clf_models_dir*) whose final-epoch validation recall is highest."""
    history_files = glob.glob(os.path.join(clf_models_dir, filename_aka))
    n_files = len(history_files)
    final_valid_loss = np.zeros(n_files)
    final_precision_valid = np.zeros(n_files)
    final_recall_valid = np.zeros(n_files)
    final_precision_train = np.zeros(n_files)
    for idx, history_file in enumerate(history_files):
        hist_df = pd.DataFrame(json.load(open(history_file)))
        # keep only the metrics from the last recorded epoch
        final_valid_loss[idx] = hist_df.valid_loss.values[-1]
        final_precision_valid[idx] = hist_df.precision_valid.values[-1]
        final_recall_valid[idx] = hist_df.recall_valid.values[-1]
        final_precision_train[idx] = hist_df.precision_train.values[-1]
    # NaN metrics become 0 so they can never be selected as the best
    final_precision_valid[np.isnan(final_precision_valid)] = 0
    final_precision_train[np.isnan(final_precision_train)] = 0
    final_recall_valid[np.isnan(final_recall_valid)] = 0
    return history_files[np.argmax(final_recall_valid)]
def get_best_model_after_threshold_search(clf_models_dir, filename_aka):
    """Among history CSVs matching *filename_aka*, keep only models whose
    final training precision reached 0.9 and return the one with the
    highest final validation recall."""
    history_files = glob.glob(os.path.join(clf_models_dir, filename_aka))
    n_files = len(history_files)
    precision_train = np.zeros(n_files)
    recall_train = np.zeros(n_files)
    precision_valid = np.zeros(n_files)
    recall_valid = np.zeros(n_files)
    for idx, history_file in enumerate(history_files):
        hist_df = pd.read_csv(history_file)
        # final-epoch metrics only
        precision_train[idx] = hist_df.precision_train.values[-1]
        recall_train[idx] = hist_df.recall_train.values[-1]
        precision_valid[idx] = hist_df.precision_valid.values[-1]
        recall_valid[idx] = hist_df.recall_valid.values[-1]
    # NaNs can never win the argmax below
    precision_valid[np.isnan(precision_valid)] = 0
    recall_valid[np.isnan(recall_valid)] = 0
    # get model with max recall at precision >= 0.9
    keep_mask = precision_train >= 0.9
    kept_files = np.array(history_files)[keep_mask]
    kept_recall_valid = recall_valid[keep_mask]
    return kept_files[np.argmax(kept_recall_valid)]
def plot_best_model_training_plots(best_model_history_file, plt_name):
    """Plot per-epoch train/validation curves for several metrics of one
    model's training history and save the figure as ``<plt_name>.png``.

    The bound metrics ('fpu_bound'/'tpl_bound') plot the FP/TP counts
    against their optimized bounds; all other metrics plot the
    train/validation columns directly.
    """
    metrics = ['precision', 'recall', 'bce_loss', 'surr_loss', 'fpu_bound', 'tpl_bound']
    training_hist_df = pd.DataFrame(json.load(open(best_model_history_file)))
    f, axs = plt.subplots(len(metrics), 1, figsize=(8,8), sharex=True)
    for i, metric in enumerate(metrics):
        if (metric == 'fpu_bound'):
            # false-positive count vs its upper bound
            axs[i].plot(training_hist_df.epoch, training_hist_df['fpu_bound_train'], color='r', label='FP upper bound')
            axs[i].plot(training_hist_df.epoch, training_hist_df['fp_train'], color='b', label='FP train')
        elif (metric == 'tpl_bound'):
            # true-positive count vs its lower bound
            axs[i].plot(training_hist_df.epoch, training_hist_df['tpl_bound_train'], color='r', label='TP lower bound')
            axs[i].plot(training_hist_df.epoch, training_hist_df['tp_train'], color='b', label='TP train')
        else:
            # Column naming differs between history formats:
            # '<metric>_train' (new) vs 'train_<metric>' (old).  Only a
            # missing DataFrame column (KeyError) should trigger the
            # fallback; the previous bare `except:` hid every other
            # error as well.
            try:
                axs[i].plot(training_hist_df.epoch, training_hist_df['%s_train'%metric], color='b', label='%s(train)'%metric)
                axs[i].plot(training_hist_df.epoch, training_hist_df['%s_valid'%metric], color='k', label='%s(validation)'%metric)
            except KeyError:
                axs[i].plot(training_hist_df.epoch, training_hist_df['train_%s'%metric], color='b', label='%s(train)'%metric)
                axs[i].plot(training_hist_df.epoch, training_hist_df['valid_%s'%metric], color='k', label='%s(validation)'%metric)
        axs[i].set_ylabel(metric)
        axs[i].legend()
        axs[i].grid(True)
    # only the bottom subplot (last loop index) gets the x label
    axs[i].set_xlabel('epochs')
    plt.suptitle(plt_name)
    f.savefig(plt_name+'.png')
def plot_all_models_training_plots(clf_models_dir, all_models_history_files_aka, plt_name):
    """Overlay per-epoch precision/recall curves of every model whose
    history file matches *all_models_history_files_aka* and save the
    figure as ``<plt_name>.png``.

    Only models whose final training precision exceeds 0.2 are drawn;
    histories that fail to load or lack columns are skipped silently by
    the outer bare ``except`` (NOTE(review): that also hides genuine
    errors -- consider narrowing it).
    """
    metrics = ['precision', 'recall']
    f, axs = plt.subplots(len(metrics), 1, figsize=(8,8), sharex=True)
    sns.set_context("notebook", font_scale=1.25)
    alpha=0.3
    all_models_history_files = glob.glob(os.path.join(clf_models_dir, all_models_history_files_aka))
    for f_ind, model_history_file in enumerate(all_models_history_files):
        training_hist_df = pd.DataFrame(json.load(open(model_history_file)))
        try:
            # skip models that never reached a minimally useful precision
            if training_hist_df['precision_train'].values[-1]>0.2:
                for i, metric in enumerate(metrics):
                    # plot epochs vs precision on train and validation
                    if (metric == 'fpu_bound'):
                        axs[i].plot(training_hist_df.epoch, training_hist_df['fpu_bound_train'], color='r',
                                    label='FP upper bound', alpha=alpha)
                        axs[i].plot(training_hist_df.epoch, training_hist_df['fp_train'], color='b', label='FP train', alpha=alpha)
                    elif (metric == 'tpl_bound'):
                        axs[i].plot(training_hist_df.epoch, training_hist_df['tpl_bound_train'], color='r',
                                    label='TP lower bound', alpha=alpha)
                        axs[i].plot(training_hist_df.epoch, training_hist_df['tp_train'], color='b', label='TP train', alpha=alpha)
                        # axs[i].set_ylim([0, 2500])
                    else:
                        # fall back to the older 'train_<metric>' column
                        # naming when '<metric>_train' is missing
                        try:
                            axs[i].plot(training_hist_df.epoch, training_hist_df['%s_train'%metric], color='b',
                                        label='train', alpha=alpha)
                            axs[i].plot(training_hist_df.epoch, training_hist_df['%s_valid'%metric], color='r',
                                        label='valid', alpha=alpha)
                            if (metric=='precision')|(metric=='recall'):
                                yticks = np.arange(0, 1.1, 0.2)
                                yticklabels = ['%.2f'%ii for ii in yticks]
                                axs[i].set_yticks(yticks)
                                axs[i].set_yticklabels(yticklabels)
                        except:
                            axs[i].plot(training_hist_df.epoch, training_hist_df['train_%s'%metric], color='b',
                                        label='%s(train)'%metric, alpha=alpha)
                            axs[i].plot(training_hist_df.epoch, training_hist_df['valid_%s'%metric], color='r',
                                        label='%s(valid)'%metric, alpha=alpha)
                    axs[i].set_ylabel(metric)
                    # label the legend only once (first file) to avoid
                    # one legend entry per plotted model
                    if f_ind == 0:
                        axs[i].legend(loc='upper left')
                    # axs[i].grid(True)
        except:
            continue
    # for ax in axs:
    #     ax.legend(loc='upper left')
    #     ax.grid(True)
    # NOTE(review): `i` here is the last value from the inner loop; this
    # labels only the bottom subplot.
    axs[i].set_xlabel('epochs')
    axs[i].set_xlim([0, 500])
    # plt.suptitle(plt_name)
    f.savefig(plt_name+'.png')
    # f.savefig(plt_name+'.pdf', bbox_inches='tight', pad_inches=0)
def get_all_precision_recalls(clf_models_dir, filename_aka):
    """Collect final-epoch precision and recall on train/valid/test for
    every history CSV matching *filename_aka* under *clf_models_dir*.

    Returns six numpy arrays (precision for train, valid, test, then
    recall for train, valid, test) followed by the list of file names.
    """
    history_files = glob.glob(os.path.join(clf_models_dir, filename_aka))
    n_files = len(history_files)
    split_names = ('train', 'valid', 'test')
    precision = {split: np.zeros(n_files) for split in split_names}
    recall = {split: np.zeros(n_files) for split in split_names}
    for idx, history_file in enumerate(history_files):
        hist_df = pd.read_csv(history_file)
        # last row of each metric column == final-epoch value
        for split in split_names:
            precision[split][idx] = hist_df['precision_%s' % split].values[-1]
            recall[split][idx] = hist_df['recall_%s' % split].values[-1]
    return (precision['train'], precision['valid'], precision['test'],
            recall['train'], recall['valid'], recall['test'],
            history_files)
def make_precision_recall_boxplots(precision_train_np, precision_valid_np, precision_test_np, recall_train_np, recall_valid_np, recall_test_np, plt_name, title_str=''):
    '''Plot boxplots of precision and recall distributions over hyperparameters.

    Six boxplots are drawn on one axis — precision then recall, each for
    train / valid / test — and the figure is written to ``<plt_name>.png``.

    Parameters
    ----------
    precision_train_np, precision_valid_np, precision_test_np : 1-D arrays
        Per-hyperparameter precision scores for each split.
    recall_train_np, recall_valid_np, recall_test_np : 1-D arrays
        Per-hyperparameter recall scores for each split.
    plt_name : str
        Output file name without the '.png' extension.
    title_str : str, optional
        Extra text appended to the plot title.
    '''
    f, axs = plt.subplots(1, 1, figsize=(8,8))
    sns.set_context("notebook", font_scale=1.25)
    # Positions form two visual groups: precision boxes left, recall boxes right.
    xticks = [0.5, 1.0, 1.5, 3.5, 4.0, 4.5]
    axs.boxplot([precision_train_np, precision_valid_np, precision_test_np, recall_train_np, recall_valid_np, recall_test_np], positions=xticks, widths=(0.3, 0.3, 0.3, 0.3, 0.3, 0.3))
    xticklabels = ['precision_train', 'precision_valid', 'precision_test', 'recall_train', 'recall_valid', 'recall_test']
    axs.set_xticks(xticks)
    axs.set_xticklabels(xticklabels, rotation=20)
    yticks = np.arange(0, 1.1, 0.1)
    yticklabels = ['%.2f'%ii for ii in yticks]
    axs.set_yticks(yticks)
    axs.set_yticklabels(yticklabels)
    axs.set_ylim([0, 1])
    axs.grid(True)
    axs.set_title('Precision and Recall Over all Hyperparameters '+ title_str , fontsize=14)
    f.savefig(plt_name+'.png')
    # f.savefig('precision_recall_boxplot.pdf', bbox_inches='tight', pad_inches=0)
    # Close the figure so repeated calls don't accumulate open figures.
    plt.close(f)
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # CLI: locations of trained models and train/test split CSVs, plus the
    # name of the binary outcome column to evaluate against.
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--clf_models_dir', default=None, type=str,
                        help='Directory where classifier models are saved')
    parser.add_argument('--clf_train_test_split_dir', default=None, type=str,
                        help='Directory where the train-test split data for the classifier is saved')
    parser.add_argument('--outcome_column_name', default='clinical_deterioration_outcome', type=str,
                        help='name of outcome column in test dataframe')
    args = parser.parse_args()
    clf_models_dir = args.clf_models_dir

    ## get the test patient id's
    # get the test set's csv and dict
    x_train_df = pd.read_csv(os.path.join(args.clf_train_test_split_dir, 'x_train.csv'))
    y_train_df = pd.read_csv(os.path.join(args.clf_train_test_split_dir, 'y_train.csv'))
    # x_valid_df = pd.read_csv(os.path.join(args.clf_train_test_split_dir, 'x_valid.csv'))
    # y_valid_df = pd.read_csv(os.path.join(args.clf_train_test_split_dir, 'y_valid.csv'))
    x_test_df = pd.read_csv(os.path.join(args.clf_train_test_split_dir, 'x_test.csv'))
    y_test_df = pd.read_csv(os.path.join(args.clf_train_test_split_dir, 'y_test.csv'))
    y_test_dict_file = os.path.join(args.clf_train_test_split_dir, 'y_dict.json')
    x_test_dict_file = os.path.join(args.clf_train_test_split_dir, 'x_dict.json')

    # import the y dict to get the id cols
    y_test_dict = load_data_dict_json(y_test_dict_file)
    x_test_dict = load_data_dict_json(x_test_dict_file)
    id_cols = parse_id_cols(y_test_dict)
    feature_cols = parse_feature_cols(x_test_dict)
    outcome_col = args.outcome_column_name

    # # get performance metrics
    x_train = x_train_df[feature_cols].values.astype(np.float32)
    y_train = y_train_df[outcome_col].values
    x_test = x_test_df[feature_cols].values.astype(np.float32)
    y_test = y_test_df[outcome_col].values

    # get the validation data
    # Split the training data once more into train/valid, grouping rows by
    # subject_id so a subject never appears on both sides of the split.
    splitter = Splitter(size=0.25, random_state=41,
                        n_splits=1,
                        cols_to_group='subject_id')
    # Assign training instances to splits by provided keys
    key_train = splitter.make_groups_from_df(x_train_df[id_cols])
    for ss, (tr_inds, va_inds) in enumerate(splitter.split(x_train, y_train, groups=key_train)):
        x_tr = x_train[tr_inds].copy()
        y_tr = y_train[tr_inds].copy()
        x_valid = x_train[va_inds]
        y_valid = y_train[va_inds]
    # NOTE(review): with n_splits=1 the loop runs once, so rebinding y_train
    # here is safe. x_train is NOT rebound — from this point on only the
    # scaled arrays (x_*_transformed, built from x_tr) pair with y_train.
    y_train = y_tr
    del(y_tr)

    # load the scaler
    # NOTE(review): the file handle passed to pickle.load is never closed
    # explicitly; relies on GC.
    scaler = pickle.load(open(os.path.join(clf_models_dir, 'scaler.pkl'), 'rb'))
    x_train_transformed = scaler.transform(x_tr)
    x_valid_transformed = scaler.transform(x_valid)
    x_test_transformed = scaler.transform(x_test)

    # load model minimizing BCE loss
    # skorch_lr_bce = SkorchLogisticRegression(n_features=x_test.shape[1])
    # skorch_lr_bce.initialize()
    # The triple-quoted strings below are disabled code paths kept for
    # reference (incremental-min-precision and BCE+perturbation variants).
    '''
    sl_rand_init_incremental_min_precision_filename_aka = 'skorch_logistic_regression*surrogate_loss_tight*warm_start=false*incremental_min_precision=true*history.json'
    # plot_all_models_training_plots(clf_models_dir, sl_rand_init_incremental_min_precision_filename_aka, 'skorch_lr_incremental_min_precision')
    precisions_train_incremental_min_precision, precisions_valid_incremental_min_precision, precisions_test_incremental_min_precision, recalls_train_incremental_min_precision, recalls_valid_incremental_min_precision, recalls_test_incremental_min_precision= get_all_precision_recalls(clf_models_dir, sl_rand_init_incremental_min_precision_filename_aka.replace('history.json', '.csv'))
    make_precision_recall_boxplots(precisions_train_incremental_min_precision, precisions_valid_incremental_min_precision, precisions_test_incremental_min_precision, recalls_train_incremental_min_precision, recalls_valid_incremental_min_precision, recalls_test_incremental_min_precision, 'lr_precision_recall_boxplot_incremental_min_precision', '(Ramp Up)')
    '''
    # Gather per-hyperparameter final precision/recall for each training
    # strategy. The glob pattern's 'history.json' suffix is swapped for
    # '.csv' to select the per-run performance CSVs instead.
    sl_rand_init_direct_min_precision_filename_aka = 'skorch_logistic_regression*surrogate_loss_tight*warm_start=false*incremental_min_precision=false*history.json'
    # plot_all_models_training_plots(clf_models_dir, sl_rand_init_direct_min_precision_filename_aka, 'skorch_lr_direct_min_precision')
    precisions_train_direct_min_precision, precisions_valid_direct_min_precision, precisions_test_direct_min_precision, recalls_train_direct_min_precision, recalls_valid_direct_min_precision, recalls_test_direct_min_precision, training_files_direct_min_precision = get_all_precision_recalls(clf_models_dir, sl_rand_init_direct_min_precision_filename_aka.replace('history.json', '.csv'))
    # make_precision_recall_boxplots(precisions_train_direct_min_precision, precisions_valid_direct_min_precision, precisions_test_direct_min_precision, recalls_train_direct_min_precision, recalls_valid_direct_min_precision, recalls_test_direct_min_precision, 'lr_precision_recall_boxplot_direct_min_precision', '(Direct)')
    '''
    sl_bce_perturb_filename_aka = 'skorch_logistic_regression*surrogate_loss_tight*warm_start=true*history.json'
    plot_all_models_training_plots(clf_models_dir, sl_bce_perturb_filename_aka, 'skorch_lr_bce_perturb')
    precisions_train_bce_perturb, precisions_valid_bce_perturb, precisions_test_bce_perturb, recalls_train_bce_perturb, recalls_valid_bce_perturb, recalls_test_bce_perturb = get_all_precision_recalls(clf_models_dir, sl_bce_perturb_filename_aka.replace('history.json', '.csv'))
    make_precision_recall_boxplots(precisions_train_bce_perturb, precisions_valid_bce_perturb, precisions_test_bce_perturb, recalls_train_bce_perturb, recalls_valid_bce_perturb, recalls_test_bce_perturb, 'lr_precision_recall_boxplot_bce_perturb', '(BCE + Perturbation)')
    '''
    bce_plus_thresh_filename_aka = 'skorch_logistic_regression*cross_entropy_loss*warm_start=false*history.json'
    # plot_all_models_training_plots(clf_models_dir, bce_plus_thresh_filename_aka, 'skorch_lr_bce_plus_thresh')
    precisions_train_bce_plus_thresh, precisions_valid_bce_plus_thresh, precisions_test_bce_plus_thresh, recalls_train_bce_plus_thresh, recalls_valid_bce_plus_thresh, recalls_test_bce_plus_thresh, training_files_bce_plus_thresh = get_all_precision_recalls(clf_models_dir, bce_plus_thresh_filename_aka.replace('history.json', '.csv'))
    # make_precision_recall_boxplots(precisions_train_bce_plus_thresh, precisions_valid_bce_plus_thresh, precisions_test_bce_plus_thresh, recalls_train_bce_plus_thresh, recalls_valid_bce_plus_thresh, recalls_test_bce_plus_thresh, 'lr_precision_recall_boxplot_bce_plus_thresh', '(BCE + Threshold Search)')
    sl_loose_filename_aka = 'skorch_logistic_regression*surrogate_loss_loose*warm_start=false*history.json'
    # plot_all_models_training_plots(clf_models_dir, sl_loose_filename_aka, 'skorch_lr_sl_loose')
    precisions_train_sl_loose, precisions_valid_sl_loose, precisions_test_sl_loose, recalls_train_sl_loose, recalls_valid_sl_loose, recalls_test_sl_loose, training_files_sl_loose = get_all_precision_recalls(clf_models_dir, sl_loose_filename_aka.replace('history.json', '.csv'))
    # make_precision_recall_boxplots(precisions_train_sl_loose, precisions_valid_sl_loose, precisions_test_sl_loose, recalls_train_sl_loose, recalls_valid_sl_loose, recalls_test_sl_loose, 'lr_precision_recall_boxplot_sl_loose', '(Surrogate Loss Hinge Bound)')
    # from IPython import embed; embed()

    # ------------------------------------------------------------------
    # Per-method model selection: keep hyperparameter settings whose
    # train/valid precision clears a target, then pick the kept run with
    # the best validation recall. Results go into best_files_dict /
    # best_perf_dict keyed by method name.
    # ------------------------------------------------------------------
    best_files_dict = dict()
    best_perf_dict = dict()
    for ii, (method, prcs_train, recs_train, prcs_valid, recs_valid, prcs_test, recs_test, tr_files) in enumerate([
            ('direct min precision',
             precisions_train_direct_min_precision,
             recalls_train_direct_min_precision,
             precisions_valid_direct_min_precision,
             recalls_valid_direct_min_precision,
             precisions_test_direct_min_precision,
             recalls_test_direct_min_precision,
             training_files_direct_min_precision),
            # ('incremental min precision',
            # precisions_train_incremental_min_precision,
            # recalls_train_incremental_min_precision,
            # precisions_valid_incremental_min_precision,
            # recalls_valid_incremental_min_precision,
            # precisions_test_incremental_min_precision,
            # recalls_test_incremental_min_precision),
            # ('bce + perturbaton',
            # precisions_train_bce_perturb,
            # recalls_train_bce_perturb,
            # precisions_valid_bce_perturb,
            # recalls_valid_bce_perturb,
            # precisions_test_bce_perturb,
            # recalls_test_bce_perturb),
            ('bce + threshold search',
             precisions_train_bce_plus_thresh,
             recalls_train_bce_plus_thresh,
             precisions_valid_bce_plus_thresh,
             recalls_valid_bce_plus_thresh,
             precisions_test_bce_plus_thresh,
             recalls_test_bce_plus_thresh,
             training_files_bce_plus_thresh),
            ('Surrogate Loss (Hinge Bound)',
             precisions_train_sl_loose,
             recalls_train_sl_loose,
             precisions_valid_sl_loose,
             recalls_valid_sl_loose,
             precisions_test_sl_loose,
             recalls_test_sl_loose,
             training_files_sl_loose)
            ]):
        min_prec_tr = 0.7
        min_prec_va = 0.6
        keep_inds = (prcs_train>min_prec_tr)&(prcs_valid>min_prec_va)
        if keep_inds.sum()==0:
            # NOTE(review): the fallback RAISES the validation threshold to
            # 0.7 when nothing passes at 0.6 — looks intentional, confirm.
            keep_inds = (prcs_train>min_prec_tr)&(prcs_valid>0.7)
        fracs_above_min_precision = (keep_inds).sum()/len(prcs_train)
        prcs_train = prcs_train[keep_inds]
        prcs_valid = prcs_valid[keep_inds]
        prcs_test = prcs_test[keep_inds]
        recs_train = recs_train[keep_inds]
        recs_valid = recs_valid[keep_inds]
        recs_test = recs_test[keep_inds]
        tr_files = np.array(tr_files)[keep_inds]
        # Among qualifying runs, pick the one with best validation recall.
        best_ind = np.argmax(recs_valid)
        # max_recall = max(recs[keep_inds])
        print('\nMethod - %s'%method)
        print('=================================================')
        print('Frac hypers achieving above %.4f on training set : %.5f'%(min_prec_tr, fracs_above_min_precision))
        print('Precision on train/valid/test with best model :')
        print('--------------------------------------------------')
        print('Train : %.5f'%prcs_train[best_ind])
        print('Valid : %.5f'%prcs_valid[best_ind])
        print('Test : %.5f'%prcs_test[best_ind])
        print('Recall on train/valid/test with best model :')
        print('--------------------------------------------------')
        print('Train : %.5f'%recs_train[best_ind])
        print('Valid : %.5f'%recs_valid[best_ind])
        print('Test : %.5f'%recs_test[best_ind])
        print('--------------------------------------------------')
        print('best training file : %s'%tr_files[best_ind])
        best_files_dict[method] = tr_files[best_ind]
        best_perf_dict[method] = dict()
        best_perf_dict[method]['precision_train'] = prcs_train[best_ind]
        best_perf_dict[method]['precision_valid'] = prcs_valid[best_ind]
        best_perf_dict[method]['precision_test'] = prcs_test[best_ind]
        best_perf_dict[method]['recall_train'] = recs_train[best_ind]
        best_perf_dict[method]['recall_valid'] = recs_valid[best_ind]
        best_perf_dict[method]['recall_test'] = recs_test[best_ind]

    ## get the 5th, 50th and 95th percentile of recall scores
    # Bootstrap-style subsampling (80% without replacement, repeated once
    # per entry of random_seed_list) to get uncertainty on each method's
    # precision/recall at its chosen decision threshold `thr`.
    random_seed_list = [111, 412, 5318, 90, 101, 8491, 8213, 1721, 1, 58, 892, 55, 623, 199, 1829, 902, 1992, 24, 8]
    for ii, (method, best_model_fname, thr) in enumerate([
            ('Sigmoid bound', best_files_dict['direct min precision'], .5),
            ('BCE + Threshold search', best_files_dict['bce + threshold search'], .9696),
            ('Hinge Bound', best_files_dict['Surrogate Loss (Hinge Bound)'], .5)
            ]):
        # Rebuild the skorch model skeleton and load the winning run's weights
        # (the saved params file replaces the '_perf.csv' suffix with 'params.pt').
        skorch_lr_clf = SkorchLogisticRegression(n_features=x_test.shape[1])
        skorch_lr_clf.initialize()
        skorch_lr_clf.load_params(f_params=os.path.join(clf_models_dir,
                                                        best_model_fname.replace('_perf.csv', 'params.pt')))
        y_train_pred_probas = skorch_lr_clf.predict_proba(x_train_transformed)[:,1]
        y_train_preds = y_train_pred_probas>=thr
        y_test_pred_probas = skorch_lr_clf.predict_proba(x_test_transformed)[:,1]
        y_test_preds = y_test_pred_probas>=thr
        precisions_train_np, precisions_test_np = np.zeros(len(random_seed_list)), np.zeros(len(random_seed_list))
        recalls_train_np, recalls_test_np = np.zeros(len(random_seed_list)), np.zeros(len(random_seed_list))
        for k, seed in enumerate(random_seed_list):
            # NOTE(review): `seed` is never passed to random.sample, so the
            # subsamples are not actually controlled by random_seed_list —
            # the list only sets the number of repetitions. Confirm intent.
            rnd_inds_tr = random.sample(range(x_train_transformed.shape[0]), int(0.8*x_train_transformed.shape[0]))
            precisions_train_np[k] = precision_score(y_train[rnd_inds_tr], y_train_preds[rnd_inds_tr])
            recalls_train_np[k] = recall_score(y_train[rnd_inds_tr], y_train_preds[rnd_inds_tr])
            rnd_inds_te = random.sample(range(x_test.shape[0]), int(0.8*x_test.shape[0]))
            precisions_test_np[k] = precision_score(y_test[rnd_inds_te], y_test_preds[rnd_inds_te])
            recalls_test_np[k] = recall_score(y_test[rnd_inds_te], y_test_preds[rnd_inds_te])
        print('Method : %s'%method)
        train_perf_dict = {'precision_5' : np.percentile(precisions_train_np, 5),
                           'precision_50' : np.percentile(precisions_train_np, 50),
                           'precision_95' : np.percentile(precisions_train_np, 95),
                           'recall_5' : np.percentile(recalls_train_np, 5),
                           'recall_50' : np.percentile(recalls_train_np, 50),
                           'recall_95' : np.percentile(recalls_train_np, 95),}
        test_perf_dict = {'precision_5' : np.percentile(precisions_test_np, 5),
                          'precision_50' : np.percentile(precisions_test_np, 50),
                          'precision_95' : np.percentile(precisions_test_np, 95),
                          'recall_5' : np.percentile(recalls_test_np, 5),
                          'recall_50' : np.percentile(recalls_test_np, 50),
                          'recall_95' : np.percentile(recalls_test_np, 95),}
        print('Training set performance : ')
        print(train_perf_dict)
        print('Test set performance : ')
        print(test_perf_dict)

    ### select 1 classifier and plot its precision and recalls across many thresholds on train, valid and test
    # One figure per split; each method's curve of "best achievable recall
    # at a given target precision" is overlaid on the shared axes.
    f_tr, axs_tr = plt.subplots(1, 1, figsize=(8, 8))
    f_va, axs_va = plt.subplots(1, 1, figsize=(8, 8))
    f_te, axs_te = plt.subplots(1, 1, figsize=(8, 8))
    sns.set_context("notebook", font_scale=1.75)
    sns.set_style("whitegrid")
    fontsize=12
    for ii, (method, best_model_fname, model_color, chosen_prec_recall_dict) in enumerate([
            ('Sigmoid bound', best_files_dict['direct min precision'], 'r', best_perf_dict['direct min precision']),
            ('BCE + Threshold search', best_files_dict['bce + threshold search'], 'b', best_perf_dict['bce + threshold search']),
            ('Hinge Bound', best_files_dict['Surrogate Loss (Hinge Bound)'], 'g', best_perf_dict['Surrogate Loss (Hinge Bound)'])
            ]):
        skorch_lr_clf = SkorchLogisticRegression(n_features=x_test.shape[1])
        skorch_lr_clf.initialize()
        skorch_lr_clf.load_params(f_params=os.path.join(clf_models_dir,
                                                        best_model_fname.replace('_perf.csv', 'params.pt')))
        y_train_pred_probas = skorch_lr_clf.predict_proba(x_train_transformed)[:,1]
        y_train_preds = y_train_pred_probas>=0.5
        y_valid_pred_probas = skorch_lr_clf.predict_proba(x_valid_transformed)[:,1]
        y_valid_preds = y_valid_pred_probas>=0.5
        y_test_pred_probas = skorch_lr_clf.predict_proba(x_test_transformed)[:,1]
        y_test_preds = y_test_pred_probas>=0.5
        precision_train, recall_train, thresholds_pr_train = precision_recall_curve(y_train, y_train_pred_probas)
        precision_valid, recall_valid, thresholds_pr_valid = precision_recall_curve(y_valid, y_valid_pred_probas)
        precision_test, recall_test, thresholds_pr_test = precision_recall_curve(y_test, y_test_pred_probas)
        # For each target precision, the best recall among PR-curve points
        # whose precision meets the target.
        target_precisions = np.arange(0.1, 1, 0.01)
        recalls_at_target_precisions_train, recalls_at_target_precisions_valid, recalls_at_target_precisions_test = [np.zeros(len(target_precisions)), np.zeros(len(target_precisions)), np.zeros(len(target_precisions))]
        for kk, target_precision in enumerate(target_precisions):
            keep_inds_tr = precision_train>=target_precision
            keep_inds_va = precision_valid>=target_precision
            keep_inds_te = precision_test>=target_precision
            recalls_at_target_precisions_train[kk] = max(recall_train[keep_inds_tr])
            recalls_at_target_precisions_valid[kk] = max(recall_valid[keep_inds_va])
            recalls_at_target_precisions_test[kk] = max(recall_test[keep_inds_te])
        # chosen_thresh_ind = (target_precisions>=0.599)&(target_precisions<=0.601)
        for jj, (split, precs, recs, chosen_prec, chosen_rec, f, axs) in enumerate([
                ('train', target_precisions, recalls_at_target_precisions_train,
                 chosen_prec_recall_dict['precision_train'], chosen_prec_recall_dict['recall_train'], f_tr, axs_tr),
                ('valid', target_precisions, recalls_at_target_precisions_valid,
                 chosen_prec_recall_dict['precision_valid'], chosen_prec_recall_dict['recall_valid'], f_va, axs_va),
                ('test', target_precisions, recalls_at_target_precisions_test,
                 chosen_prec_recall_dict['precision_test'], chosen_prec_recall_dict['recall_test'], f_te, axs_te)]):
            axs.plot(recs, precs, color=model_color, label=method, linewidth=3, zorder=1)
            # axs[jj].plot(chosen_rec, chosen_prec, color=model_color, marker = 'x', markersize=8)
            axs.set_ylabel('Target precision', fontsize=fontsize+2)
            axs.set_title('Recalls at target precision (%s set)'%split, fontsize=fontsize+4)
            xmin = -0.003
            xmax = 0.35
            ymin = 0.35
            ymax = 1.0
            xticks = np.arange(0.0, 1.0, 0.05)
            xticklabels = ['%.2f'%x for x in xticks]
            axs.set_xticks(xticks)
            axs.set_xticklabels(xticklabels)
            axs.set_xlim([xmin, xmax])
            axs.set_ylim([ymin, ymax])
            axs.set_xlabel('Recall at target precision', fontsize=fontsize+2)
            # Mark the curve point whose target precision is closest to the
            # selected model's precision on this split.
            chosen_thresh_ind = np.argmin(abs(chosen_prec - precs))
            if ii==2:
                # Label the marker only once (last method) so the legend
                # doesn't repeat 'selected operating point' three times.
                axs.plot(recs[chosen_thresh_ind], precs[chosen_thresh_ind], color='k', marker='+', mew=3, markersize=25,
                         label='selected operating point', zorder=2)
            else:
                axs.plot(recs[chosen_thresh_ind], precs[chosen_thresh_ind], color='k', marker='+', mew=3, markersize=25, zorder=2)
            axs.legend(fontsize=fontsize)
            # f.savefig('skorch_lr_recalls_at_various_target_precisions.png', pad_inches=0)
            # Saved every iteration; the final write (all methods drawn) wins.
            f.savefig('skorch_lr_recalls_at_various_target_precisions_%s.pdf'%split, bbox_inches='tight', pad_inches=0)
            f.savefig('skorch_lr_recalls_at_various_target_precisions_%s.png'%split, bbox_inches='tight',
                      pad_inches=0.5)
    # Drop into an interactive shell for ad-hoc inspection.
    from IPython import embed; embed()

    ## plot the PR curve
    # 2x2 grid: ROC (valid/test) in the left column, PR (valid/test) in the
    # right column, one curve per method, with the high-precision /
    # low-FPR region highlighted and the chosen threshold marked.
    f, axs = plt.subplots(2, 2, figsize=(15, 15))
    sns.set_context("notebook", font_scale=1.25)
    sns.set_style("whitegrid")
    for ii, (method, best_model_fname, model_color) in enumerate([('Sigmoid bound',
                                                                   best_files_dict['direct min precision'],
                                                                   'r'),
                                                                  ('BCE',
                                                                   best_files_dict['bce + threshold search'],
                                                                   'b'),
                                                                  ('Hinge bound',
                                                                   best_files_dict['Surrogate Loss (Hinge Bound)'],
                                                                   'g')]):
        skorch_lr_clf = SkorchLogisticRegression(n_features=x_test.shape[1])
        skorch_lr_clf.initialize()
        skorch_lr_clf.load_params(f_params=os.path.join(clf_models_dir,
                                                        best_model_fname.replace('_perf.csv', 'params.pt')))
        y_valid_pred_probas = skorch_lr_clf.predict_proba(x_valid_transformed)[:,1]
        y_valid_preds = y_valid_pred_probas>=0.5
        y_test_pred_probas = skorch_lr_clf.predict_proba(x_test_transformed)[:,1]
        y_test_preds = y_test_pred_probas>=0.5
        #compute roc curve
        fpr_valid, tpr_valid, thresholds_auc_valid = roc_curve(y_valid, y_valid_pred_probas)
        precision_valid, recall_valid, thresholds_pr_valid = precision_recall_curve(y_valid, y_valid_pred_probas)
        roc_valid = roc_auc_score(y_valid, y_valid_pred_probas)
        ap_valid = average_precision_score(y_valid, y_valid_pred_probas)
        fpr_test, tpr_test, thresholds_auc_test = roc_curve(y_test, y_test_pred_probas)
        precision_test, recall_test, thresholds_pr_test = precision_recall_curve(y_test, y_test_pred_probas)
        roc_test = roc_auc_score(y_test, y_test_pred_probas)
        ap_test = average_precision_score(y_test, y_test_pred_probas)
        # get the thresholds, precisions, recalls, tprs satisfying the validation precision greater than 0.8 on validation
        # (thresholds arrays are one element shorter than precision/recall,
        # hence the [:-1] when indexing thresholds with a precision mask)
        select_inds_pr_valid = precision_valid>=0.8
        select_thresholds_valid = thresholds_pr_valid[select_inds_pr_valid[:-1]]
        select_precision_valid = precision_valid[select_inds_pr_valid]
        select_recall_valid = recall_valid[select_inds_pr_valid]
        select_inds_auc_valid = fpr_valid<=0.2
        select_thresholds_auc_valid = thresholds_auc_valid[select_inds_auc_valid]
        select_tpr_valid = tpr_valid[select_inds_auc_valid]
        select_fpr_valid = fpr_valid[select_inds_auc_valid]
        select_inds_pr_test = precision_test>=0.8
        select_thresholds_test = thresholds_pr_test[select_inds_pr_test[:-1]]
        select_precision_test = precision_test[select_inds_pr_test]
        select_recall_test = recall_test[select_inds_pr_test]
        select_inds_auc_test = fpr_test<=0.2
        select_thresholds_auc_test = thresholds_auc_test[select_inds_auc_test]
        select_tpr_test = tpr_test[select_inds_auc_test]
        select_fpr_test = fpr_test[select_inds_auc_test]
        # get the threshold, fpr, tpr of max recall with precision greater than 0.8 on validation
        if method=='BCE':
            # BCE's operating point: max recall among valid points with
            # precision >= 0.8 (threshold searched freely).
            best_ind_pr_valid = np.argmax(select_recall_valid)
            best_precision_valid = select_precision_valid[best_ind_pr_valid]
            best_recall_valid = select_recall_valid[best_ind_pr_valid]
            best_threshold_valid = select_thresholds_valid[best_ind_pr_valid]
            best_ind_auc_valid = np.argmax(select_tpr_valid)
            best_tpr_valid = select_tpr_valid[best_ind_auc_valid]
            best_fpr_valid = select_fpr_valid[best_ind_auc_valid]
            best_threshold_auc_valid = select_thresholds_auc_valid[best_ind_auc_valid]
        else:
            # Surrogate-loss methods use the fixed 0.5 threshold: take the
            # first valid point with threshold >= 0.5 and precision >= 0.8.
            keep_inds = (thresholds_pr_valid>=0.50)&(precision_valid[:-1]>=0.8)
            keep_thresholds_valid = thresholds_pr_valid[keep_inds]
            keep_precision_valid = precision_valid[:-1][keep_inds]
            keep_recall_valid = recall_valid[:-1][keep_inds]
            best_ind_pr_valid = 0
            best_precision_valid = keep_precision_valid[best_ind_pr_valid]
            best_recall_valid = keep_recall_valid[best_ind_pr_valid]
            best_threshold_valid = keep_thresholds_valid[best_ind_pr_valid]
            # keep_inds_auc = (thresholds_auc_valid>=0.50)&(fpr_valid<=0.2)
            # keep_thresholds_auc_valid = thresholds_auc_valid[keep_inds_auc]
            # keep_fpr_valid = fpr_valid[keep_inds_auc]
            # keep_tpr_valid = tpr_valid[keep_inds_auc]
            # best_ind_auc_valid = np.argmax(keep_tpr_valid)
            # best_fpr_valid = keep_fpr_valid[best_ind_auc_valid]
            # best_tpr_valid = keep_tpr_valid[best_ind_auc_valid]
            # best_threshold_auc_valid = keep_thresholds_auc_valid[best_ind_auc_valid]
            chosen_ind_auc_valid = np.argmax(select_tpr_valid)
            best_tpr_valid = select_tpr_valid[chosen_ind_auc_valid]
            best_fpr_valid = select_fpr_valid[chosen_ind_auc_valid]
        # Test-set markers are computed the same way for every method.
        chosen_ind_test = np.argmax(select_recall_test)
        best_recall_test = select_recall_test[chosen_ind_test]
        best_precision_test = select_precision_test[chosen_ind_test]
        chosen_ind_auc_test = np.argmax(select_tpr_test)
        best_tpr_test = select_tpr_test[chosen_ind_auc_test]
        best_fpr_test = select_fpr_test[chosen_ind_auc_test]
        fontsize=12
        # ROC on validation (top-left): full curve, highlighted low-FPR
        # region (thick translucent overlay), and chosen-threshold marker.
        axs[0, 0].plot(fpr_valid, tpr_valid, c=model_color, label=method +' (AUROC : %.2f)'%roc_valid)
        axs[0, 0].set_xlabel('False Positive Rate', fontsize=fontsize)
        axs[0, 0].set_ylabel('True Positive Rate', fontsize=fontsize)
        axs[0, 0].set_title('ROC (Valid)')
        axs[0, 0].legend(fontsize=fontsize)
        axs[0, 0].plot(select_fpr_valid, select_tpr_valid, linewidth=8, c=model_color, alpha=0.5)
        if ii==2:
            axs[0, 0].plot(best_fpr_valid, best_tpr_valid, 'kx', markersize=10, label='chosen threshold')
        else:
            axs[0, 0].plot(best_fpr_valid, best_tpr_valid, 'kx', markersize=10)
        # ROC on test (bottom-left).
        axs[1, 0].plot(fpr_test, tpr_test, c=model_color, label=method +' (AUROC : %.2f)'%roc_test)
        axs[1, 0].set_xlabel('False Positive Rate', fontsize=fontsize)
        axs[1, 0].set_ylabel('True Positive Rate', fontsize=fontsize)
        axs[1, 0].set_title('ROC (Test)')
        axs[1, 0].legend(fontsize=fontsize)
        axs[1, 0].plot(select_fpr_test, select_tpr_test, linewidth=8, c=model_color, alpha=0.5)
        if ii==2:
            axs[1, 0].plot(best_fpr_test, best_tpr_test, 'kx', markersize=10, label='chosen threshold')
        else:
            axs[1, 0].plot(best_fpr_test, best_tpr_test, 'kx', markersize=10)
        # PR on validation (top-right).
        axs[0, 1].plot(recall_valid, precision_valid, c=model_color, label=method +' (AUPRC : %.2f)'%ap_valid)
        axs[0, 1].set_xlabel('Recall', fontsize=fontsize)
        axs[0, 1].set_ylabel('Precision', fontsize=fontsize)
        axs[0, 1].set_title('Precision Recall Curve (Valid)')
        axs[0, 1].plot(select_recall_valid, select_precision_valid, linewidth=8, c=model_color, alpha=0.5)
        if ii==2:
            axs[0, 1].plot(best_recall_valid, best_precision_valid, 'kx', markersize=10, label='chosen threshold')
        else:
            axs[0, 1].plot(best_recall_valid, best_precision_valid, 'kx', markersize=10)
        axs[0, 1].legend(fontsize=fontsize)
        # PR on test (bottom-right).
        axs[1, 1].plot(recall_test, precision_test, c=model_color, label=method +' (AUPRC : %.2f)'%ap_test)
        axs[1, 1].set_xlabel('Recall', fontsize=fontsize)
        axs[1, 1].set_ylabel('Precision', fontsize=fontsize)
        axs[1, 1].set_title('Precision Recall Curve (Test)')
        axs[1, 1].plot(select_recall_test, select_precision_test, linewidth=8, c=model_color, alpha=0.5)
        if ii==2:
            axs[1, 1].plot(best_recall_test, best_precision_test, 'kx', markersize=10, label='chosen threshold')
        else:
            axs[1, 1].plot(best_recall_test, best_precision_test, 'kx', markersize=10)
        axs[1, 1].legend(fontsize=fontsize)
    f.savefig('roc_prc_all_methods.pdf', bbox_inches='tight', pad_inches=0)
    f.savefig('roc_prc_all_methods.png')
    from IPython import embed; embed()
| null |
scripts/mimic3benchmarks_inhospital_mortality/src/evaluate_skorch_lr_surrogate_loss_vs_bce_plus_threshold_search.py
|
evaluate_skorch_lr_surrogate_loss_vs_bce_plus_threshold_search.py
|
py
| 40,298 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.sep.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "seaborn.set_context",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "seaborn.set_context",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "utils.load_data_dict_json",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "utils.load_data_dict_json",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "split_dataset.Splitter",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "SkorchLogisticRegression.SkorchLogisticRegression",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "seaborn.set_context",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "SkorchLogisticRegression.SkorchLogisticRegression",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 512,
"usage_type": "attribute"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "IPython.embed",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 584,
"usage_type": "name"
},
{
"api_name": "seaborn.set_context",
"line_number": 585,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "SkorchLogisticRegression.SkorchLogisticRegression",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 600,
"usage_type": "attribute"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 611,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 612,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.average_precision_score",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.average_precision_score",
"line_number": 618,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 673,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 681,
"usage_type": "call"
},
{
"api_name": "IPython.embed",
"line_number": 738,
"usage_type": "call"
}
] |
348403993
|
import random

import discord
from discord.ext import commands


class General(commands.Cog):
    """Miscellaneous general-purpose bot commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def role(self, ctx):
        """Reply in-channel with a randomly chosen hero role.

        Fix: ``import random`` used to live inside this coroutine, so the
        import machinery ran on every invocation; it is now a module import.
        """
        roles = ['Damage', 'Tank', 'Support']
        output = random.choice(roles)
        await ctx.send(output)


def setup(bot):
    # Entry point used by discord.py's bot.load_extension() to register the cog.
    bot.add_cog(General(bot))
| null |
cogs/general/__init__.py
|
__init__.py
|
py
| 366 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 8,
"usage_type": "name"
}
] |
297732434
|
from list_ui import *
import json
import random
import cv2
import json
import os
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Viewer for CASIA-B silhouette masks blended over the RGB frames.

    A folder index (``folder_list_cat.json``) maps
    subject -> type -> take -> angle -> list of frame names.  The list view
    shows one entry per "sub-typ-take-angle" folder; selecting one displays
    a random sample of frames with the silhouette alpha-blended on top.
    Each entry carries a tri-state checkbox whose state is persisted to
    ``check_path`` as an "is this folder useful" checklist.
    """

    # NOTE(review): these are *class-level* (shared) attributes.  Most are
    # rebound per instance in __init__, but com_lists is mutated in place
    # via append(), so it stays on the class.  Harmless with a single
    # window; move into __init__ if more instances are ever created.
    dic = {}                  # nested folder index loaded from JSON
    files = []                # "sub-typ-take-angle" keys currently listed
    is_useful = {}            # key -> checkbox state (tri-state)
    alpha = 0.4               # blend weight of the mask overlay
    sli_min, sli_max = 0, 10  # slider range; alpha = value / sli_max
    all_files = []            # every key, before combo filtering
    key = ""                  # currently selected list entry
    sample = []               # frame names currently shown in the grid
    com_lists = []            # [subjects, types] combo-box option lists
    curr_box = ["001", "nm"]  # current (subject, type) selection
    check_path = "checklists/is_useful.json"  # persisted checklist file

    def __init__(self, *args, **kwargs):
        QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
        self.setupUi(self)
        MainWindow.setWindowTitle(self, "Mask Viewer")
        self.dic = self.read_json('folder_list_cat.json')
        self.update_lists()
        self.update_combo()
        self.subBox.currentIndexChanged.connect(self.on_combo_changed)
        self.typeBox.currentIndexChanged.connect(self.on_combo_changed)
        self.model = QtGui.QStandardItemModel(self.listView)
        self.listView.setModel(self.model)
        self.saveButton.clicked.connect(self.save_list)
        self.alphaSlider.valueChanged.connect(self.on_slider_changed)
        # Fix: QSlider.setValue expects an int; 0.4 * 10 is a float.
        self.alphaSlider.setValue(int(self.alpha * self.sli_max))
        self.alphaLabel.setText(str(self.alpha))
        self.randomButton.clicked.connect(self.randomize)
        self.update_curr_box()
        self.update_files()
        self.all_files = self.files
        self.read_list()

    def val2alpha(self, val):
        """Map a raw slider position to a blend alpha in [0, 1]."""
        return val / self.sli_max

    def on_slider_changed(self, value):
        """Update the alpha from the slider and redraw the current sample."""
        self.alpha = self.val2alpha(value)
        self.alphaLabel.setText(str(self.alpha))
        self.show_sample()

    def on_check_changed(self, item):
        """Record the checkbox state of an edited list item."""
        current_key = item.index().data()
        self.is_useful[current_key] = item.checkState()

    def on_list_changed(self, current, previous):
        """Re-sample and redraw when the list selection moves."""
        if (current.data() != None):
            self.key = current.data()
            self.create_sample()
            self.show_sample()

    def create_sample(self, sample_size=9):
        """Pick up to ``sample_size`` random frame names from the selected folder."""
        keys = self.key.split("-")
        num_files = len(self.dic[keys[0]][keys[1]][keys[2]][keys[3]])
        if num_files < 9:
            sample_size = num_files
        self.sample = random.sample(self.dic[keys[0]][keys[1]][keys[2]][keys[3]], sample_size)

    def show_sample(self):
        """Render the sampled frames as a 3-column grid of blended images."""
        self.clearLayout(self.gridLayout_2)
        labels = []
        c = 0
        for img_name in self.sample:
            i_label = QtWidgets.QLabel()
            sil_path = "../../datasets/casia_B1_silhouettes/{}-{}.png".format(self.key, img_name)
            img_path = "../../datasets/casia_B1_images/{}-{}.jpg".format(self.key, img_name)
            img = cv2.imread(img_path)
            sil = cv2.imread(sil_path)
            beta = (1.0 - self.alpha)
            new_img = cv2.addWeighted(img, self.alpha, sil, beta, 0.0)
            # cv2 image converted to qt image.
            # NOTE(review): OpenCV loads BGR but Format_RGB888 expects RGB,
            # so red/blue will appear swapped — confirm, and cvtColor if
            # color fidelity matters here.
            qtimg = QtGui.QImage(new_img.data, img.shape[1], img.shape[0], QtGui.QImage.Format_RGB888)
            pixmap = QtGui.QPixmap(QtGui.QPixmap.fromImage(qtimg))
            i_label.setPixmap(pixmap)
            labels.append(i_label)
            self.gridLayout_2.addWidget(i_label, c // 3, c % 3, 1, 1)
            c += 1

    def read_json(self, path):
        """Load and return the JSON document at ``path``."""
        with open(path) as f:
            return json.load(f)

    def update_lists(self):
        """Build the sorted subject/type option lists from the folder index."""
        self.com_lists.append(list(self.dic.keys()))
        self.com_lists.append(list(self.dic[self.curr_box[0]].keys()))
        for c_list in self.com_lists:
            c_list.sort()

    def update_combo(self):
        """Populate both combo boxes, with an "All" wildcard entry first."""
        self.subBox.addItem("All")
        self.typeBox.addItem("All")
        self.subBox.addItems(self.com_lists[0])
        self.typeBox.addItems(self.com_lists[1])

    def on_combo_changed(self):
        """Re-filter the file list when either combo box changes."""
        self.update_curr_box()
        self.update_files()
        self.write_chechables()

    def update_curr_box(self):
        """Resolve the combo selections into explicit subject/type lists."""
        sub = []
        if (self.subBox.currentText() == "All"):
            sub = self.com_lists[0]
        else:
            sub.append(self.subBox.currentText())
        typ = []
        if (self.typeBox.currentText() == "All"):
            typ = self.com_lists[1]
        else:
            typ.append(self.typeBox.currentText())
        self.curr_box = [sub, typ]

    def update_files(self):
        """Rebuild ``self.files`` for the current selection and refresh the list."""
        self.files = []
        for sub in self.curr_box[0]:
            for typ in self.curr_box[1]:
                tns = list(self.dic[sub][typ].keys())
                tns.sort()
                for tn in tns:
                    angles = list(self.dic[sub][typ][tn].keys())
                    angles.sort()
                    for angle in angles:
                        self.files.append("{}-{}-{}-{}".format(sub, typ, tn, angle))
        self.list_files()
        self.totalFilesLabel.setText("N° of folders: {}".format(len(self.files)))

    def list_files(self):
        """Repopulate the list model with one checkable item per folder key."""
        # Clear previous list
        self.model.removeRows(0, self.model.rowCount())
        # Create new list
        self.model.itemChanged.connect(self.on_check_changed)
        self.selModel = self.listView.selectionModel()
        self.selModel.currentChanged.connect(self.on_list_changed)
        for f in self.files:
            # Create an item with a caption
            item = QtGui.QStandardItem(f)
            # Add a checkbox to it
            item.setCheckable(True)
            item.setUserTristate(True)
            # Add the item to the model
            self.model.appendRow(item)

    def write_chechables(self):
        """Push saved checkbox states back onto the listed items (name kept as-is [sic])."""
        for i in range(self.model.rowCount()):
            key = self.model.item(i).text()
            # Fix: keys added since the checklist was last saved are absent
            # from is_useful; default to unchecked instead of KeyError.
            value = self.is_useful.get(key, 0)
            self.model.item(i).setCheckState(value)

    def clearLayout(self, layout):
        """Detach every widget from ``layout`` (Qt then garbage-collects them)."""
        for i in reversed(range(layout.count())):
            layout.itemAt(i).widget().setParent(None)

    def init_useful_dic(self):
        """Start a fresh checklist with every known folder unchecked."""
        self.is_useful = {}
        for key in self.all_files:
            self.is_useful[key] = 0

    def save_list(self):
        """Persist the checklist to ``check_path`` and confirm via dialog."""
        with open(self.check_path, 'w') as f:
            json.dump(self.is_useful, f)
        msg = QtWidgets.QMessageBox()
        msg.setText("Lista guardada correctamente")
        msg.setIcon(QtWidgets.QMessageBox.Information)
        msg.exec_()

    def read_list(self):
        """Load a previously saved checklist, or initialise an empty one."""
        if os.path.exists(self.check_path):
            with open(self.check_path) as f:
                self.is_useful = json.load(f)
            self.write_chechables()
        else:
            self.init_useful_dic()

    def randomize(self):
        """Draw a new random sample from the current folder and redraw."""
        self.create_sample()
        self.show_sample()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window,
    # and block in the Qt event loop until the window is closed.
    app = QtWidgets.QApplication([])
    window = MainWindow()
    window.show()
    app.exec_()
| null |
qt_files/mul_images/listv3.py
|
listv3.py
|
py
| 7,178 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.sample",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "cv2.addWeighted",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 222,
"usage_type": "call"
}
] |
106091764
|
import unittest
import json
from ..server import app


class TestServer(unittest.TestCase):
    """Smoke tests for the Flask to-do server's HTTP endpoints."""

    def setUp(self):
        """Put the app into test/debug mode and create a test client."""
        app.config["TESTING"] = True
        app.config["DEBUG"] = True
        self.app = app.test_client()
        self.assertEqual(app.debug, True)

    def test_home_root(self):
        """GET / answers 200 without following redirects."""
        home_response = self.app.get('/', follow_redirects=False)
        self.assertEqual(home_response.status_code, 200)

    def test_add_root(self):
        """POST /add with a JSON task payload answers 200."""
        payload = json.dumps({'description': 'ecrire scenarios python 3', 'status': 'ToDo'})
        add_response = self.app.post('/add', data=payload,
                                     content_type='application/json')
        self.assertEqual(add_response.status_code, 200)
| null |
ToDo/Tests/test_server.py
|
test_server.py
|
py
| 681 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "server.app.config",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "server.app",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "server.app.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "server.app",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "server.app.test_client",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "server.app",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "server.app.debug",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "server.app",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
}
] |
79873548
|
import dash
import dash_leaflet as dl
import dash_leaflet.express as dlx
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import dash_table
import dash_bootstrap_components as dbc
import pandas as pd
from dash.exceptions import PreventUpdate
from dash.dependencies import Input, Output
from animal_shelter import AnimalShelter # required module for MongoDB operations
# Data Manipulation / Model ############################################################################################

# Username and password needed to access the animal shelter database.
# NOTE(review): credentials are hard-coded in source — move them to an
# environment variable or config file before sharing/deploying.
username = "aacuser"
password = "password"
shelter = AnimalShelter(username, password)

# Create dataframe from MongoDB database (empty query {} = all documents)
df = pd.DataFrame.from_records(shelter.read({}))

# Get animal properties for dropdown menu options
genders = list(sorted(df['sex_upon_outcome'].unique()))
types = list(sorted(df['animal_type'].unique()))
breeds = list(sorted(df['breed'].unique()))
gender_options = [{'label': str(o), 'value': str(o)} for o in genders]
type_options = [{'label': str(o), 'value': str(o)} for o in types]
breed_options = [{'label': str(o), 'value': str(o)} for o in breeds]

# Define age range slider limits
age_range_max = int(df['age_upon_outcome_in_weeks'].max())

# Appearance settings
pie_chart_text_color = 'white'
table_background_color = '#333'
table_title_color = '#444'
table_outline_color = '#000'
# Dashboard ############################################################################################################
# Start Dash application
# prevent_initial_callbacks avoids firing every callback once on page load.
app = dash.Dash(__name__, prevent_initial_callbacks=True, external_stylesheets=[dbc.themes.DARKLY])

# Define dashboard layout
app.layout = html.Div([
    # Title
    html.Center(html.P(html.H2('Animal Shelter Dashboard'))),
    # Controls
    html.Div([
        # Reset button to clear menu selections
        html.Div([
            html.Button('Reset', id='reset-button', n_clicks=0),
        ], style={'width': '50px',
                  'margin-top': '24px',
                  'margin-left': '10px',
                  'margin-right': '10px',
                  'verticalAlign': 'top',
                  'display': 'inline-block'}
        ),
        # Animal type dropdown
        html.Label([
            "Animal Type:",
            dcc.Dropdown(
                id="types-dropdown",
                options=type_options,
                placeholder="Select a type",
                searchable=False,
            )
        ], style={'width': '15vw',
                  'margin-left': '10px',
                  'verticalAlign': 'top',
                  'display': 'inline-block'}
        ),
        # Animal breed dropdown
        html.Label([
            "Animal Breed:",
            dcc.Dropdown(
                id="breeds-dropdown",
                options=breed_options,
                placeholder="Select a breed",
            )
        ], style={'width': '25vw',
                  'margin-left': '10px',
                  'verticalAlign': 'top',
                  'display': 'inline-block'}
        ),
        # Animal gender dropdown
        html.Label([
            "Animal Gender:",
            dcc.Dropdown(
                id="genders-dropdown",
                options=gender_options,
                placeholder="Select a Gender",
            )
        ], style={'width': '15vw',
                  'margin-left': '10px',
                  'verticalAlign': 'top',
                  'display': 'inline-block'}
        ),
        # Animal age range slider
        html.Div([
            html.Center(id='slider-text',
                        style={'margin-bottom': 10},
                        children=['Age Range: 0 to {max_age} weeks'.format(max_age=int(age_range_max))]
                        ),
            dcc.RangeSlider(
                id='age-range-slider',
                min=0,
                max=age_range_max,
                value=[0, age_range_max],
                step=1,
                updatemode='mouseup',
                allowCross=False,
            ),
        ], style={'width': '30vw',
                  'margin-left': '10px',
                  'verticalAlign': 'top',
                  'display': 'inline-block'}
        ),
    ]),
    # Data table
    dash_table.DataTable(
        id='datatable-id',
        columns=[{"name": i, "id": i, "deletable": False, "selectable": True} for i in df.columns],
        style_header={'backgroundColor': table_title_color},
        style_cell={
            'overflow': 'hidden',
            'textOverflow': 'ellipsis',
            'maxWidth': 0,  # Adjust cell width so data fits on screen
            'backgroundColor': table_background_color,
            'border': '1px solid ' + table_outline_color
        },
        data=df.to_dict('records'),
        editable=False,  # Prevent column-level editing
        filter_action="native",  # Enable UI filtering
        sort_action="native",  # Allow columns to be sorted
        sort_mode="multi",  # Enable multi-column sorting
        column_selectable=False,  # Prevent columns from being selected
        row_selectable="single",  # Enable single-row selection
        row_deletable=False,  # Prevent rows from being deleted
        selected_columns=[],  # Indices of the selected columns in table
        selected_rows=[],  # Indices of the selected rows in table
        page_action="native",  # Paging logic is handled by the table
        page_current=0,  # Define start page
        page_size=10,  # Define number of rows per page
        style_table={'overflowY': 'auto', 'height': '365px'}
    ),
    html.Br(),
    html.Hr(),
    # Map and pie chart, laid out side by side
    html.Div(
        style={'display': 'flex'},
        children=[
            html.Div(id="map-id", style={'display': 'inline-block'}),
            html.Div(id="graph-id", style={'display': 'inline-block'})
        ]),
],
    style={'margin': '10px'}  # Create border around page
)
# Callbacks ############################################################################################################
# Callback that keeps the caption above the age slider in sync with the handles
@app.callback(
    Output('slider-text', 'children'),
    [Input('age-range-slider', 'drag_value')]
)
def update_output(value):
    """Render the 'Age Range: X to Y weeks' caption from the slider handles."""
    low, high = value[0], value[1]
    return f'Age Range: {low} to {high} weeks'
# Callback to reset all dropdown selections
@app.callback(
    [Output('types-dropdown', 'value'),
     Output('breeds-dropdown', 'value'),
     Output('genders-dropdown', 'value')],
    [Input('reset-button', 'n_clicks')]
)
def update_dropdowns(reset):
    """Clear all three dropdown selections when the Reset button is clicked.

    For any other trigger this raises PreventUpdate instead of falling off
    the end: the original implicitly returned None, which Dash reports as
    an invalid value for a multi-output callback.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    # Clear menu selections when reset is clicked
    if 'reset-button' in changed_id:
        return "", "", ""
    # PreventUpdate was imported at the top of the file but never used —
    # it is the canonical way to skip a callback update.
    raise PreventUpdate
# Callback to update the table when a menu selection is made
@app.callback(
    [Output('datatable-id', 'data'),
     Output('datatable-id', 'columns'),
     Output('datatable-id', 'selected_rows')],
    [Input('genders-dropdown', 'value'),
     Input('types-dropdown', 'value'),
     Input('breeds-dropdown', 'value'),
     Input('age-range-slider', 'value')]
)
def update_dashboard(genders_dropdown, types_dropdown, breeds_dropdown, age_range):
    """Re-query the shelter collection from the menu state and refresh the table.

    Returns (table data records, column definitions, cleared row selection).
    An empty dropdown value means "no filter" for that field; the age range
    from the slider always applies.
    """
    # Define age range slider limits and prevent range settings where no animals appear
    age_min = max(age_range[0], 0)  # Ensure age_min is positive
    age_max = max(age_range[1] + 1, age_min + 1)  # Ensure age_max is 1 greater than age_min

    # Build a single Mongo query from whichever dropdowns hold a selection.
    # This replaces the original eight-branch if/elif chain that enumerated
    # every combination of the three dropdowns; the resulting query is
    # identical in every case (the age clause was present in all branches).
    query = {"age_upon_outcome_in_weeks": {"$gte": age_min, "$lte": age_max}}
    if genders_dropdown:
        query["sex_upon_outcome"] = genders_dropdown
    if types_dropdown:
        query["animal_type"] = types_dropdown
    if breeds_dropdown:
        query["breed"] = breeds_dropdown
    dff = pd.DataFrame.from_records(shelter.read(query))

    # Table column labels (taken from the full dataset so they are stable
    # even when the filtered result is empty)
    columns = [{"name": i, "id": i, "deletable": False, "selectable": True} for i in df.columns]

    # If there are no matches to selection, use empty data
    if dff.empty:
        dff = pd.DataFrame(columns=df.columns)

    # Convert dataframe to dictionary to display in table
    data = dff.to_dict('records')

    # Return data, columns, and an empty list to clear the row selection
    return data, columns, []
# Callback to update other dropdowns and slider when a selection is made
@app.callback(
    [Output('types-dropdown', 'options'),
     Output('breeds-dropdown', 'options'),
     Output('genders-dropdown', 'options'),
     Output('age-range-slider', 'min'),
     Output('age-range-slider', 'max'),
     Output('age-range-slider', 'value'),
     Output('datatable-id', "page_current")],
    [Input('types-dropdown', 'value'),
     Input('breeds-dropdown', 'value'),
     Input('genders-dropdown', 'value')]
)
def update_menu_options(animal_type, animal_breed, animal_gender):
    """Narrow the other menus' options and the slider range to match a selection.

    Renamed from ``update_dropdowns``: the file defined two functions with
    that name, so this one silently shadowed the reset callback's function
    at module level. Dash registers callbacks at decoration time, so both
    kept working, but the collision was a latent trap.
    """
    # Build a single Mongo query from whichever dropdowns hold a selection;
    # replaces the original eight-branch if/elif chain (identical result).
    query = {}
    if animal_type:
        query["animal_type"] = animal_type
    if animal_breed:
        query["breed"] = animal_breed
    if animal_gender:
        query["sex_upon_outcome"] = animal_gender
    # Nothing selected -> reuse the full dataset without a round trip
    dff = pd.DataFrame.from_records(shelter.read(query)) if query else df

    # Guard: an empty query result would make int(min()) below raise; fall
    # back to the full dataset's options rather than crashing the callback.
    if dff.empty:
        dff = df

    # Define age range slider limits
    age_min = max(int(dff['age_upon_outcome_in_weeks'].min()), 0)  # Ensure age_min is positive
    age_max = max(int(dff['age_upon_outcome_in_weeks'].max()), age_min)  # Ensure age_max is >= age_min

    # Define menu options by sorting the unique values for each category
    selected_type = list(sorted(dff['animal_type'].unique()))
    selected_breeds = list(sorted(dff['breed'].unique()))
    selected_genders = list(sorted(dff['sex_upon_outcome'].unique()))
    selected_type_options = [{'label': str(o), 'value': str(o)} for o in selected_type]
    selected_breed_options = [{'label': str(o), 'value': str(o)} for o in selected_breeds]
    selected_gender_options = [{'label': str(o), 'value': str(o)} for o in selected_genders]

    # Return 0 for page number to reset table page when a selection is made
    return selected_type_options, selected_breed_options, selected_gender_options, age_min, age_max, [age_min, age_max], 0
# Callback to create a pie chart that displays the percentage of each breed shown in the current table view
@app.callback(
    Output('graph-id', "children"),
    Input('datatable-id', "derived_viewport_data")
)
def update_graphs(view_data):
    """Build a donut chart of breed percentages for the rows visible in the table.

    view_data: list of row dicts for the current table page, or None before
    the table has rendered (the callback is skipped in either empty case).
    """
    if view_data:
        # If the table data is empty do not update chart
        # NOTE(review): redundant — an empty list is already falsy above.
        if len(view_data) == 0:
            return
        # Use table data in chart
        dff = pd.DataFrame.from_dict(view_data)  # Create dataframe from table data
        dfb = dff['breed']  # Get breeds from current table page
        percent = dfb.value_counts(normalize=True).mul(100).round(2)  # Compute percentage of each breed in data
        labels = percent.index.tolist()  # Get labels for pie chart
        values = percent.values.tolist()  # Get percentages for pie chart
        # Keep label length fixed and set label font to courier to prevent pie chart from shifting around
        new_labels = ["{:<40}".format(label[:40]) for label in labels]
        # Create plotly express pie chart (hole=.4 makes it a donut)
        fig = px.pie(dfb, values=values, names=new_labels, hole=.4)
        # Update title and labels
        fig.update_layout({'title': {'text': 'Breeds',
                                     'x': 0.305, 'xanchor': 'center',  # Center chart title horizontally
                                     'y': 0.540, 'yanchor': 'top'},  # Center chart title vertically
                           'font': {'color': pie_chart_text_color},  # Text color
                           'paper_bgcolor': 'rgba(0, 0, 0, 0)',  # Make background transparent
                           'font_family': 'Courier New'})  # Use courier font to prevent chart from shifting
        # Return pie chart definition
        return [
            dcc.Graph(
                figure=fig
            )
        ]
# Callback to create a map showing the positions of the animals
@app.callback(
    Output('map-id', 'children'),
    [Input('datatable-id', "derived_viewport_data"),
     Input('datatable-id', "derived_viewport_selected_rows")]
)
def update_map(data, selected_rows):
    """Render a Leaflet map with one marker per animal in the table view.

    data: row dicts for the current table page; selected_rows: indices of
    the selected rows within that page.
    NOTE(review): len(data) would raise TypeError if Dash passes None here
    (as update_graphs guards against) — confirm whether that can occur.
    """
    # If table is empty do not draw map
    if len(data) == 0:
        return
    # If a table row is selected, show the location of the animal on the map
    # otherwise show the locations of all animals in the current table view
    if selected_rows:
        # Create Pandas dataframe from selected animal data
        dff = pd.DataFrame.from_dict(data).iloc[selected_rows]
    else:
        # Create Pandas dataframe from current table data
        dff = pd.DataFrame.from_dict(data)
    # Load animal latitude and longitude
    lats = dff['location_lat'].to_list()
    lons = dff['location_long'].to_list()
    # Load animal information
    animal_names = dff['name'].to_list()
    animal_types = dff['animal_type'].to_list()
    animal_breeds = dff['breed'].to_list()
    animal_ages = dff['age_upon_outcome_in_weeks'].to_list()
    animal_sexes = dff['sex_upon_outcome'].to_list()
    # Generate markers for each animal
    markers = []
    for i in range(len(lats)):
        # Add a marker definition for each animal
        markers += [
            dict(lat=lats[i],
                 lon=lons[i],
                 tooltip=animal_names[i] if animal_names[i] else "Unnamed",
                 # if the animal name empty replace it with "Unnamed"
                 popup="<body><h4>Name: " + str(animal_names[i] if animal_names[i] else "Unnamed") + "</h4>"
                       + "Type: " + str(animal_types[i]) + "<br>"
                       + "Breed: " + str(animal_breeds[i]) + "<br>"
                       + "Age: " + str(int(animal_ages[i])) + " weeks<br>"
                       + "Sex: " + str(animal_sexes[i]) + "</body>"
                 )
        ]
    # Convert markers to geojson format
    geojson_markers = dlx.dicts_to_geojson(markers)
    # Return map with markers
    return [
        dl.Map(style={'width': '50vw', 'height': '480px'},  # make width 50% and height 45%
               center=[0.5*(max(lats)+min(lats)), 0.5*(max(lons)+min(lons))],  # center map within markers
               zoom=9,  # zoom level smaller=closer
               children=[
                   dl.TileLayer(id="base-layer-id"),
                   dl.GeoJSON(data=geojson_markers)]
               )
    ]
# Script entry point: start the Dash development server (debug output off).
if __name__ == '__main__':
    app.run_server(debug=False)
| null |
web_dashboard.py
|
web_dashboard.py
|
py
| 18,615 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "animal_shelter.AnimalShelter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "dash.Dash",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.themes",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "dash_html_components.Div",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Center",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H2",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Button",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Center",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "dash_core_components.RangeSlider",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Br",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Hr",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "dash.callback_context",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 303,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "plotly.express.pie",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "dash_leaflet.express.dicts_to_geojson",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "dash_leaflet.express",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "dash_leaflet.Map",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "dash_leaflet.TileLayer",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "dash_leaflet.GeoJSON",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 374,
"usage_type": "call"
}
] |
475914742
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import KFold, cross_val_score, cross_val_predict, LeaveOneOut
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import preprocessing
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import GradientBoostingClassifier
import csv
# import pydot
# Reading data points -- input.
# NOTE(review): assumes the CSV is all-numeric with no header row — confirm.
x = []
with open('/Users/samimac2/Desktop/PythonDataFiles/Top15NoheaderNoZero.csv') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    for row in reader:  # Reading each row
        data_point = []
        for column in row:  # Reading each column of the row
            data_point.append(float(column))
        x.append(data_point)
x = np.array(x)
print(x)
# Reading results -- response; only the first column of each row is used,
# and each value is wrapped in a 1-element list so y has shape (n, 1).
y = []
# with open('/Users/samimac2/Desktop/PythonProject/testResults.csv') as csvfile:
with open('/Users/samimac2/Desktop/PythonProject/testResults.csv') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    for row in reader:
        y.append([float(row[0])])
y = np.array(y)
print(y)
print("Number of data points: ", len(y))
# normalize the data attributes
# normalized_X = preprocessing.normalize(x)
# standardize the data attributes
# standardized_X = preprocessing.scale(x)
# Scorer (function to calculate the score)
def scorer(estimator, X, y):
    """Fit *estimator* on (X, y) and return its in-sample sum of squared errors."""
    # Train and score on the same data: this is a training error, not a
    # held-out one (cross_val_score does the splitting outside this function).
    estimator.fit(X, y)
    residuals = estimator.predict(X) - y
    return (residuals ** 2).sum()
# Defining the model to use
# decisionTree = DecisionTreeRegressor(criterion='mse', splitter='best', max_depth=2, random_state=3)
decisionTree = DecisionTreeClassifier(criterion='entropy',splitter='best', max_depth=2,random_state=3)
# NOTE(review): the DecisionTreeClassifier above is immediately overwritten by
# the RandomForestClassifier below and is never used — dead assignment left
# from experimentation; confirm which model is intended.
decisionTree = RandomForestClassifier(max_features="log2")
# decisionTree = RandomForestClassifier(n_estimators=10, max_depth=None,
#                                       min_samples_split=1, random_state=0)
# decisionTree = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
#                                           max_depth=1, random_state=0).fit(x, y)
# decisionTree = RandomForestClassifier(random_state=1)
print(decisionTree)
# Defining the cross validation generator (old sklearn.cross_validation API:
# KFold takes the number of samples, not the data itself).
# cv = KFold(len(y), n_folds=2, shuffle=False)
cv = KFold(len(y), n_folds=2, shuffle=True)
# Another option for cross validation:
cv2 = LeaveOneOut(len(y))  # only needs the number of points
# Calculating cross-validated scores for the model.
scores = cross_val_score(decisionTree, x, y, cv=cv)
print("SCORES: ", scores)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# dot_data = StringIO()
# tree.export_graphviz(decisionTree, out_file=dot_data)
# graph = pydot.graph_from_dot_data(dot_data.getvalue())
# graph.write_svg("iris.svg")
# Value of the output when it was in the test set.
estimated_results = cross_val_predict(decisionTree, x, y, cv=cv)
print("PREDICTED VALUES:", estimated_results)
# model = DecisionTreeClassifier(splitter='best', max_depth=2, random_state=3)
# model.fit(x, y)
# print(model)
# # make predictions
# In-sample fit/report: the model is refit on ALL data here, so the report
# below measures training accuracy, not generalization.
decisionTree.fit(x,y)
predicted = decisionTree.predict(x)
expected = y
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
# from sklearn.externals.six import StringIO
# from sklearn import tree
| null |
MachineLearning_Python/DecisionTreeNoEntropy_v1.py
|
DecisionTreeNoEntropy_v1.py
|
py
| 3,455 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "csv.reader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.KFold",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.LeaveOneOut",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.cross_val_score",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation.cross_val_predict",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 103,
"usage_type": "name"
}
] |
574168276
|
# -*- coding: utf-8 -*-
"""Utilities."""
from six.moves import input as raw_input
import os
import sys
def query_yes_no(question, default="yes"):
    """Ask *question* on stdout and read a yes/no answer from stdin.

    default is the answer assumed when the user just hits <Enter>:
    "yes" (the default), "no", or None to force an explicit answer.
    Returns True for a yes answer, False for a no answer.
    """
    answers = {"yes": True, "y": True, "ye": True,
               "no": False, "n": False}
    # Pick the prompt suffix; the capital letter marks the default choice.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    # Re-ask until we get an empty reply (with a default set) or a valid one.
    while True:
        sys.stdout.write(question + prompt)
        reply = raw_input().lower()
        if reply == '' and default is not None:
            return answers[default]
        if reply in answers:
            return answers[reply]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
# Template for the global config file; written verbatim to
# ~/.dodo_commands/config by create_global_config() when that file is missing.
_global_config = """
[DodoCommands]
projects_dir=~/projects
python_interpreter=python
diff_tool=diff
"""
def create_global_config():
    """Create the global config file and the default_commands package dir.

    Idempotent: every path under ~/.dodo_commands is only created when it
    does not already exist; existing files are never overwritten.
    """
    base_dir = os.path.expanduser('~/.dodo_commands')
    if not os.path.exists(base_dir):
        os.mkdir(base_dir)

    config_filename = os.path.join(base_dir, "config")
    if not os.path.exists(config_filename):
        # Seed the config with the default template defined above.
        with open(config_filename, 'w') as config_file:
            config_file.write(_global_config)

    default_commands_dir = os.path.join(base_dir, "default_commands")
    if not os.path.exists(default_commands_dir):
        os.mkdir(default_commands_dir)

    init_py = os.path.join(default_commands_dir, "__init__.py")
    if not os.path.exists(init_py):
        # Touch an empty __init__.py so the directory is an importable package.
        open(init_py, 'w').close()
def remove_trailing_dashes(args):
    """Return *args* without a leading '--' item, if one is present."""
    if args and args[0] == '--':
        return args[1:]
    return args
def bordered(text):
    """Return *text* wrapped in a box of Unicode line-drawing characters.

    Every line is padded to the width of the longest line; the result ends
    with a trailing newline. Raises ValueError for an empty *text* (max()
    over zero lines).
    """
    rows = text.splitlines()
    width = max(len(row) for row in rows)
    horizontal = '─' * width
    body = ['│' + row.ljust(width)[:width] + '│' for row in rows]
    return '\n'.join(['┌' + horizontal + '┐'] + body + ['└' + horizontal + '┘']) + '\n'
| null |
dodo_commands/framework/util.py
|
util.py
|
py
| 2,457 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.stdout.write",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "six.moves.input",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
}
] |
218579025
|
import string
import numpy as np
import cv2
import math
def get_vocabulary(voc_type, EOS='EOS', PADDING='PAD', UNKNOWN='UNK'):
    """Build a character vocabulary plus char<->id lookup tables.

    voc_type selects the character set: 'LOWERCASE' (digits + lowercase),
    'ALLCASES' (digits + both cases), or 'ALLCASES_SYMBOLS' (all printable
    characters except whitespace controls). The three special tokens EOS,
    PADDING and UNKNOWN are appended last, in that order.

    Returns (voc, char2id, id2char).
    """
    charsets = {
        'LOWERCASE': string.digits + string.ascii_lowercase,
        'ALLCASES': string.digits + string.ascii_letters,
        # printable[:-6] drops the trailing whitespace chars (\t\n\r\x0b\x0c and space... the last 6).
        'ALLCASES_SYMBOLS': string.printable[:-6],
    }
    if voc_type not in charsets:
        raise KeyError('voc_type must be one of "LOWERCASE", "ALLCASES", "ALLCASES_SYMBOLS"')

    voc = list(charsets[voc_type])
    # Special tokens go at the end so character ids stay stable.
    voc.extend([EOS, PADDING, UNKNOWN])

    char2id = {ch: idx for idx, ch in enumerate(voc)}
    id2char = {idx: ch for idx, ch in enumerate(voc)}
    return voc, char2id, id2char
def rotate_img(img, angle, scale=1):
    """Rotate *img* by *angle* degrees about its centre, optionally scaling,
    expanding the output canvas so the whole rotated image fits.

    img: H x W x C image array. Returns the rotated image array.
    """
    height, width, _ = img.shape
    theta = np.deg2rad(angle)  # angle in radians
    # Bounding box of the rotated (and scaled) image.
    new_w = (abs(np.sin(theta) * height) + abs(np.cos(theta) * width)) * scale
    new_h = (abs(np.cos(theta) * height) + abs(np.sin(theta) * width)) * scale
    # Rotation about the centre of the new, larger canvas...
    matrix = cv2.getRotationMatrix2D((new_w * 0.5, new_h * 0.5), angle, scale)
    # ...then shift so the original image centre maps onto the new centre.
    shift = np.dot(matrix, np.array([(new_w - width) * 0.5, (new_h - height) * 0.5, 0]))
    matrix[0, 2] += shift[0]
    matrix[1, 2] += shift[1]
    return cv2.warpAffine(img, matrix, (int(math.ceil(new_w)), int(math.ceil(new_h))),
                          flags=cv2.INTER_LANCZOS4)
| null |
data_provider/data_utils.py
|
data_utils.py
|
py
| 1,541 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "string.digits",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "string.ascii_letters",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "string.printable",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.deg2rad",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.getRotationMatrix2D",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_LANCZOS4",
"line_number": 41,
"usage_type": "attribute"
}
] |
356384684
|
import os, sys, threading
from pyspark import SparkConf, SparkContext, SQLContext
from pyspark.sql import SparkSession, DataFrame
# Spark application setup: one context plus a Hive-enabled session sharing conf.
conf = SparkConf().setAppName('partitioning')
sc = SparkContext(conf=conf)
spark = (SparkSession.builder.config(conf=conf)
         # .config("hive.exec.dynamic.partition", "true") \
         # .config("hive.exec.dynamic.partition.mode", "nonstrict")
         .enableHiveSupport()
         .getOrCreate())
sql_context = SQLContext(sc)
# def install_and_import(packages):
#     for package_name in packages:
#         try:
#             __import__(package_name)
#         except ImportError:
#             import os
#             os.system('pip-2.7 install --user --upgrade ' + package_name)
#             # sc.install_pypi_package(package_name)
#         exec(package_name + ' =__import__(package_name)')
# packages = ['boto3', 'pandas', 'numpy']
# install_and_import(packages)
# Best-effort dependency bootstrap: pip-install any missing package onto the
# node (Python 2.7 cluster image), then import it.
try: import boto3
except ImportError:
    os.system('pip-2.7 install --user --upgrade boto3')
    import boto3
try: import pandas
except ImportError:
    os.system('pip-2.7 install --user --upgrade pandas')
    import pandas
try: import numpy
except ImportError:
    os.system('pip-2.7 install --user --upgrade numpy')
    import numpy
print(sys.version_info)
s3 = boto3.resource('s3')
# S3 location of the input parquet dataset.
bucket_name = 'gbsc-aws-project-annohive-dev-user-krferrit-us-west-1'
prefix = '1000Orig-half2-parquet'
print('Loading data')
df = spark.read.load('s3n://%s/%s/*' % (bucket_name, prefix))
print('Loaded %d rows from s3' % df.count())
print('Converting POS to int')
# Cast POS to int so ordering and binning below are numeric
# (source column is presumably a string — confirm against the parquet schema).
df = df.withColumn('POS', df['POS'].cast('int'))
df = df.orderBy('POS')
print(df)
print('Bucketing and writing dataframe')
def get_max_pos(df):
    """Return the largest POS value in *df* as a plain Python int."""
    first_row = df.selectExpr('max(POS) as m').collect()[0]
    return int(first_row.asDict()['m'])
def partition_and_submit_reference(global_df, reference_name, pos_bin_count=4000):
    """Filter *global_df* down to one reference (chromosome) and write it to S3
    as parquet, partitioned by position bin and reference name.

    NOTE(review): pos_bin_count is used as a divisor of the global max POS, so
    it is effectively the *number* of bins and bin_count below is the bin
    *width* — the names look swapped; confirm the intended semantics.
    """
    print('partition_and_submit_reference, reference_name=' + reference_name)
    filtered_df = global_df.filter(global_df['reference_name'] == str(reference_name))
    print('partition reference_name=%s count=%d' % (
        reference_name,
        filtered_df.count()
    ))
    # Add the binning column: bin width derived from the GLOBAL max position so
    # bin ids are comparable across references.
    global_max_pos = get_max_pos(global_df)
    bin_count = int(float(global_max_pos) / pos_bin_count)
    print('global_max_pos=%d, bin_count=%d' % (global_max_pos, bin_count))
    filtered_df = filtered_df.withColumn('POS_BIN_ID', (filtered_df.POS / bin_count).cast('int'))
    # output_path = 's3a://{bucket}/{prefix}/POS_BIN_ID={binID}/' % ()
    # Repartition to match the on-disk partitioning, then overwrite the target.
    filtered_df.repartition('POS_BIN_ID', 'reference_name') \
        .write.mode('overwrite') \
        .partitionBy('POS_BIN_ID', 'reference_name') \
        .parquet('s3n://' + bucket_name + '/1000Orig-half2-bucketed-4000/')
    print('finished writing parquet')
# Chromosome names: 1-22 plus the sex chromosomes.
references = [str(c) for c in range(1, 23)] + ['X', 'Y']
# references = ['1']
def print_counts(global_df):
    """Print the total row count and per-reference row counts (debug aid)."""
    print('global_df count=%d' % global_df.count())
    for reference_name in references:
        filtered_df = global_df.filter('REF = "%s"' % str(reference_name))
        print('REF=%s, count=%d' % (reference_name, filtered_df.count()))
# For simple test of connectivity
# print_counts(df)
# t_list = []
# Driver loop: process one reference at a time (threading disabled below).
for ref_name in references:
    partition_and_submit_reference(df, ref_name, 4000)
    # Turned off threading for now, just do one reference at a time
    # t = threading.Thread(
    #     target=partition_and_submit_reference,
    #     args=(df, ref_name, 4000))
    # t.start()
    # t_list.append(t)
# for t in t_list:
#     t.join()
| null |
scripts/partition.py
|
partition.py
|
py
| 3,578 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyspark.SparkConf",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.config",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pyspark.SQLContext",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "boto3.resource",
"line_number": 43,
"usage_type": "call"
}
] |
561675358
|
#!/usr/bin/env python
import numpy as np
import random
from ddpg_model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
from ounoise import OUNoise
# Hyperparameters for the DDPG agent below.
BUFFER_SIZE = int(1e6)  # replay buffer size
BATCH_SIZE = 300        # minibatch size
GAMMA = 0.99            # discount factor
TAU = 1e-3              # interpolation factor for soft update of target parameters
LR_ACTOR = 1e-4         # learning rate of the actor
LR_CRITIC = 5e-4        # learning rate of the critic
WEIGHT_DECAY = 0        # L2 weight decay for the critic optimizer
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """DDPG agent that interacts with and learns from the environment.

    Built for multi-agent training: each agent's actor sees only its own
    observation, while the critic scores the concatenated states/actions of
    all agents (centralized-critic style — see the `* num_agents` input sizes
    in __init__ and the torch.cat calls in learn()).
    """

    def __init__(self, state_size=8, action_size=2, num_agents=2, noise_theta=0, noise_sigma=0, noise_decay_rate=1, random_seed=10):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            num_agents (int): number of agents sharing the common replay buffer
            noise_theta (float): parameter theta of the Ornstein-Uhlenbeck process
            noise_sigma (float): parameter sigma of the Ornstein-Uhlenbeck process
            noise_decay_rate (float): decay rate of the Ornstein-Uhlenbeck noise
            random_seed (int): random seed passed to the network constructors
        """
        self.state_size = state_size
        self.action_size = action_size
        # self.seed = random.seed(random_seed)

        # Actor Network (w/ Target Network): maps this agent's own state to an action.
        self.actor_local = Actor(state_size, action_size, random_seed).to(device)
        self.actor_target = Actor(state_size, action_size, random_seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)

        # Critic Network (w/ Target Network): scores the JOINT state/action of
        # all agents, hence the `* num_agents` input sizes.
        self.critic_local = Critic(state_size * num_agents, action_size * num_agents, random_seed).to(device)
        self.critic_target = Critic(state_size * num_agents, action_size * num_agents, random_seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)

        # Exploration noise process (Ornstein-Uhlenbeck).
        self.ounoise = OUNoise(action_size, True, 0., noise_theta, noise_sigma, noise_decay_rate)

    def decide(self, states, use_target=False, as_tensor=False, add_noise=True, autograd=False):
        """Returns actions for given states as per current policy.

        Parameters
        ==========
            states (np.ndarray or torch.Tensor): the states the actor will evaluate
            use_target (bool): use the target actor network if True, else the local one
            as_tensor (bool): return actions as a tensor if True, else as a numpy array
            add_noise (bool): add noise from the OU process to the actor's output
            autograd (bool): keep autograd enabled while evaluating the states

        Actions are clipped to [-1, 1] after the (optional) noise is added.
        """
        # Accept numpy input by converting it to a float tensor on the device.
        if isinstance(states, np.ndarray):
            states = torch.from_numpy(states).float().to(device)
        # Select the appropriate network.
        if use_target:
            network = self.actor_target
        else:
            network = self.actor_local
        # With autograd: plain forward pass (gradients flow to the caller).
        # Without: eval mode + no_grad for a cheap inference pass, then back to train.
        if autograd:
            actions = network(states)
        else:
            network.eval()
            with torch.no_grad():
                actions = network(states)
            network.train()
        # Exploration noise.
        if add_noise:
            actions = actions + self.ounoise.sample()
        # Clipping & casting to the requested output type.
        if as_tensor:
            actions = torch.clamp(actions, -1, 1)
        else:
            actions = np.clip(actions.cpu().data.numpy(), -1, 1)
        return actions

    def learn(self, experiences, next_actions, current_actions, agent_number):
        """Update actor and critics using the sampled experiences and the updated actions.

        Params
        ======
            experiences (Tuple of (states, actions, rewards, next_states, dones)):
                The experiences sampled from the replay buffer. Each tuple element
                is a list of tensors, where the ith tensor corresponds to the ith
                agent.
            next_actions (list of tensors):
                The target actors' output, for all next_states in experiences.
                The ith tensor corresponds to the output from the ith agent.
            current_actions (list of tensors):
                The local actors' output, for all states in experiences.
                The ith tensor corresponds to the output from the ith agent.
            agent_number (int):
                The index of the current agent, to extract the correct tensors
                from experiences.
        """
        # Concatenate per-agent tensors into the joint views the critic expects;
        # rewards/dones are taken for this agent only.
        states, actions, rewards, next_states, dones = experiences
        states = torch.cat(states, dim=1)
        actions = torch.cat(actions, dim=1)
        next_states = torch.cat(next_states, dim=1)
        rewards = rewards[agent_number]
        dones = dones[agent_number]
        next_actions = torch.cat(next_actions, dim=1)
        # Detach the OTHER agents' actions so the actor gradient below flows
        # only through this agent's own actor.
        current_actions = torch.cat([ca if i == agent_number else ca.detach()
                                     for i, ca in enumerate(current_actions)], dim=1)

        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state Q values from the target critic (no gradients
        # through the target networks).
        with torch.no_grad():
            Q_targets_next = self.critic_target(next_states, next_actions)
        # Compute Q targets for current states (y_i); (1 - dones) zeroes the
        # bootstrap term at episode boundaries.
        Q_targets = rewards + (GAMMA * Q_targets_next * (1 - dones))
        # Compute critic loss (TD error, mean squared).
        Q_expected = self.critic_local(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets.detach())
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        #torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # Clip gradient
        self.critic_optimizer.step()

        # ---------------------------- update actor ---------------------------- #
        # Actor loss: maximize the local critic's score of the current policy's
        # actions (hence the negative sign).
        actor_loss = -self.critic_local(states, current_actions).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic_local, self.critic_target, TAU)
        self.soft_update(self.actor_local, self.actor_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
| null |
ddpg_agent.py
|
ddpg_agent.py
|
py
| 7,438 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.device",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "ddpg_model.Actor",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "ddpg_model.Actor",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ddpg_model.Critic",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "ddpg_model.Critic",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "ounoise.OUNoise",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 138,
"usage_type": "name"
}
] |
569805392
|
#!/usr/bin/env python3
import sys
import os
import pandas as pd
import numpy as np
import pandas as pd
import folium
import urllib
import json
import socket
from ipwhois import IPWhois
import pycountry
import io
import requests
import xarray as xr
import numpy as np
import pandas as pd
import holoviews as hv
import geoviews as gv
import geoviews.feature as gf
import geoviews.tile_sources as gts
import geopandas
from bokeh.palettes import YlOrBr3 as palette
import cartopy
from cartopy import crs as ccrs
from bokeh.tile_providers import STAMEN_TONER
from bokeh.models import WMTSTileSource
hv.notebook_extension('bokeh')  # initialize the holoviews bokeh backend

# Data locations used by the name-reader helpers below.
DATA_PATH = "data/"
MAP_PATH = DATA_PATH + "world.geo.json/countries/"
COUNTRY_CODE_DATA = DATA_PATH + "country-codes/data/country-codes.csv"

# Parse the day range from the command line. Exit with usage help on bad
# arguments instead of falling through (the old bare `except` kept running
# and crashed later with a NameError on from_day/to_day).
try:
    from_day = int(sys.argv[1])
    to_day = int(sys.argv[2])
    out_name = "./results/" + str(from_day) + "to" + str(to_day)
except (IndexError, ValueError):
    print("Arguments should be like :")
    print("python scraper.py 'From' 'to'")
    sys.exit(1)
def isNaN(num):
    """Return True iff *num* is NaN — the only value that is unequal to itself."""
    equals_itself = (num == num)
    return not equals_itself
def get_export_names():
    """Return the GDELT *export* table column names.

    Reads the single space-separated header line stored in
    ``DATA_PATH + "event_table_name"``. The file is now closed via a
    context manager (the old version leaked the handle).
    """
    with open(DATA_PATH + "event_table_name", "r") as handle:
        return handle.readlines()[0].split(" ")
def get_mentions_names():
    """Return the GDELT *mentions* table column names.

    Reads the single space-separated header line stored in
    ``DATA_PATH + "mentions_table_name"``. The file is now closed via a
    context manager (the old version leaked the handle).
    """
    with open(DATA_PATH + "mentions_table_name", "r") as handle:
        return handle.readlines()[0].split(" ")
def get_gkg_names():  # GKG
    """Return the GDELT *gkg* table column names.

    Reads the single space-separated header line stored in
    ``DATA_PATH + "gkg_table_name"``; the last name carries the trailing
    newline, which is stripped. The file is now closed via a context
    manager (the old version leaked the handle).
    """
    with open(DATA_PATH + "gkg_table_name", "r") as handle:
        names = handle.readlines()[0].split(" ")
    names[-1] = names[-1][:-1]  # drop the trailing '\n' on the last column name
    return names
def get_map_site():
    """Return two lookup dicts built from the country-codes CSV:
    (TLD -> ISO3166-1 alpha-3, ISO3166-1 alpha-3 -> TLD).
    """
    codes = pd.read_csv(COUNTRY_CODE_DATA)
    tld = codes['TLD']
    iso3 = codes['ISO3166-1-Alpha-3']
    return dict(zip(tld, iso3)), dict(zip(iso3, tld))
def scrape_list(url_ex, url_men, url_gkg, export_df, mentions_df, gkg_df):
    '''
    Download the listed export/mentions/gkg CSV files and append their
    relevant columns (col_*_list module globals) to the given aggregate
    dataframes. Returns (export_df, mentions_df, gkg_df).

    Appending to an ever-growing DataFrame gets slow, so after 50 files the
    remaining URLs are processed recursively into fresh empty dataframes,
    which are then appended back in one shot.
    '''
    for i in range(url_ex.shape[0]):
        if i >= 50:
            export_df_2 = pd.DataFrame(columns=col_ex_list)
            mentions_df_2 = pd.DataFrame(columns=col_men_list)
            gkg_df_2 = pd.DataFrame(columns=col_gkg_list)
            # BUGFIX: recurse from position i. The old code sliced iloc[100:]
            # (and label-based .loc[100:] for two of the series) while the
            # guard fires at i >= 50, silently dropping rows 50-99 and mixing
            # positional with label indexing.
            export_df_2, mentions_df_2, gkg_df_2 = scrape_list(
                url_ex.iloc[i:], url_men.iloc[i:], url_gkg.iloc[i:],
                export_df_2, mentions_df_2, gkg_df_2)
            # NOTE: DataFrame.append is removed in pandas >= 2.0; migrate to
            # pd.concat when the project upgrades.
            export_df = export_df.append(export_df_2, ignore_index=True)
            mentions_df = mentions_df.append(mentions_df_2, ignore_index=True)
            gkg_df = gkg_df.append(gkg_df_2, ignore_index=True)
            break
        else:
            s_ex = requests.get(url_ex.iloc[i])
            s_men = requests.get(url_men.iloc[i])
            s_gkg = requests.get(url_gkg.iloc[i])
            # Only keep a triple when all three downloads succeeded.
            if s_ex.status_code == 200 and s_men.status_code == 200 and s_gkg.status_code == 200:
                df_i_m = pd.read_csv(io.BytesIO(s_ex.content), sep='\t', compression='zip', names=col_ex)
                df_i_x = pd.read_csv(io.BytesIO(s_men.content), sep='\t', compression='zip', names=col_men)
                df_i_g = pd.read_csv(io.BytesIO(s_gkg.content), sep='\t', compression='zip', names=col_gkg)
                export_df = export_df.append(df_i_m[col_ex_list], ignore_index=True)
                mentions_df = mentions_df.append(df_i_x[col_men_list], ignore_index=True)
                gkg_df = gkg_df.append(df_i_g[col_gkg_list], ignore_index=True)
    return export_df, mentions_df, gkg_df
# Fetch the GDELT v2 master file list (one row per published update file).
url='http://data.gdeltproject.org/gdeltv2/masterfilelist.txt'
s=requests.get(url).content
# NOTE(review): sep='\s' is handled as a regex separator by pandas; rows with
# no url column are dropped just below.
df_list=pd.read_csv(io.StringIO(s.decode('utf-8')), sep='\s', header=None, names=['Size', 'Code', 'url'])
df_list = df_list.dropna(subset=['url'])
# We get the column names of the datasets from the text files we've created
col_ex = get_export_names()
col_men = get_mentions_names()
col_gkg = get_gkg_names()
# Lists of the column names we want to keep from each dataset
col_ex_list = ['GlobalEventID', 'Day', 'MounthYear', 'Year', 'ActionGeo_CountryCode', 'ActionGeo_Lat', 'ActionGeo_Long', 'AvgTone', 'GoldsteinScale', 'NumMentions','SOURCEURL']
col_men_list = ['GlobalEventId', 'MentionSourceName', 'MentionIdentifier', 'Confidence', 'MentionDocTone']
col_gkg_list = ['GKGRECORDID', 'DATE', 'Counts', 'SourceCommonName', 'Locations', 'DocumentIdentifier', 'V2Themes', 'Themes', 'V2Tone'] # GKG
# For col_ex_list, we don't need 'MounthYear' and 'Year', but please add 'SOURCEURL' column to it
# Create the empty aggregated dataframes with the columns we want to keep
export_df = pd.DataFrame(columns=col_ex_list)
mentions_df = pd.DataFrame(columns=col_men_list)
gkg_df = pd.DataFrame(columns=col_gkg_list)
# Keep only the urls of export datasets; GDELT publishes 96 files per day
# (one every 15 minutes), hence the 96*day slicing below.
df_ex_w01 = df_list[df_list['url'].str.contains('.export.CSV')]
df_ex_w01 = df_ex_w01.iloc[96*from_day:96*to_day,2:3] # slice the requested day range
# Keep only the urls of mentions datasets
df_men_w01 = df_list[df_list['url'].str.contains('.mentions.CSV')]
df_men_w01 = df_men_w01.iloc[96*from_day:96*to_day,2:3] # slice the requested day range
# Keep only the urls of gkg datasets
df_gkg_w01 = df_list[df_list['url'].str.contains('.gkg.csv')]
df_gkg_w01 = df_gkg_w01.iloc[96*from_day:96*to_day,2:3] # slice the requested day range
print("Load data")
# Parsing the data and returning the aggregated dataFrame
export_df, mentions_df, gkg_df = scrape_list(df_ex_w01['url'], df_men_w01['url'], df_gkg_w01['url'], export_df, mentions_df, gkg_df)
print("Save data")
# Saving the resulting dataframes as gzipped csv
export_df.to_csv(out_name + "export.csv.gz", compression="gzip")
mentions_df.to_csv(out_name + "mentions.csv.gz", compression="gzip")
gkg_df.to_csv(out_name + "gkg.csv.gz", compression="gzip")
| null |
scripts/scraper.py
|
scraper.py
|
py
| 6,638 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "holoviews.notebook_extension",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 136,
"usage_type": "call"
}
] |
361257440
|
from django.shortcuts import render
from django.views import generic
from catalogue.models import Post
from django.contrib.postgres.search import SearchVector
from django.http import HttpResponse
from member.models import Member
class IndexView(generic.ListView):
    """Catalogue list view with optional field-filtered search.

    GET parameters:
        q -- search term
        f -- field to search ("Title", "Author", "Genre", "CallNum", "Publisher")
        i -- page-size selector ("1-30", "1-50", "All"); defaults to 15 rows
    """
    template_name = "catalogue/catalogue.html"

    # Maps the "f" GET parameter to the ORM lookup used for filtering
    # (replaces the old if/elif chain with a dispatch table).
    SEARCH_FIELDS = {
        "Title": "Title__icontains",
        "Author": "Author__icontains",
        "Genre": "Genre__icontains",
        "CallNum": "CallNum__icontains",
        "Publisher": "Publisher__icontains",
    }

    def get_queryset(self):
        queryset_list = Post.objects.all().order_by("-date")
        query = self.request.GET.get("q")
        filter_q = self.request.GET.get("f")
        noviews = self.request.GET.get("i")
        if query:
            lookup = self.SEARCH_FIELDS.get(filter_q)
            # Unknown/missing "f" leaves the queryset unfiltered, as before.
            if lookup:
                queryset_list = queryset_list.filter(**{lookup: query})
        if noviews == "1-30":
            return queryset_list[:30]
        elif noviews == "1-50":
            return queryset_list[:50]
        elif noviews == "All":
            return queryset_list
        return queryset_list[:15]
class DetailView(generic.DetailView):
    """Render a single catalogue Post.

    A long-dead commented-out block (resolving Post.Status to a Member name
    and syncing member state on save) has been removed; recover it from
    version control if that feature is revived.
    """
    model = Post
    template_name = "catalogue/post.html"
| null |
A_library_V3/catalogue/views.py
|
views.py
|
py
| 1,672 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.views.generic.ListView",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "catalogue.models.Post.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "catalogue.models.Post.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "catalogue.models.Post",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "catalogue.models.Post",
"line_number": 33,
"usage_type": "name"
}
] |
117829544
|
#!/usr/bin/env python3
import hashlib
import random
import subprocess
import unittest
from pathlib import Path
from faker import Faker
from src.anonfile import AnonFile, get_logfile_path
TOKEN = None
def test_option(token):
TOKEN = token
def md5_checksum(path: Path) -> str:
    """Return the hex MD5 digest of the file at *path*."""
    with open(path, mode='rb') as file_handler:
        digest = hashlib.md5(file_handler.read())
    return digest.hexdigest()
def init_anon() -> AnonFile:
    """Build an AnonFile client with a randomized Chrome user agent,
    authenticated with the module-level TOKEN when one was provided."""
    chrome_ua = Faker().chrome(version_from=90, version_to=93, build_from=4400, build_to=4500)
    if TOKEN:
        return AnonFile(token=TOKEN, user_agent=chrome_ua)
    return AnonFile(user_agent=chrome_ua)
def write_file(file: str, lines: list) -> Path:
    """Write *lines* to *file*, newline-joined, and return its Path."""
    content = '\n'.join(lines)
    with open(file, mode='w+') as file_handler:
        file_handler.write(content)
    return Path(file)
def remove_file(file: str) -> None:
    """Delete *file* if it exists; a missing file is silently ignored.

    Rewritten from the expression-statement LBYL one-liner to EAFP, which
    is idiomatic and closes the exists()/unlink() race window.
    """
    try:
        Path(file).unlink()
    except FileNotFoundError:
        pass
class TestAnonFileLibrary(unittest.TestCase):
    """Integration tests for the AnonFile library API (requires network access)."""

    @classmethod
    def setUpClass(cls):
        # Shared client plus known remote fixtures; downloaded files are
        # collected in cls.garbage and removed in tearDownClass.
        cls.anon = init_anon()
        cls.test_file = Path("tests/test.txt")
        cls.test_small_file = "https://anonfiles.com/93k5x1ucu0/test_txt"
        cls.test_med_file = "https://anonfiles.com/b7NaVd0cu3/topsecret_mkv"
        cls.garbage = []

    def test_upload(self):
        # Upload the local fixture and sanity-check status and the URL parts.
        upload = self.anon.upload(self.test_file, progressbar=True, enable_logging=True)
        self.assertTrue(upload.status, msg="Expected 200 HTTP Error Code")
        self.assertTrue(all([upload.url.scheme, upload.url.netloc, upload.url.path]), msg="Invalid URL.")

    def test_preview(self):
        # Preview metadata of a known small remote file without downloading it;
        # expected id/name/size values match the fixture uploaded above.
        preview = self.anon.preview(self.test_small_file)
        self.assertTrue(preview.status, msg="Error in status property.")
        self.assertEqual(self.test_small_file, preview.url.geturl(), msg="Error in URL property.")
        self.assertEqual("93k5x1ucu0", preview.id, msg="Error in ID property.")
        self.assertEqual("test.txt", preview.file_path.name, msg="Error in name property.")
        self.assertEqual(271, preview.size, msg="Error in size property.")

    def test_download(self):
        # Download the small fixture and verify the resulting path.
        download = self.anon.download(self.test_small_file, progressbar=True, enable_logging=True)
        self.assertTrue(download.file_path.exists(), msg="Download not successful.")
        self.assertEqual(download.file_path.name, self.test_file.name, msg="Different file in download path detected.")
        self.garbage.append(download.file_path)

    def test_multipart_encoded_files(self):
        # use pre-computed checksum for faster unit tests
        download = self.anon.download(self.test_med_file, progressbar=True, enable_logging=True)
        self.assertEqual("06b6a6bea6ba82900d144d3b38c65347", md5_checksum(download.file_path), msg="MD5 hash is corrupted.")
        self.garbage.append(download.file_path)

    @classmethod
    def tearDownClass(cls):
        # Remove every file the download tests created.
        for file in cls.garbage:
            remove_file(file)
class TestAnonFileCLI(unittest.TestCase):
    """Integration tests for the `anonfile` command-line interface (requires
    network access and the CLI on PATH)."""

    @classmethod
    def setUpClass(cls):
        cls.anon = init_anon()
        # NOTE(review): the first and third URLs are duplicates — possibly
        # deliberate (duplicate handling in batch mode); confirm.
        cls.test_urls = [
            "https://anonfiles.com/n5j2O8G9u0/test_txt",
            "https://anonfiles.com/pdj2O8Gbud/test_txt",
            "https://anonfiles.com/n5j2O8G9u0/test_txt"
        ]
        cls.test_url = random.choice(cls.test_urls)
        cls.batch_file = write_file('batch.txt', cls.test_urls)
        cls.logfile = get_logfile_path()

    def test_cli_download(self):
        # CLI download of a randomly chosen URL; exit code 0 means success.
        call = subprocess.call("anonfile --verbose download --url %s --no-check" % self.test_url, shell=True)
        self.assertFalse(call, msg=f"Download failed for: {self.test_url!r}")

    def test_cli_batch_download(self):
        # Batch download from the file written in setUpClass.
        call = subprocess.call("anonfile --verbose --logging download --batch-file %s --no-check" % self.batch_file, shell=True)
        self.assertFalse(call, msg=f"Download failed for: {str(self.batch_file)!r}")

    def test_cli_log(self):
        # The previous (logging-enabled) runs should have produced a log file.
        print()
        call = subprocess.call("anonfile log --read", shell=True)
        self.assertTrue(self.logfile.exists() and (call == 0), msg=f"Error: no log file produced in {str(self.logfile)!r}")

    @classmethod
    def tearDownClass(cls):
        # Clean up the batch file and the CLI log.
        remove_file(cls.batch_file)
        remove_file(cls.logfile)
| null |
tests/test_anonfile.py
|
test_anonfile.py
|
py
| 4,203 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "hashlib.md5",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "faker.Faker",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "src.anonfile.AnonFile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "src.anonfile.AnonFile",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "src.anonfile.get_logfile_path",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 103,
"usage_type": "call"
}
] |
377789540
|
import sys
import traceback
from functools import wraps
from tornado import web
from tornado.escape import json_decode, json_encode
from tornado.log import app_log
from graphql.error import GraphQLError
from graphql.error import format_error as format_graphql_error
from graphql import graphql
from .session_control import GraphQLSession
from modules.api import api_schema
def error_status(exception):
    """Map an exception to the HTTP status code to report: HTTPError keeps
    its own code, GraphQL/execution errors are client errors (400),
    everything else is a server error (500)."""
    if isinstance(exception, web.HTTPError):
        return exception.status_code
    if isinstance(exception, (ExecutionError, GraphQLError)):
        return 400
    return 500
def error_format(exception):
    """Serialize an exception into a GraphQL-style list of error dicts."""
    if isinstance(exception, ExecutionError):
        return [{"message": message} for message in exception.errors]
    if isinstance(exception, GraphQLError):
        return [format_graphql_error(exception)]
    if isinstance(exception, web.HTTPError):
        return [{"message": exception.log_message, "reason": exception.reason}]
    return [{"message": "Unknown server error"}]
def error_response(func):
    """Decorator: convert any exception raised by the handler method into a
    JSON error body with the matching HTTP status; unexpected exception
    types are additionally logged with their traceback."""
    @wraps(func)
    def wrapper_error_response(self, *args, **kwargs):
        try:
            result = func(self, *args, **kwargs)
        except Exception as ex:
            expected = isinstance(ex, (web.HTTPError, ExecutionError, GraphQLError))
            if not expected:
                tb = "".join(traceback.format_exception(*sys.exc_info()))
                app_log.error("Error: {0} {1}".format(ex, tb))
            self.set_status(error_status(ex))
            error_json = json_encode({"errors": error_format(ex)})
            app_log.debug("error_json: %s", error_json)
            self.write(error_json)
        else:
            return result
    return wrapper_error_response
class ExecutionError(Exception):
    """GraphQL execution failure carrying one or more error messages.

    Attributes:
        status_code: HTTP status to report (default 400).
        errors: list of stringified error messages.
        message: newline-joined error messages.
    """

    def __init__(self, status_code=400, errors=None):
        self.status_code = status_code
        if errors is None:
            self.errors = []
        else:
            self.errors = [str(e) for e in errors]
        self.message = "\n".join(self.errors)
        # BUGFIX: pass the message to Exception so str(exc), tracebacks and
        # log lines show it instead of an empty string.
        super().__init__(self.message)
class GraphQLHandler(web.RequestHandler):
    """Tornado handler that executes GraphQL queries against ``api_schema``.

    POST bodies are JSON of the form ``{"query": ..., "context": {...}}``;
    the active session is injected into the resolver context.
    """

    def set_default_headers(self):
        # Permissive CORS headers so browser clients on other origins can
        # reach the endpoint. NOTE(review): "*" origin combined with
        # Allow-Credentials "true" is rejected by browsers — confirm intent.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Credentials", "true")
        self.set_header(
            "Access-Control-Allow-Headers",
            "Origin, X-Requested-With, Content-Type, Accept, Authorization",
        )
        self.set_header("Access-Control-Allow-Methods", "HEAD, PUT, POST, GET, OPTIONS")

    def options(self):
        # CORS preflight: reply 204 No Content; headers come from
        # set_default_headers above.
        self.set_status(204)
        self.finish()

    @GraphQLSession.ensure_active_session
    @error_response
    async def post(self):
        """Handle a GraphQL POST; the decorators guarantee an active session
        and translate exceptions into JSON error responses."""
        # print(self.current_user)
        # print(self.get_current_user().authenticated)
        # if not self.current_user.authenticated:
        # self.current_user.set_authenticated(True)
        # else:
        # self.current_user.set_authenticated(False)
        return await self.handle_graqhql()

    async def handle_graqhql(self):
        """Execute the request and write data/errors to the response.
        (The "graqhql" typo is kept — renaming would change the class API.)"""
        result = await self.execute_graphql()
        app_log.debug("GraphQL result data: %s errors: %s", result.data, result.errors)
        if result and result.errors:
            ex = ExecutionError(errors=result.errors)
            # NOTE(review): app_log.warn is a deprecated alias of .warning
            app_log.warn("GraphQL Error: %s", ex.message)
            self.write("GraphQL Error: {}".format(ex.message))
            if not self.application.settings.get("debug", False):
                # Return a 500 server error to the client if we are not running the
                # server in debug mode
                raise ex
        response = {"data": result.data}
        self.write(response)

    async def execute_graphql(self):
        """Run the query from the request body through graphql-core, with the
        current session placed into the resolver context."""
        graphql_req = self.graphql_request
        print(self.graphql_request)  # NOTE(review): debug print left in place
        # print(graphql_req["context"])
        app_log.debug("graphql request: %s", graphql_req)
        context_value = graphql_req.get("context", {})
        print("context", context_value)  # NOTE(review): debug print left in place
        context_value["session"] = self.current_user
        # context_value["db_client"] = self.opts["db_client"]
        result = await graphql(
            schema=self.schema,
            source=graphql_req.get("query"),
            root_value=None,  # resolve root
            context_value=context_value,  # resolve info
        )
        print(result)  # NOTE(review): debug print left in place
        return result

    @property
    def graphql_request(self):
        # Parsed JSON body of the current request.
        return json_decode(self.request.body)

    @property
    def content_type(self):
        # Bare MIME type, without any "; charset=..." suffix.
        return self.request.headers.get("Content-Type", "text/plain").split(";")[0]

    @property
    def schema(self):
        # raise NotImplementedError("schema must be provided")
        return api_schema

    @property
    def middleware(self):
        # No middleware configured.
        return []

    @property
    def context(self):
        # Context is built per-request in execute_graphql instead.
        return None

    @property
    def active_session(self):
        # Session handling is delegated to GraphQLSession.
        return None
| null |
maverick_api/modules/base/tornadoql/graphql_handler.py
|
graphql_handler.py
|
py
| 4,872 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tornado.web.HTTPError",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "graphql.error.GraphQLError",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "graphql.error.GraphQLError",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "graphql.error.format_error",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tornado.web.HTTPError",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tornado.web.HTTPError",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "graphql.error.GraphQLError",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "traceback.format_exception",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log.error",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "tornado.escape.json_encode",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log.debug",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tornado.web.RequestHandler",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "session_control.GraphQLSession.ensure_active_session",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "session_control.GraphQLSession",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "tornado.log.app_log.debug",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "tornado.log.app_log.warn",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "tornado.log.app_log.debug",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "tornado.log.app_log",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "graphql.graphql",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tornado.escape.json_decode",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "modules.api.api_schema",
"line_number": 134,
"usage_type": "name"
}
] |
448220795
|
import os
import cv2
from skimage import io
import shutil
from imgseg import segmentationUtils
from imgseg import annotationUtils
from geojson import FeatureCollection, dump
from skimage import measure
def masks_to_annotation(datasets_dir, save_path):
    """Convert every label mask under *datasets_dir* into a per-sample folder.

    For each mask image in ``labels_all`` a folder named after the file stem
    is created under *save_path*, containing the mask image, the matching
    nuclei image from ``images_all`` and a polygon ``annotation.json``.
    """
    masks_dir = os.path.join(datasets_dir, "labels_all")
    nucleis_dir = os.path.join(datasets_dir, "images_all")
    # Tolerance for polygon simplification with shapely (0 to not simplify)
    simplify_tol = 0
    # outputs_dir = os.path.abspath(os.path.join('..', 'data', 'postProcessing', 'mask2json'))
    if not os.path.exists(masks_dir):
        return
    print(f'Analyzing folder:{masks_dir}')
    for file in os.listdir(masks_dir):
        file_id = os.path.splitext(file)[0]
        # Read png with mask
        print(f'Analyzing file:{file}')
        mask_img = io.imread(os.path.join(masks_dir, file))
        print("mask_img.shape:", mask_img.shape)
        mask = measure.label(mask_img)
        label = "nuclei"
        print("label:", label)
        sample_path = os.path.join(save_path, file_id)
        if not os.path.exists(sample_path):
            os.makedirs(sample_path)
        io.imsave(os.path.join(sample_path, "mask_labels.png"), mask_img)
        shutil.copyfile(os.path.join(nucleis_dir, file.replace(".tif", ".png")),
                        os.path.join(sample_path, "nuclei.png"))
        segmentationUtils.masks_to_polygon(mask, label=label, simplify_tol=simplify_tol,
                                           save_name=os.path.join(sample_path, "annotation.json"))
def gen_mask_from_geojson(files_proc, masks_to_create_value=['filled', 'edge', 'distance', 'weigthed', 'border_mask'], img_size=None, infer=False, border_detection_threshold=6):
masks_to_create = {}
# annot_types = list(masks_to_create.keys())
annotationsImporter = annotationUtils.GeojsonImporter()
# Instance to save masks
masks = annotationUtils.MaskGenerator()
weightedEdgeMasks = annotationUtils.WeightedEdgeMaskGenerator(sigma=8, w0=10)
distMapMasks = annotationUtils.DistanceMapGenerator(truncate_distance=None)
borderMasks = annotationUtils.BorderMaskGenerator(border_detection_threshold=border_detection_threshold)
# %% Loop over all files
for i, file_proc in enumerate(files_proc):
print('PROCESSING FILE:')
print(file_proc)
# Decompose file name
drive, path_and_file = os.path.splitdrive(file_proc)
path, file = os.path.split(path_and_file)
# file_base, ext = os.path.splitext(file)
# Read annotation: Correct class has been selected based on annot_type
annot_dict_all, roi_size_all, image_size = annotationsImporter.load(file_proc)
if img_size is not None:
image_size = img_size
annot_types = set(annot_dict_all[k]['properties']['label'] for k in annot_dict_all.keys())
print("annot_types: ", annot_types)
for annot_type in annot_types:
if infer:
file_name_save = os.path.join(drive, path, annot_type + '_filled_output.png')
else:
file_name_save = os.path.join(drive, path, annot_type + '_filled.png')
if os.path.exists(file_name_save):
print("skip to generate mask:", file_name_save)
continue
# print("annot_type: ", annot_type)
masks_to_create[annot_type] = masks_to_create_value
# Filter the annotations by label
annot_dict = {k: annot_dict_all[k] for k in annot_dict_all.keys() if
annot_dict_all[k]['properties']['label'] == annot_type}
# print("len(annot_dict):", len(annot_dict))
# print("annot_dict.keys():", annot_dict.keys())
# Create masks
# Binary - is always necessary to creat other masks
print(' .... creating binary masks .....')
binaryMasks = annotationUtils.BinaryMaskGenerator(image_size=image_size, erose_size=5, obj_size_rem=500,
save_indiv=True)
mask_dict = binaryMasks.generate(annot_dict)
# Save binary masks FILLED if specified
if 'filled' in masks_to_create[annot_type]:
if infer:
file_name_save = os.path.join(drive, path, annot_type + '_filled_output.png')
else:
file_name_save = os.path.join(drive, path, annot_type + '_filled.png')
masks.save(mask_dict, 'fill', file_name_save)
# Edge mask
if 'edge' in masks_to_create[annot_type]:
if infer:
file_name_save = os.path.join(drive,path, annot_type + '_edge_output.png')
else:
file_name_save = os.path.join(drive,path, annot_type + '_edge.png')
masks.save(mask_dict,'edge',file_name_save)
# Distance map
if 'distance' in masks_to_create[annot_type]:
print(' .... creating distance maps .....')
mask_dict = distMapMasks.generate(annot_dict, mask_dict)
# Save
if infer:
file_name_save = os.path.join(drive, path, annot_type + '_distmap_output.png')
else:
file_name_save = os.path.join(drive, path, annot_type + '_distmap.png')
masks.save(mask_dict, 'distance_map', file_name_save)
# Weighted edge mask
if 'weigthed' in masks_to_create[annot_type]:
print(' .... creating weighted edge masks .....')
mask_dict = weightedEdgeMasks.generate(annot_dict,mask_dict)
# Save
if infer:
file_name_save = os.path.join(drive,path, annot_type + '_edgeweight_output.png')
else:
file_name_save = os.path.join(drive,path, annot_type + '_edgeweight.png')
masks.save(mask_dict,'edge_weighted',file_name_save)
# border_mask
if 'border_mask' in masks_to_create[annot_type]:
print(' .... creating border masks .....')
mask_dict = borderMasks.generate(annot_dict,mask_dict)
# Save
if infer:
file_name_save = os.path.join(drive,path, annot_type + '_border_mask_output.png')
else:
file_name_save = os.path.join(drive,path, annot_type + '_border_mask.png')
cv2.imwrite(file_name_save, mask_dict['border_mask'],
[cv2.IMWRITE_PNG_COMPRESSION, 9])
if __name__ == "__main__":
# # generate annotation from mask
# datasets_dir = "/home/alex/Downloads/test/data"
# save_path = "/home/alex/Downloads/test/data/kaggle_data/train"
# masks_to_annotation(datasets_dir, save_path)
#
# # move the mask.png to the mask_labels.png
# # for id in os.listdir(save_path):
# # shutil.move(os.path.join(save_path, id, "mask.png"),
# # os.path.join(save_path, id, "mask_labels.png"))
# generate mask from annotation.josn
datasets_dir = "/home/alex/Downloads/test/data/kaggle_data"
err_list = []
for file_id in os.listdir(os.path.join(datasets_dir, "train")):
file_path = os.path.join(datasets_dir, "train", file_id, "annotation.json")
# gen_mask_from_geojson([file_path], masks_to_create_value=["border_mask"])
try:
gen_mask_from_geojson([file_path], masks_to_create_value=["border_mask"])
except:
print("generate mask error:", os.path.join(datasets_dir, "train", file_id))
err_list.append(file_id)
print("err_list:", err_list)
# # change the mask file name
# for file_id in os.listdir(os.path.join(datasets_dir, "train")):
# file_path = os.path.join(datasets_dir, "train", file_id)
# for id in os.listdir(file_path):
# try:
# shutil.move(os.path.join(file_path, "nuclei_weighted_boarder.png"),
# os.path.join(file_path, "nuclei_border_mask.png"))
# except:
# if os.path.exists(os.path.join(file_path, "nuclei_border_mask.png")):
# print("file exist:", os.path.join(file_path, "nuclei_border_mask.png"))
# elif not os.path.exists(os.path.join(file_path, "nuclei_weighted_boarder.png")):
# print("file not exist:", os.path.join(file_path, "nuclei_weighted_boarder.png"))
# else:
# print("move error:", os.path.join(file_path, "nuclei_weighted_boarder.png"))
| null |
src/imgseg/geojson_utils.py
|
geojson_utils.py
|
py
| 8,914 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "skimage.io.imread",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "skimage.measure.label",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "skimage.measure",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "skimage.io.imsave",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "imgseg.segmentationUtils.masks_to_polygon",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "imgseg.segmentationUtils",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "imgseg.annotationUtils.GeojsonImporter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "imgseg.annotationUtils",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "imgseg.annotationUtils.MaskGenerator",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "imgseg.annotationUtils",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "imgseg.annotationUtils.WeightedEdgeMaskGenerator",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "imgseg.annotationUtils",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "imgseg.annotationUtils.DistanceMapGenerator",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "imgseg.annotationUtils",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "imgseg.annotationUtils.BorderMaskGenerator",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "imgseg.annotationUtils",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "os.path.splitdrive",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "imgseg.annotationUtils.BinaryMaskGenerator",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "imgseg.annotationUtils",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cv2.IMWRITE_PNG_COMPRESSION",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 175,
"usage_type": "attribute"
}
] |
213927346
|
# encoding: UTF-8
import abc
import inspect
import types
import six
from combo_x40.core.frozen_types import FrozenObject
from combo_x40.core.utils import resolve_module
class CodeElement(object):
MODULE = 1
ANNOTATION = 2
CLASS = 4
FUNCTION = 8
PROPERTY = 16
DESCRIPTOR = 32
ANY = MODULE | ANNOTATION | CLASS | FUNCTION | PROPERTY | DESCRIPTOR
is_module = staticmethod(inspect.ismodule)
@staticmethod
def is_annotation(obj):
return inspect.isclass(obj) and issubclass(obj, Annotation)
@staticmethod
def is_class(obj):
return inspect.isclass(obj) and not issubclass(obj, Annotation)
is_function = staticmethod(inspect.isfunction)
@staticmethod
def is_property(obj):
return isinstance(obj, property)
@staticmethod
def is_descriptor(obj):
return inspect.isdatadescriptor(obj) \
or inspect.ismethoddescriptor(obj) \
or inspect.ismemberdescriptor(obj) \
or inspect.isgetsetdescriptor(obj)
@staticmethod
def is_any(obj):
return inspect.isdatadescriptor(obj) \
or inspect.isgetsetdescriptor(obj) \
or inspect.ismethoddescriptor(obj) \
or inspect.ismemberdescriptor(obj) \
or inspect.ismemberdescriptor(obj) \
or inspect.isfunction(obj) \
or inspect.isclass(obj) \
or inspect.ismodule(obj)
class _AnnotationContainer(object):
__annotations = dict()
@staticmethod
def _not_applicable_error(annotation_clz, code_element_name):
msg = "Annotation '%r' not applicable to " + code_element_name
return TypeError(msg % annotation_clz)
@classmethod
def add_annotation(cls, obj, annotation):
annotation_clz = type(annotation)
annotation_desc = annotation.__desc__
if isinstance(obj, six.string_types) or CodeElement.is_module(obj):
if not annotation_desc.has_target(CodeElement.MODULE):
raise cls._not_applicable_error(annotation_clz, "module")
if isinstance(obj, six.string_types):
obj = resolve_module(obj)
elif CodeElement.is_annotation(obj):
if not annotation_desc.has_target(CodeElement.ANNOTATION):
raise cls._not_applicable_error(annotation_clz, "annotation")
elif CodeElement.is_class(obj):
if not annotation_desc.has_target(CodeElement.CLASS):
raise cls._not_applicable_error(annotation_clz, "class")
elif CodeElement.is_function(obj):
if not annotation_desc.has_target(CodeElement.FUNCTION):
raise cls._not_applicable_error(annotation_clz, "function")
elif CodeElement.is_property(obj):
if not annotation_desc.has_target(CodeElement.PROPERTY):
raise cls._not_applicable_error(annotation_clz, "property")
elif CodeElement.is_descriptor(obj):
if not annotation_desc.has_target(CodeElement.DESCRIPTOR):
raise cls._not_applicable_error(annotation_clz, "descriptor")
else:
msg = "Annotations not applicable to objects of type '%r'"
raise TypeError(msg % type(obj))
annotation_list = cls.__annotations \
.setdefault(obj, {}) \
.setdefault(annotation_clz, [])
if not annotation_list or annotation_desc.repeatable:
annotation_list.append(annotation)
else:
msg = "Annotation '%r' is not repeatable"
raise TypeError(msg % annotation_clz)
@classmethod
def __check_and_normalize(cls, obj):
if isinstance(obj, types.UnboundMethodType):
return six.get_method_function(obj)
elif CodeElement.is_any(obj):
return obj
elif isinstance(obj, six.string_types):
return resolve_module(obj)
msg = "Annotations not applicable to objects of type '%r'"
raise TypeError(msg % type(obj))
@classmethod
def get_annotation(cls, obj, annotation_clz):
obj = cls.__check_and_normalize(obj)
annotation_list = cls.__annotations \
.get(obj, {}) \
.get(annotation_clz, [])
return list(annotation_list)
@classmethod
def get_annotations(cls, obj):
obj = cls.__check_and_normalize(obj)
annotation_map = cls.__annotations \
.get(obj, {})
return dict((k, list(v)) for k, v in annotation_map.items())
class _AnnotationDescription(object):
def __init__(self, target, repeatable):
self.target = target
self.repeatable = repeatable
def has_target(self, target):
return self.target & target == target
class _AnnotationMeta(abc.ABCMeta):
def __new__(cls, name, bases, namespace):
try:
for base in bases:
if issubclass(base, Annotation) and base != Annotation:
msg = "Can't create subclass from '%r'"
raise TypeError(msg % base)
except NameError:
pass
return abc.ABCMeta.__new__(cls, name, bases, namespace)
def __repr__(cls):
return "<annotation '%s.%s'>" % (cls.__module__, cls.__name__)
class Annotation(FrozenObject):
__metaclass__ = _AnnotationMeta
@abc.abstractmethod
def __init__(self, target=0, repeatable=False):
self.__desc__ = _AnnotationDescription(target, repeatable)
def __call__(self, obj):
self._freeze()
_AnnotationContainer.add_annotation(obj, self)
return obj
def __repr__(self):
return "<%s.%s annotation at 0x%x>" % (
self.__class__.__module__, self.__class__.__name__, id(self))
def get_annotation(obj, annotation_clz):
return _AnnotationContainer.get_annotation(obj, annotation_clz)
def get_annotations(obj):
return _AnnotationContainer.get_annotations(obj)
def search_annotation(obj, annotation_clz):
result = []
visited = set()
queue = [obj]
while queue:
current = queue.pop(0)
annotations = _AnnotationContainer.get_annotations(current)
result.extend(annotations.pop(annotation_clz, []))
for i in annotations.values():
queue.extend(set(map(type, i)) - visited)
visited.add(current)
return result
| null |
combo_x40/core/annotations.py
|
annotations.py
|
py
| 6,384 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "inspect.ismodule",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "inspect.isclass",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "inspect.isclass",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "inspect.isfunction",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "inspect.isdatadescriptor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "inspect.ismethoddescriptor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "inspect.ismemberdescriptor",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "inspect.isgetsetdescriptor",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "inspect.isdatadescriptor",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "inspect.isgetsetdescriptor",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "inspect.ismethoddescriptor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "inspect.ismemberdescriptor",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "inspect.ismemberdescriptor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "inspect.isfunction",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "inspect.isclass",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "inspect.ismodule",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "six.string_types",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "combo_x40.core.utils.resolve_module",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "types.UnboundMethodType",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "six.get_method_function",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "combo_x40.core.utils.resolve_module",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "abc.ABCMeta",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "abc.ABCMeta.__new__",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "abc.ABCMeta",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "combo_x40.core.frozen_types.FrozenObject",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 169,
"usage_type": "attribute"
}
] |
338550461
|
from django.urls import path, include
from rest_framework import routers
from . import views
app_name = 'blog'
router = routers.DefaultRouter()
router.register('posts', views.AllPostsViewSet)
router.register('post', views.PostViewSet)
urlpatterns = [
path('', views.home, name="homepage"),
path('list', views.list_page, name="posts_list_page"),
path('post/<slug:post>', views.single_post, name="single_post_page"),
path('json_', include(router.urls))
]
| null |
src/blog/urls.py
|
urls.py
|
py
| 471 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 15,
"usage_type": "call"
}
] |
396635104
|
import os
import time
from pathlib import Path
import pandas as pd
import torch
from torch.nn import SmoothL1Loss
from torch.utils.data import DataLoader
from ts.es_rnn.config import get_config
from ts.es_rnn.model import ESRNN
from ts.es_rnn.trainer import ESRNNTrainer
from ts.utils.data_loading import SeriesDataset
from ts.utils.helper_funcs import MODEL_TYPE, set_seed, create_datasets, generate_timeseries_length_stats, \
filter_timeseries
set_seed(0)
run_id = str(int(time.time()))
print("Starting run={}, model={} ".format(run_id, MODEL_TYPE.ESRNN.value))
try:
user_paths = os.environ["PYTHONPATH"].split(os.pathsep)
print(user_paths)
except KeyError:
user_paths = []
BASE_DIR = Path("data/raw/")
LOG_DIR = Path("logs/" + MODEL_TYPE.ESRNN.value)
FIGURE_PATH = Path("figures-temp/" + MODEL_TYPE.ESRNN.value)
print("loading config")
config = get_config("Daily")
print("Frequency:{}".format(config["variable"]))
print("loading data")
info = pd.read_csv(str(BASE_DIR / "M4info.csv"))
train_path = str(BASE_DIR / "train/%s-train.csv") % (config["variable"])
test_path = str(BASE_DIR / "test/%s-test.csv") % (config["variable"])
sample = config["sample"]
sample_ids = config["sample_ids"] if "sample_ids" in config else []
train, ts_labels, val, test, test_idx = create_datasets(train_path, test_path, config["output_size"],
sample_ids=sample_ids, sample=sample,
sampling_size=4)
generate_timeseries_length_stats(train)
print("#.Train before chopping:{}".format(train.shape[0]))
train_before_chopping_count = train.shape[0]
chop_val = config["chop_val"]
print("Chop value:{:6.3f}".format(chop_val))
train, val, test, data_infocat_ohe, data_infocat_headers, data_info_cat = \
filter_timeseries(info, config["variable"], sample, ts_labels, train, chop_val, val, test)
print("#.Train after chopping:{}, lost:{:5.2f}%".format(len(train),
(train_before_chopping_count - len(
train)) / train_before_chopping_count * 100.))
print("#.train:{}, #.validation ts:{}, #.test ts:{}".format(len(train), len(val), len(test)))
dataset = SeriesDataset(data_infocat_ohe, data_infocat_headers, data_info_cat, ts_labels,
train, val, test, config["device"])
config["num_of_categories"] = len(dataset.data_info_cat_headers)
dataloader = DataLoader(dataset, batch_size=config["batch_size"], shuffle=True)
reload = config["reload"]
model = ESRNN(num_series=len(dataset), config=config)
optimizer = torch.optim.Adam(model.parameters(), lr=config["learning_rate"])
add_run_id = config["add_run_id"]
# criterion = PinballLoss(config["training_tau"], config["output_size"] * config["batch_size"], config["device"])
criterion = SmoothL1Loss()
tr = ESRNNTrainer(MODEL_TYPE.ESRNN.value, model, optimizer, criterion, dataloader, run_id, add_run_id, config,
ohe_headers=dataset.data_info_cat_headers,
csv_path=LOG_DIR,
figure_path=FIGURE_PATH, sampling=sample, reload=reload)
tr.train_epochs()
| null |
ts/es_rnn/main.py
|
main.py
|
py
| 3,204 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ts.utils.helper_funcs.set_seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE.ESRNN",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.pathsep",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE.ESRNN",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE.ESRNN",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "ts.es_rnn.config.get_config",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.create_datasets",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.generate_timeseries_length_stats",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.filter_timeseries",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "ts.utils.data_loading.SeriesDataset",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "ts.es_rnn.model.ESRNN",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.SmoothL1Loss",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "ts.es_rnn.trainer.ESRNNTrainer",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE.ESRNN",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "ts.utils.helper_funcs.MODEL_TYPE",
"line_number": 72,
"usage_type": "name"
}
] |
347395599
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReleaseEnvironment(Model):
"""ReleaseEnvironment.
:param conditions: Gets list of conditions.
:type conditions: list of :class:`ReleaseCondition <release.v4_1.models.ReleaseCondition>`
:param created_on: Gets date on which it got created.
:type created_on: datetime
:param definition_environment_id: Gets definition environment id.
:type definition_environment_id: int
:param demands: Gets demands.
:type demands: list of :class:`object <release.v4_1.models.object>`
:param deploy_phases_snapshot: Gets list of deploy phases snapshot.
:type deploy_phases_snapshot: list of :class:`object <release.v4_1.models.object>`
:param deploy_steps: Gets deploy steps.
:type deploy_steps: list of :class:`DeploymentAttempt <release.v4_1.models.DeploymentAttempt>`
:param environment_options: Gets environment options.
:type environment_options: :class:`EnvironmentOptions <release.v4_1.models.EnvironmentOptions>`
:param id: Gets the unique identifier of this field.
:type id: int
:param modified_on: Gets date on which it got modified.
:type modified_on: datetime
:param name: Gets name.
:type name: str
:param next_scheduled_utc_time: Gets next scheduled UTC time.
:type next_scheduled_utc_time: datetime
:param owner: Gets the identity who is owner for release environment.
:type owner: :class:`IdentityRef <release.v4_1.models.IdentityRef>`
:param post_approvals_snapshot: Gets list of post deploy approvals snapshot.
:type post_approvals_snapshot: :class:`ReleaseDefinitionApprovals <release.v4_1.models.ReleaseDefinitionApprovals>`
:param post_deploy_approvals: Gets list of post deploy approvals.
:type post_deploy_approvals: list of :class:`ReleaseApproval <release.v4_1.models.ReleaseApproval>`
:param post_deployment_gates_snapshot:
:type post_deployment_gates_snapshot: :class:`ReleaseDefinitionGatesStep <release.v4_1.models.ReleaseDefinitionGatesStep>`
:param pre_approvals_snapshot: Gets list of pre deploy approvals snapshot.
:type pre_approvals_snapshot: :class:`ReleaseDefinitionApprovals <release.v4_1.models.ReleaseDefinitionApprovals>`
:param pre_deploy_approvals: Gets list of pre deploy approvals.
:type pre_deploy_approvals: list of :class:`ReleaseApproval <release.v4_1.models.ReleaseApproval>`
:param pre_deployment_gates_snapshot:
:type pre_deployment_gates_snapshot: :class:`ReleaseDefinitionGatesStep <release.v4_1.models.ReleaseDefinitionGatesStep>`
:param process_parameters: Gets process parameters.
:type process_parameters: :class:`ProcessParameters <release.v4_1.models.ProcessParameters>`
:param queue_id: Gets queue id.
:type queue_id: int
:param rank: Gets rank.
:type rank: int
:param release: Gets release reference which specifies the reference of the release to which this release environment is associated.
:type release: :class:`ReleaseShallowReference <release.v4_1.models.ReleaseShallowReference>`
:param release_created_by: Gets the identity who created release.
:type release_created_by: :class:`IdentityRef <release.v4_1.models.IdentityRef>`
:param release_definition: Gets releaseDefinitionReference which specifies the reference of the release definition to which this release environment is associated.
:type release_definition: :class:`ReleaseDefinitionShallowReference <release.v4_1.models.ReleaseDefinitionShallowReference>`
:param release_description: Gets release description.
:type release_description: str
:param release_id: Gets release id.
:type release_id: int
:param scheduled_deployment_time: Gets schedule deployment time of release environment.
:type scheduled_deployment_time: datetime
:param schedules: Gets list of schedules.
:type schedules: list of :class:`ReleaseSchedule <release.v4_1.models.ReleaseSchedule>`
:param status: Gets environment status.
:type status: object
:param time_to_deploy: Gets time to deploy.
:type time_to_deploy: float
:param trigger_reason: Gets trigger reason.
:type trigger_reason: str
:param variable_groups: Gets the list of variable groups.
:type variable_groups: list of :class:`VariableGroup <release.v4_1.models.VariableGroup>`
:param variables: Gets the dictionary of variables.
:type variables: dict
:param workflow_tasks: Gets list of workflow tasks.
:type workflow_tasks: list of :class:`WorkflowTask <release.v4_1.models.WorkflowTask>`
"""
_attribute_map = {
'conditions': {'key': 'conditions', 'type': '[ReleaseCondition]'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'definition_environment_id': {'key': 'definitionEnvironmentId', 'type': 'int'},
'demands': {'key': 'demands', 'type': '[object]'},
'deploy_phases_snapshot': {'key': 'deployPhasesSnapshot', 'type': '[object]'},
'deploy_steps': {'key': 'deploySteps', 'type': '[DeploymentAttempt]'},
'environment_options': {'key': 'environmentOptions', 'type': 'EnvironmentOptions'},
'id': {'key': 'id', 'type': 'int'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'next_scheduled_utc_time': {'key': 'nextScheduledUtcTime', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'post_approvals_snapshot': {'key': 'postApprovalsSnapshot', 'type': 'ReleaseDefinitionApprovals'},
'post_deploy_approvals': {'key': 'postDeployApprovals', 'type': '[ReleaseApproval]'},
'post_deployment_gates_snapshot': {'key': 'postDeploymentGatesSnapshot', 'type': 'ReleaseDefinitionGatesStep'},
'pre_approvals_snapshot': {'key': 'preApprovalsSnapshot', 'type': 'ReleaseDefinitionApprovals'},
'pre_deploy_approvals': {'key': 'preDeployApprovals', 'type': '[ReleaseApproval]'},
'pre_deployment_gates_snapshot': {'key': 'preDeploymentGatesSnapshot', 'type': 'ReleaseDefinitionGatesStep'},
'process_parameters': {'key': 'processParameters', 'type': 'ProcessParameters'},
'queue_id': {'key': 'queueId', 'type': 'int'},
'rank': {'key': 'rank', 'type': 'int'},
'release': {'key': 'release', 'type': 'ReleaseShallowReference'},
'release_created_by': {'key': 'releaseCreatedBy', 'type': 'IdentityRef'},
'release_definition': {'key': 'releaseDefinition', 'type': 'ReleaseDefinitionShallowReference'},
'release_description': {'key': 'releaseDescription', 'type': 'str'},
'release_id': {'key': 'releaseId', 'type': 'int'},
'scheduled_deployment_time': {'key': 'scheduledDeploymentTime', 'type': 'iso-8601'},
'schedules': {'key': 'schedules', 'type': '[ReleaseSchedule]'},
'status': {'key': 'status', 'type': 'object'},
'time_to_deploy': {'key': 'timeToDeploy', 'type': 'float'},
'trigger_reason': {'key': 'triggerReason', 'type': 'str'},
'variable_groups': {'key': 'variableGroups', 'type': '[VariableGroup]'},
'variables': {'key': 'variables', 'type': '{ConfigurationVariableValue}'},
'workflow_tasks': {'key': 'workflowTasks', 'type': '[WorkflowTask]'}
}
    def __init__(self, conditions=None, created_on=None, definition_environment_id=None, demands=None, deploy_phases_snapshot=None, deploy_steps=None, environment_options=None, id=None, modified_on=None, name=None, next_scheduled_utc_time=None, owner=None, post_approvals_snapshot=None, post_deploy_approvals=None, post_deployment_gates_snapshot=None, pre_approvals_snapshot=None, pre_deploy_approvals=None, pre_deployment_gates_snapshot=None, process_parameters=None, queue_id=None, rank=None, release=None, release_created_by=None, release_definition=None, release_description=None, release_id=None, scheduled_deployment_time=None, schedules=None, status=None, time_to_deploy=None, trigger_reason=None, variable_groups=None, variables=None, workflow_tasks=None):
        """Initialize a ReleaseEnvironment model.

        Auto-generated REST client model: every keyword argument maps 1:1
        onto a serialized field declared in ``_attribute_map`` above, and
        all fields default to None so partial payloads deserialize cleanly.
        """
        super(ReleaseEnvironment, self).__init__()
        self.conditions = conditions
        self.created_on = created_on
        self.definition_environment_id = definition_environment_id
        self.demands = demands
        self.deploy_phases_snapshot = deploy_phases_snapshot
        self.deploy_steps = deploy_steps
        self.environment_options = environment_options
        self.id = id
        self.modified_on = modified_on
        self.name = name
        self.next_scheduled_utc_time = next_scheduled_utc_time
        self.owner = owner
        self.post_approvals_snapshot = post_approvals_snapshot
        self.post_deploy_approvals = post_deploy_approvals
        self.post_deployment_gates_snapshot = post_deployment_gates_snapshot
        self.pre_approvals_snapshot = pre_approvals_snapshot
        self.pre_deploy_approvals = pre_deploy_approvals
        self.pre_deployment_gates_snapshot = pre_deployment_gates_snapshot
        self.process_parameters = process_parameters
        self.queue_id = queue_id
        self.rank = rank
        self.release = release
        self.release_created_by = release_created_by
        self.release_definition = release_definition
        self.release_description = release_description
        self.release_id = release_id
        self.scheduled_deployment_time = scheduled_deployment_time
        self.schedules = schedules
        self.status = status
        self.time_to_deploy = time_to_deploy
        self.trigger_reason = trigger_reason
        self.variable_groups = variable_groups
        self.variables = variables
        self.workflow_tasks = workflow_tasks
| null |
vsts/vsts/release/v4_1/models/release_environment.py
|
release_environment.py
|
py
| 10,222 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "msrest.serialization.Model",
"line_number": 12,
"usage_type": "name"
}
] |
279469332
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
#%matplotlib inline
from glob import glob
from tqdm import tqdm
import cv2
from PIL import Image
from imgaug import augmenters as iaa
#from keras.callbacks import TensorBoard
#from keras.utils import plot_model
# Training index: CSV listing one image path per row; labels come from
# the separate 'tag_train.npz' archive loaded further down.
train_path = 'visual_china_train1.csv'
train_df = pd.read_csv(train_path)
train_df.head()
train_df.shape
# Strip directories so only the image basename remains.
# NOTE(review): hard-codes 35000 rows rather than len(train_df), and uses
# chained `.iloc` assignment (SettingWithCopyWarning territory) -- confirm
# the writes actually land in train_df.
for i in range(35000):
    train_df['img_path'].iloc[i] = train_df['img_path'].iloc[i].split('/')[-1]
img_paths = list(train_df['img_path'])
def hash_tag(filepath):
    """Read one tag per line from *filepath* and return {index: tag}.

    Lines are stripped of surrounding whitespace; indices start at 0 and
    follow file order.

    :param filepath: path to a UTF-8 text file with one tag per line.
    :return: dict mapping line index -> stripped tag string.
    """
    tags = {}
    # `with` guarantees the handle is closed (the original leaked it), and
    # enumerate replaces the manual counter.
    with open(filepath, "r", encoding='utf-8') as fo:
        for i, line in enumerate(fo):
            tags[i] = line.strip()
    return tags
def load_ytrain(filepath):
    """Return the 'tag_train' label matrix stored in the .npz at *filepath*."""
    archive = np.load(filepath)
    return archive['tag_train']
def arr2tag(arr, threshold=0.3, tag_map=None):
    """Convert a (num_samples, num_classes) score array into tag-name lists.

    For each row, every class whose score exceeds *threshold* is looked up
    in *tag_map* and collected.

    :param arr: 2-D array of per-class scores (e.g. sigmoid outputs).
    :param threshold: score cutoff; 0.3 preserves the original behavior.
    :param tag_map: index -> tag-name mapping; defaults to the module-level
        ``hash_tag`` dict built from 'valid_tags.txt' (original behavior).
    :return: list of tag-name lists, one per row of *arr*.
    """
    if tag_map is None:
        tag_map = hash_tag
    tags = []
    for i in range(arr.shape[0]):
        index = np.where(arr[i] > threshold)[0].tolist()
        tags.append([tag_map[j] for j in index])
    return tags
# Build the index->tag lookup.  NOTE(review): this rebinds the name
# `hash_tag` from the function to its result, shadowing the function.
filepath = "valid_tags.txt"
hash_tag = hash_tag(filepath)
hash_tag[1]
y_train = load_ytrain('tag_train.npz')
y_train.shape
#(35000, 6941)
# Decode every training image into one uint8 tensor (RGB, 299x299 --
# the InceptionResNetV2 input size used below).
nub_train = 35000
X_train = np.zeros((nub_train,299,299,3),dtype=np.uint8)
i = 0
for img_path in img_paths[:nub_train]:
    img = Image.open('train/' + img_path)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = img.resize((299,299))
    arr = np.asarray(img)
    X_train[i,:,:,:] = arr
    i += 1
# Augmentation pipeline (crop/pad, optional blur, contrast, noise) applied
# in random order.  NOTE(review): images_aug is computed but never used in
# training below -- confirm whether augmentation was meant to feed the model.
seq = iaa.Sequential([
    iaa.CropAndPad(percent=(-0.1, 0.1)),
    iaa.Sometimes(0.5,
        iaa.GaussianBlur(sigma=(0, 0.5))
    ),
    iaa.ContrastNormalization((0.75, 1.5)),
    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255)),
], random_order=True)
imglist=[]
imglist.append(X_train)
images_aug = seq.augment_images(X_train)
from sklearn.model_selection import train_test_split
# 1% held out for validation; fixed seed for reproducibility.
X_train2,X_val,y_train2,y_val = train_test_split(X_train, y_train[:nub_train], test_size=0.01, random_state=2018)
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from keras.callbacks import *
from keras.models import load_model
from keras.applications.inception_resnet_v2 import InceptionResNetV2,preprocess_input
from keras.utils.training_utils import multi_gpu_model
#from keras.applications.inception_resnet_v2 import InceptionResNetV2,preprocess_input
#from keras.applications.densenet import DenseNet201,preprocess_input
#base_model = DenseNet201(weights='imagenet', include_top=False)
# ImageNet-pretrained backbone without its classification head; a new
# GAP -> Dense(1024, relu) -> Dense(6941, sigmoid) head performs
# multi-label tagging (sigmoid, not softmax: tags are independent).
base_model = InceptionResNetV2(weights='imagenet',include_top=False)
#base_model = InceptionResNetV2(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024,activation='relu')(x)
predictions = Dense(6941,activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
#plot_model(model,to_file='model.png')
model.summary()
import keras.backend as K
def precision(y_true, y_pred):
    """Batch-wise precision: true positives over predicted positives."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # Epsilon guards against division by zero when nothing is predicted.
    return tp / (predicted_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: true positives over actual positives."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # Epsilon guards against division by zero when no labels are positive.
    return tp / (actual_pos + K.epsilon())
def fbeta_score(y_true, y_pred, beta=1):
    """F-beta score: the weighted harmonic mean of precision and recall.

    Pinned to 0 when there are no positive labels, mirroring sklearn.
    """
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # No true positives possible at all -> fix the score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    beta_sq = beta ** 2
    return (1 + beta_sq) * (p * r) / (beta_sq * p + r + K.epsilon())
def fmeasure(y_true, y_pred):
    """F1 score: the harmonic mean of precision and recall (beta = 1)."""
    return fbeta_score(y_true, y_pred, beta=1)
def setup_to_transfer_learning(model, base_model):
    """Freeze the entire backbone and compile for head-only training."""
    for frozen_layer in base_model.layers:
        frozen_layer.trainable = False
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy',fmeasure,recall,precision])
def setup_to_fine_tune(model,base_model):
    """Unfreeze every backbone layer above GAP_LAYER and recompile."""
    GAP_LAYER = 17
    # Layers 0..GAP_LAYER stay frozen; everything after becomes trainable.
    for idx, layer in enumerate(base_model.layers):
        layer.trainable = idx > GAP_LAYER
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy',fmeasure,recall,precision])
from keras.preprocessing.image import ImageDataGenerator
# Light geometric augmentation for training only; validation untouched.
train_datagen = ImageDataGenerator(width_shift_range = 0.1,
                                   height_shift_range = 0.1,
                                   zoom_range = 0.1,
                                   horizontal_flip= True)
val_datagen = ImageDataGenerator()
batch_size = 128
train_generator = train_datagen.flow(X_train2,y_train2,batch_size=batch_size,shuffle=False)
val_generator = val_datagen.flow(X_val,y_val,batch_size=batch_size,shuffle=False)
# NOTE(review): checkpointer is built but not passed to the callbacks list
# below, so no weights are checkpointed during training -- confirm intent.
checkpointer = ModelCheckpoint(filepath='weights_best_simple_model.hdf5',
                            monitor='val_fmeasure',verbose=1, save_best_only=True, mode='max')
reduce = ReduceLROnPlateau(monitor='val_fmeasure',factor=0.5,patience=2,verbose=1,min_lr=1e-4)
model = multi_gpu_model(model, gpus=4)
# Stage 1: train only the new head with the backbone frozen.
setup_to_transfer_learning(model, base_model)
history_t1 = model.fit_generator(train_generator,
                                 steps_per_epoch=274,
                                 validation_data = val_generator,
                                 epochs=10,
                                 callbacks=[reduce],
                                 verbose=1
                                )
# Stage 2: unfreeze upper backbone layers and fine-tune end to end.
setup_to_fine_tune(model,base_model)
history_ft = model.fit_generator(train_generator,
                                 steps_per_epoch=274,
                                 epochs=8,
                                 validation_data=val_generator,
                                 validation_steps=10,
                                 callbacks=[reduce],
                                 verbose=1)
# Load the test set the same way as training (RGB, 299x299, uint8).
nub_test = len(glob('test/*'))
X_test = np.zeros((nub_test,299,299,3),dtype=np.uint8)
path = []
i = 0
for img_path in tqdm(glob('test/*')):
    img = Image.open(img_path)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = img.resize((299,299))
    arr = np.asarray(img)
    X_test[i,:,:,:] = arr
    i += 1
y_pred = model.predict(X_test)
model.save('model1.h5')
# Persist raw per-class scores before thresholding, for later re-tuning.
from pandas import DataFrame
data= DataFrame(y_pred)
data.to_csv('data1.csv')
y_tags = arr2tag(y_pred)
# NOTE(review): pairing os.listdir('test/') with predictions built from
# glob('test/*') assumes both enumerate files in the same order -- verify,
# or the submitted tags may be attached to the wrong images.
import os
img_name = os.listdir('test/')
img_name[:10]
df = pd.DataFrame({'img_path':img_name, 'tags':y_tags})
for i in range(df['tags'].shape[0]):
    df['tags'].iloc[i] = ','.join(str(e) for e in df['tags'].iloc[i])
df.to_csv('submit2.csv',index=None)
df.head()
| null |
test_inception_resnet_v2.py
|
test_inception_resnet_v2.py
|
py
| 7,481 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters.Sequential",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "imgaug.augmenters.CropAndPad",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "imgaug.augmenters.Sometimes",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "imgaug.augmenters.GaussianBlur",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "imgaug.augmenters.ContrastNormalization",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "imgaug.augmenters.AdditiveGaussianNoise",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "imgaug.augmenters",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "keras.applications.inception_resnet_v2.InceptionResNetV2",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "keras.backend.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "keras.backend.round",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "keras.backend.clip",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "keras.backend.sum",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "keras.backend.round",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "keras.backend.clip",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "keras.backend.epsilon",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "keras.backend.sum",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "keras.backend.round",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "keras.backend.clip",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "keras.backend.sum",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "keras.backend.round",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "keras.backend.clip",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "keras.backend.epsilon",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "keras.backend.sum",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "keras.backend.round",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "keras.backend.clip",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "keras.backend.epsilon",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "keras.utils.training_utils.multi_gpu_model",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 228,
"usage_type": "call"
}
] |
9251953
|
"""
www.dorkal.com
:author
Dedaldino M. Antonio (3D)
:description
This software was developed by Dedaldino Mambuenze Antonio,
a Python programmer who loves Python and other languages such as Java, JavaScript, C++, Erlang, and Go.
Dorkal, Inc - All rights reserved © 2020
"""
import re
from backend.posts.models import TimeStampedModel
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import UserManager, PermissionsMixin
from django.core import validators
from django.db import models
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
# Here has model of users, all users
# The main model for users,
# this model will be a main model for all users
class CustomUserManager(BaseUserManager):
    """Hook point for future user-creation customization.

    Inherits BaseUserManager unchanged for now; override create_user /
    create_superuser here (and swap it in on the User model) once custom
    user-creation behavior is actually needed.
    """
    pass
class User(AbstractBaseUser, PermissionsMixin, TimeStampedModel):
    """Primary account model for all Dorkal users.

    Authentication is by ``username`` (USERNAME_FIELD); ``email`` is also
    unique and is required when creating superusers (REQUIRED_FIELDS).
    """

    # Raw string: '\w' in a plain string is an invalid escape (DeprecationWarning).
    VALIDATOR = [validators.RegexValidator(re.compile(r'^[\w.@]+$'),
                 _('Username can only contain letters and numbers '), 'invalid')]
    user_id = models.AutoField(primary_key=True)
    username = models.CharField(unique=True, verbose_name=_('user'),
                                max_length=250, blank=False, null=False, validators=VALIDATOR,
                                error_messages={
                                    'unique': _("A user with that username already exists."),
                                },
                                )
    email = models.EmailField(unique=True, verbose_name=_('email'),
                              max_length=250, blank=False, null=False)
    # NOTE(review): blank=False with null=True is unusual (forms require a
    # value, the DB allows NULL) -- confirm this combination is intentional.
    phone_number = models.CharField(unique=True, max_length=16, blank=False, null=True)
    push_token = models.TextField(default='')
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'
        ),
    )
    objects = UserManager()
    EMAIL_FIELD = 'email'
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    def __str__(self):
        # Python 3 uses __str__ for display; __unicode__ alone is never
        # called (this file already uses Python-3-only super().clean()).
        return self.username

    def __unicode__(self):
        return self.username

    class Meta:
        db_table = 'users'
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def clean(self):
        """Normalize the email domain in addition to the base cleaning."""
        super().clean()
        self.email = self.__class__.objects.normalize_email(self.email)

    def get_following(self, user):
        """
        List of users that this user is following
        :param self:
        :param user:
        :return:
        """
        return self.relationships.objects.following(user)

    def get_followers(self, user):
        """
        List of users following this user
        :param user:
        :return:
        """
        return self.relationships.objects.followers(user)

    def get_blocking(self, user):
        """
        List of users that this user is blocking
        (manager.blocking() returns relationships *from* the user)
        :param user:
        :return:
        """
        return self.relationships.objects.blocking(user)

    def get_blockers(self, user):
        """
        List of users that are blocking this user
        (manager.blockers() returns relationships *to* the user)
        :param user:
        :return:
        """
        return self.relationships.objects.blockers(user)

    def get_friends(self, user):
        """
        List of user friends: when a symmetrical relationship exists
        between two users they automatically count as friends
        :param user:
        :return:
        """
        return self.relationships.objects.friends(user)
# Swappable user-model reference; used as the FK target for all models below.
UserModel = settings.AUTH_USER_MODEL
# Model for locations
# where is user from or where is a user?
class Locations(models.Model):
    # Coarse user location (city / state / country); referenced by
    # UserProfile.location below.
    location_id = models.AutoField(primary_key=True)
    city = models.CharField(verbose_name=_('city'), max_length=150, blank=True, null=False)
    state = models.CharField(verbose_name=_('state'), max_length=150, blank=True, null=False)
    country = models.CharField(verbose_name=_('country'), max_length=150, blank=True, null=False)
    class Meta:
        db_table = 'locations'
        verbose_name = _('location')
        verbose_name_plural = _('locations')
# This a model that will cover a user profile
# here will be information about a user
class UserProfile(TimeStampedModel):
    """Extended profile data attached one-to-one to a User.

    Created automatically by the post_save signal handler below.
    """

    MALE = 'M'
    FEMALE = 'F'
    NOT_SPECIFIED = 'NS'
    GENDER_CHOICES = (
        (MALE, _('Male')),
        (FEMALE, _('Female')),
        (NOT_SPECIFIED, _('Not specified'))
    )
    # Raw string: '\w' in a plain string is an invalid escape (DeprecationWarning).
    VALIDATOR = [validators.RegexValidator(re.compile(r'^[\w]+$'),
                 _('Invalid name, can only contain letters.'), 'invalid')]
    id = models.AutoField(primary_key=True)
    user = models.OneToOneField(UserModel, verbose_name=_('user'), blank=False, null=False,
                                on_delete=models.CASCADE, related_name='profile')
    first_name = models.CharField(max_length=125, verbose_name=_('first name'),
                                  validators=VALIDATOR, blank=True)
    last_name = models.CharField(max_length=125, verbose_name=_('last name'),
                                 validators=VALIDATOR, blank=True)
    location = models.ForeignKey(Locations, on_delete=models.DO_NOTHING, verbose_name=_('location'),
                                 related_name='location', blank=True, null=True)
    profile_image = models.ImageField(verbose_name=_('profile image'), blank=True, null=True)
    gender = models.CharField(verbose_name=_('gender'), max_length=2, choices=GENDER_CHOICES, blank=True,
                              default=NOT_SPECIFIED)
    DOB = models.DateField(verbose_name=_('date of birth'), blank=True, null=True)
    occupation = models.TextField(verbose_name=_('occupation'), blank=True)
    about = models.TextField(verbose_name=_('about'), blank=True)

    def get_full_name(self):
        """
        Return the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        """Return the short name for the user."""
        return self.first_name

    class Meta:
        db_table = 'user_profile'
        verbose_name = _('user profile')
        verbose_name_plural = _('users profiles')

    def __str__(self):
        # Python 3 display; __unicode__ kept only for backward compatibility.
        return self.user.username

    def __unicode__(self):
        return self.user.username
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Attach a blank UserProfile to every newly created User."""
    if not created:
        return
    UserProfile.objects.create(user=instance)
#
# @receiver(post_save, sender=User)
# def save_user_profile(sender, instance, **kwargs):
# instance.profile.save()
@receiver(pre_delete, sender=User)
def delete_user_profile(sender, instance=None, **kwargs):
    """Remove the user's profile before the user row is deleted.

    Uses filter().delete() instead of get(): the original raised
    UserProfile.DoesNotExist (aborting the user deletion) whenever a user
    had no profile; a queryset delete is simply a no-op in that case.
    """
    if instance:
        UserProfile.objects.filter(user=instance).delete()
# Relationships for one user with others users, it's helpful
# to relation people, thinking on real life where you can
# make relationship with others, following or 'admirar'
class RelationshipStatusManager(models.Manager):
    """Convenience lookups for the well-known relationship statuses."""

    def following(self):
        """Return the canonical 'following' status row."""
        return self.get(from_slug='following')

    def blocking(self):
        """Return the canonical 'blocking' status row."""
        return self.get(from_slug='blocking')

    def by_slug(self, status_slug):
        """Match *status_slug* against any of the three slug columns."""
        slug_match = (models.Q(from_slug=status_slug)
                      | models.Q(to_slug=status_slug)
                      | models.Q(symmetrical_slug=status_slug))
        return self.get(slug_match)
class RelationshipStatus(models.Model):
    """A kind of relationship (e.g. following/followers, blocking/blockers)
    described from both directions plus its symmetrical form."""

    name = models.CharField(_('name'), max_length=100)
    verb = models.CharField(_('verb'), max_length=100)
    from_slug = models.CharField(_('from slug'), max_length=100,
                                 help_text=_("Denote the relationship from the user, i.e. 'following'"))
    to_slug = models.CharField(_('to slug'), max_length=100,
                               help_text=_("Denote the relationship to the user, i.e. 'followers'"))
    symmetrical_slug = models.CharField(_('symmetrical slug'), max_length=100,
                                        help_text=_("When a mutual relationship exists, i.e. 'friends'"))
    login_required = models.BooleanField(_('login required'), default=False,
                                         help_text=_("Users must be logged in to see these relationships"))
    private = models.BooleanField(_('private'), default=False,
                                  help_text=_("Only the user who owns these relationships can see them"))
    objects = RelationshipStatusManager()

    class Meta:
        ordering = ('name',)
        verbose_name = _('relationship status')
        verbose_name_plural = _('relationship statuses')

    def __str__(self):
        # Python 3 uses __str__; __unicode__ alone is never called.
        return self.name

    def __unicode__(self):
        return self.name
class RelationshipManager(User._default_manager.__class__):
    """Follow/block/friend queries over Relationship rows.

    NOTE(review): `user.relationships` is bound to the Relationship *class*
    (see the setattr at the bottom of this module), so the recursive calls
    like `user.relationships.add(...)` resolve against that class rather
    than a manager instance -- confirm this wiring behaves as intended.
    """
    def __init__(self, *args, **kwargs):
        super(RelationshipManager, self).__init__(*args, **kwargs)
    @staticmethod
    def add(from_user, user, status=None, symmetrical=False):
        """
        Add a relationship from one user to another with the given status,
        which defaults to "following".
        Adding a relationship is by default asymmetrical (akin to following
        someone on twitter). Specify a symmetrical relationship (akin to being
        friends on facebook) by passing in :param:`symmetrical` = True
        .. note::
            If :param:`symmetrical` is set, the function will return a tuple
            containing the two relationship objects created
        """
        if not status:
            status = RelationshipStatus.objects.following()
        # get_or_create makes repeated add() calls idempotent per status.
        relationship, created = Relationship.objects.get_or_create(
            from_user=from_user,
            to_user=user,
            status=status
        )
        if symmetrical:
            # Create the mirror edge; symmetrical=False stops the recursion.
            return relationship, user.relationships.add(from_user, status, False)
        else:
            return relationship
    @staticmethod
    def remove(from_user, user, status=None, symmetrical=False):
        """
        Remove a relationship from one user to another, with the same caveats
        and behavior as adding a relationship.
        """
        if not status:
            status = RelationshipStatus.objects.following()
        res = Relationship.objects.filter(
            from_user=from_user,
            to_user=user,
            status=status
        ).delete()
        if symmetrical:
            return res, user.relationships.remove(from_user, status, False)
        else:
            return res
    # Filter kwargs for edges *from* the given user (users they point at).
    @staticmethod
    def _get_from_query(from_user, status):
        return dict(
            to_users__from_user=from_user,
            to_users__status=status,
        )
    # Filter kwargs for edges *to* the given user (users pointing at them).
    @staticmethod
    def _get_to_query(from_user, status):
        return dict(
            from_users__to_user=from_user,
            from_users__status=status,
        )
    def get_relationships(self, from_user, status, symmetrical=False):
        """
        Returns a QuerySet of user objects with which the given user has
        established a relationship.
        """
        query = self._get_from_query(from_user, status)
        if symmetrical:
            # Require the edge in both directions (mutual relationship).
            query.update(self._get_to_query(from_user, status))
        return User.objects.filter(**query)
    def get_related_to(self, from_user, status):
        """
        Returns a QuerySet of user objects which have created a relationship to
        the given user.
        """
        return User.objects.filter(**self._get_to_query(from_user, status))
    def only_to(self, from_user, status):
        """
        Returns a QuerySet of user objects who have created a relationship to
        the given user, but which the given user has not reciprocated
        """
        from_relationships = self.get_relationships(from_user, status)
        to_relationships = self.get_related_to(from_user, status)
        return to_relationships.exclude(pk__in=from_relationships.values_list('pk'))
    def only_from(self, from_user, status):
        """
        Like :method:`only_to`, returns user objects with whom the given user
        has created a relationship, but which have not reciprocated
        """
        from_relationships = self.get_relationships(from_user, status)
        to_relationships = self.get_related_to(from_user, status)
        return from_relationships.exclude(pk__in=to_relationships.values_list('pk'))
    @staticmethod
    def exists(from_user, user, status=None, symmetrical=False):
        """
        Returns boolean whether or not a relationship exists between the given
        users. An optional :class:`RelationshipStatus` instance can be specified.
        """
        query = dict(
            to_users__from_user=from_user,
            to_users__to_user=user,
        )
        if status:
            query.update(to_users__status=status)
        if symmetrical:
            query.update(
                from_users__to_user=from_user,
                from_users__from_user=user
            )
            if status:
                query.update(from_users__status=status)
        return User.objects.filter(**query).exists()
    # some defaults
    def following(self, from_user):
        # Users that from_user is following.
        return self.get_relationships(from_user, RelationshipStatus.objects.following())
    def followers(self, from_user):
        # Users following from_user.
        return self.get_related_to(from_user, RelationshipStatus.objects.following())
    def blocking(self, from_user):
        # Users that from_user is blocking.
        return self.get_relationships(from_user, RelationshipStatus.objects.blocking())
    def blockers(self, from_user):
        # Users blocking from_user.
        return self.get_related_to(from_user, RelationshipStatus.objects.blocking())
    def friends(self, from_user):
        # Mutual 'following' in both directions counts as friendship.
        return self.get_relationships(from_user, RelationshipStatus.objects.following(), True)
class Relationship(models.Model):
    """A directed edge (from_user -> to_user) carrying a status."""

    from_user = models.ForeignKey(UserModel, models.CASCADE, related_name='from_users')
    to_user = models.ForeignKey(UserModel, models.CASCADE, related_name='to_users')
    status = models.ForeignKey(RelationshipStatus, models.CASCADE, related_name='status')
    created = models.DateTimeField(_('created'), auto_now_add=True)
    objects = RelationshipManager()

    def __str__(self):
        # Python 3 uses __str__; __unicode__ alone is never called.
        return (_('Relationship from %(from_user)s to %(to_user)s')
                % {'from_user': self.from_user.username,
                   'to_user': self.to_user.username})

    def __unicode__(self):
        return (_('Relationship from %(from_user)s to %(to_user)s')
                % {'from_user': self.from_user.username,
                   'to_user': self.to_user.username})

    class Meta:
        unique_together = (('from_user', 'to_user', 'status'),)
        ordering = ('-created',)
        verbose_name = _('relationship')
        verbose_name_plural = _('relationships')
# Expose relationship helpers as User.relationships.
# NOTE(review): this binds the Relationship *class* (not a manager or a
# related-manager descriptor); User.get_following() then reads
# `self.relationships.objects` -- confirm this indirection is intended.
setattr(User, 'relationships', Relationship)
# External Accounts, this model cover a users of others
# social network, with it a user can sign in in our
# system without worry with form sign up, becoming it very quick
class ExternalAccounts(models.Model):
    # Identifiers for linked third-party (social) accounts, enabling
    # sign-in without the regular sign-up form.
    # NOTE(review): no FK to User here -- confirm how an external account
    # is associated with its owner.
    id = models.AutoField(primary_key=True)
    facebook_email = models.EmailField(verbose_name=_('facebook email'), max_length=45, blank=True, null=True)
    twitter_username = models.CharField(verbose_name=_('twitter username'), max_length=45, blank=True,
                                        null=True)
    class Meta:
        db_table = 'external_accounts'
        verbose_name = _('external account')
        verbose_name_plural = _('external accounts')
# Settings for a user, this model will cover
# a all settings for a user(messages, following, notifications)
# and others stuffs that can be helpful to become and perform the User Experience
# and User Interface more pragmatic
class UserSettings(models.Model):
    """Per-user notification toggles (followers / comments / messages).

    NOTE(review): OPT pairs integer values with boolean labels while the
    fields are CharFields with integer defaults; values get coerced to
    strings on save. A BooleanField would be the conventional fit --
    confirm before comparing against these values elsewhere.
    """
    OPT = (
        (0, False),
        (1, True),
    )
    id = models.AutoField(primary_key=True)
    user_id = models.ForeignKey(UserModel, on_delete=models.CASCADE)
    notifications_followers = models.CharField(_('followers notifications'), max_length=2,
                                               choices=OPT, blank=False, null=False, default=1)
    notifications_comments = models.CharField(_('comments notifications'), max_length=2,
                                              choices=OPT, blank=False, null=False, default=1)
    notifications_messages = models.CharField(_('messages notifications'), max_length=2,
                                              choices=OPT, blank=False, null=False, default=1)
    class Meta:
        db_table = 'user_settings'
        verbose_name = _('user settings')
        verbose_name_plural = _('users settings')
| null |
backend/users/models.py
|
models.py
|
py
| 16,866 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.auth.base_user.BaseUserManager",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.base_user.AbstractBaseUser",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.PermissionsMixin",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "backend.posts.models.TimeStampedModel",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.core.validators.RegexValidator",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.core.validators",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.db.models.EmailField",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.UserManager",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "backend.posts.models.TimeStampedModel",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "django.core.validators.RegexValidator",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "django.core.validators",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "django.db.models.DO_NOTHING",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "django.db.models.DateField",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 204,
"usage_type": "argument"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_delete",
"line_number": 214,
"usage_type": "argument"
},
{
"api_name": "django.db.models.Manager",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "django.db.models.EmailField",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.CharField",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 475,
"usage_type": "call"
}
] |
604230836
|
import os
import numpy as np
import pandas as pd
from PIL import Image
from torchvision import transforms
import torch.utils.data as data
class DatasetFLViT(data.Dataset):
    """Federated-learning dataset wrapper for the cifar10 / CelebA splits.

    Loads the pre-packaged ``./data/<dataset>.npy`` pickle and exposes the
    slice belonging to ``args.single_client`` for training, or the
    union/central split for validation and test.

    NOTE(review): the ``Retina`` branch only fills attributes on ``args``;
    it sets neither ``self.data`` nor ``self.transform``, and
    ``__getitem__`` handles only cifar10/CelebA — confirm that Retina
    samples are served by a different code path.
    """
    def __init__(self, args, phase ):
        # phase: 'train', 'val' or 'test'; selects both the augmentation
        # pipeline and which slice of the pickle is used.
        super(DatasetFLViT, self).__init__()
        self.phase = phase
        if args.dataset == "cifar10" or args.dataset == 'CelebA':
            # data_all = np.load(os.path.join(args.data_path, args.dataset + '.npy'), allow_pickle = True)
            data_all = np.load(os.path.join('./data/', args.dataset + '.npy'), allow_pickle = True)
            # np.load returns a 0-d object array; .item() recovers the dict.
            data_all = data_all.item()
            self.data_all = data_all[args.split_type]
            if self.phase == 'train':
                # Aggressive random crops for training-time augmentation.
                self.transform = transforms.Compose([
                    transforms.RandomResizedCrop((args.img_size, args.img_size), scale=(0.05, 1.0)),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ])
                if args.dataset == 'cifar10':
                    # Per-client image arrays and integer targets.
                    self.data = self.data_all['data'][args.single_client]
                    self.labels = self.data_all['target'][args.single_client]
                else:
                    # CelebA: per-client file names; labels are shared globally.
                    self.data = self.data_all['train'][args.single_client]['x']
                    self.labels = data_all['labels']
            else:
                # Deterministic resize for validation/test.
                self.transform = transforms.Compose([
                    transforms.Resize((args.img_size, args.img_size)),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ])
                if args.dataset == 'cifar10':
                    # Evaluation always uses the union split across clients.
                    self.data = data_all['union_' + phase]['data']
                    self.labels = data_all['union_' + phase]['target']
                else:
                    if args.split_type == 'real' and phase == 'val':
                        self.data = self.data_all['val'][args.single_client]['x']
                    elif args.split_type == 'central' or phase == 'test':
                        self.data = list(data_all['central']['val'].keys())
                    self.labels = data_all['labels']
        # for Retina dataset
        elif args.dataset =='Retina':
            # labels.csv rows are "<file name>,<float label>".
            args.labels = {line.strip().split(',')[0]: float(line.strip().split(',')[1]) for line in
                           open(os.path.join(args.data_path, 'labels.csv'))}
            args.loadSize = 256
            args.fineSize_w = 224
            args.fineSize_h = 224
        self.args = args
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.args.dataset == 'cifar10':
            # cifar10 stores raw arrays; convert to PIL for torchvision transforms.
            img, target = self.data[index], self.labels[index]
            img = Image.fromarray(img)
        elif self.args.dataset == 'CelebA':
            # CelebA stores file names; load the image lazily from disk.
            name = self.data[index]
            target = self.labels[name]
            path = os.path.join(self.args.data_path, 'img_align_celeba', name)
            img = Image.open(path).convert('RGB')
        target = np.asarray(target).astype('int64')
        if self.transform is not None:
            img = self.transform(img)
        return img, target
    def __len__(self):
        # Number of samples available to this client/split.
        return len(self.data)
def create_dataset_and_evalmetrix(args):
    """Discover the participating clients and set up per-client metric records.

    Mutates ``args`` in place:
      * ``args.dis_cvs_files`` — list of client names joining training
        (``['central']`` for centralized training).
      * ``args.clients_with_len`` — samples per client (cifar10 / real split).
      * ``args.record_val_acc`` / ``args.record_test_acc`` — DataFrames with
        one column per client for accuracy history.
      * ``args.best_acc`` / ``args.current_acc`` / ``args.current_test_acc`` /
        ``args.best_eval_loss`` — per-client bookkeeping dicts.

    Returns None.
    """
    ## step 1: get the joined clients
    if args.split_type == 'central':
        args.dis_cvs_files = ['central']
    else:
        if args.dataset == 'cifar10':
            # The .npy pickle is a 0-d object array; .item() recovers the dict.
            data_all = np.load(os.path.join('./data/', args.dataset + '.npy'), allow_pickle=True)
            data_all = data_all.item()
            data_all = data_all[args.split_type]
            args.dis_cvs_files = [key for key in data_all['data'].keys() if 'train' in key]
            args.clients_with_len = {name: data_all['data'][name].shape[0] for name in args.dis_cvs_files}
        elif args.dataset == 'Retina':
            # One subdirectory per client.
            args.dis_cvs_files = os.listdir(os.path.join(args.data_path, args.split_type))
        elif args.dataset == 'CelebA':
            data_all = np.load(os.path.join('./data/', args.dataset + '.npy'), allow_pickle=True)
            data_all = data_all.item()
            args.dis_cvs_files = list(data_all[args.split_type]['train'].keys())
        if args.split_type == 'real':
            # NOTE(review): 'real' assumes data_all was loaded above (cifar10 /
            # CelebA); Retina + 'real' would hit an undefined name — confirm.
            args.clients_with_len = {name: len(data_all['real']['train'][name]['x']) for name in data_all['real']['train']}
    ## step 2: set up the evaluation records
    args.learning_rate_record = []
    args.record_val_acc = pd.DataFrame(columns=args.dis_cvs_files)
    args.record_test_acc = pd.DataFrame(columns=args.dis_cvs_files)
    args.save_model = False  # set to False: do not save the intermediate models
    # Fix: initialize all bookkeeping dicts before indexing them. The
    # original only created best_eval_loss and raised AttributeError on
    # args.best_acc / current_acc / current_test_acc.
    args.best_acc = {}
    args.current_acc = {}
    args.current_test_acc = {}
    args.best_eval_loss = {}
    for single_client in args.dis_cvs_files:
        # For classification (num_classes > 1) higher accuracy is better, so
        # start at 0; for regression the metric is an error, so start high.
        args.best_acc[single_client] = 0 if args.num_classes > 1 else 999
        args.current_acc[single_client] = []
        args.current_test_acc[single_client] = []
        args.best_eval_loss[single_client] = 9999
| null |
utils/data_utils.py
|
data_utils.py
|
py
| 5,279 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomResizedCrop",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 131,
"usage_type": "call"
}
] |
590898886
|
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from hathor.conf.settings import HathorSettings as Settings
# Module path of the configuration already loaded in this process; used by
# HathorSettings to refuse loading two different config files.
_config_file = None
def HathorSettings() -> Settings:
    """Return the settings namedtuple for the selected network.

    The configuration module path is read from the HATHOR_CONFIG_FILE
    environment variable; when unset, the mainnet configuration is used.
    Raises if called twice with different configuration files.
    """
    global _config_file
    fallback = 'hathor.conf.mainnet'
    chosen = os.environ.get('HATHOR_CONFIG_FILE', fallback)
    # Remember the first choice and refuse a conflicting second load.
    if _config_file is None:
        _config_file = chosen
    elif _config_file != chosen:
        raise Exception('loading config twice with a different file')
    try:
        module = importlib.import_module(chosen)
    except ModuleNotFoundError:
        # Fall back to mainnet when the requested module does not exist.
        module = importlib.import_module(fallback)
    return module.SETTINGS  # type: ignore
| null |
hathor/conf/get_settings.py
|
get_settings.py
|
py
| 1,437 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_module",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "hathor.conf.settings.HathorSettings",
"line_number": 23,
"usage_type": "name"
}
] |
644000189
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from os.path import join
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# define the CLI arguments
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')
parser.add_argument('--no_train', help='disable training', action='store_true')
# Integer class labels used throughout the pipeline.
CAT_LABEL = 0
DOG_LABEL = 1
# the basepath of the training images. Make sure to have the data setup in the described way.
base_path = 'data'
# Dimensions of the images. Preventing some magic numbers
image_size_rows = 112
image_size_cols = 112
# set this to 1 to have grayscale images. Then you will have to check how to display grayscale images.
image_size_channels = 3
# Combined [cols, rows, channels] shape for reference.
image_size = [image_size_cols, image_size_rows, image_size_channels]
# Load and decode one image from disk for the tf.data pipeline.
def prepare_image_fn(image_path, label):
    """Read, decode and resize a single image; the label passes through."""
    raw_bytes = tf.read_file(image_path)
    # NOTE(review): decoding can fail on corrupt files (e.g. 666.jpg); the
    # pipeline later applies tf.contrib.data.ignore_errors() to drop those.
    decoded = tf.image.decode_image(raw_bytes, channels=image_size_channels)
    decoded.set_shape([None, None, None])
    # Resize to the fixed model input size; consider handling this externally.
    resized = tf.image.resize_images(decoded, [image_size_cols, image_size_rows])
    return resized, label
# Build a (file path, label) dataset for every image of one class folder.
def get_class_dataset(dataset, label, class_name):
    """List all jpg files of one class and pair each path with its label."""
    pattern = join(base_path, dataset, class_name, '*.jpg')
    path_ds = tf.data.Dataset.list_files(pattern)
    # Attach the class label to each path; 4 elements are mapped in parallel.
    return path_ds.map(lambda p: (p, label), 4)
def get_dataset(dataset='train', shuffle=True, batch_size=250, buffer_size=20000, repeat=True, prefetch=500):
    '''
    Create a combined cat/dog dataset using the tensorflow.data.Dataset API.

    :param dataset: Name of the dataset folder, e.g. 'train' or 'test'.
    :param shuffle: If true, shuffle the examples.
    :param batch_size: Return batches of this size.
    :param buffer_size: Shuffle buffer size; should be roughly the dataset size.
    :param repeat: If true, the dataset repeats infinitely.
    :param prefetch: Number of elements to prefetch so new data loads while the
        GPU processes the previous batch; None disables prefetching.
    :return: A tf.data.Dataset yielding (image, label) batches of both classes.
    '''
    # One path dataset per class, each tagged with its integer label.
    cats = get_class_dataset(dataset, tf.constant(CAT_LABEL), 'Cat')
    dogs = get_class_dataset(dataset, tf.constant(DOG_LABEL), 'Dog')
    data = cats.concatenate(dogs)
    if repeat:
        data = data.repeat()
    if shuffle:
        data = data.shuffle(buffer_size=buffer_size)
    # Decode/resize 10 images in parallel; corrupt images are silently
    # dropped by ignore_errors() (see note in prepare_image_fn).
    data = data.map(prepare_image_fn, 10)
    data = data.apply(tf.contrib.data.ignore_errors())
    data = data.batch(batch_size)
    # Alternative for stronger GPUs: fuse map and batch:
    # data = data.apply(tf.contrib.data.map_and_batch(
    #     map_func=prepare_image_fn, batch_size=batch_size, num_parallel_batches=1))
    if prefetch is not None:
        data = data.prefetch(prefetch)
    return data
# see https://www.tensorflow.org/tutorials/layers
# see https://github.com/tensorflow/tensorflow/blob/r1.8/tensorflow/examples/tutorials/layers/cnn_mnist.py
def cnn_model_fn(features, labels, mode):
    '''Model function for CNN.

    Estimator model_fn that builds the graph for TRAIN, EVAL and PREDICT.
    Architecture: conv(16) -> conv(32) -> maxpool/4 -> conv(64) -> maxpool/4
    -> dense(1024) -> dense(512) -> dropout(0.4) -> logits(2).
    '''
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    input_layer = tf.reshape(features, [-1, image_size_cols, image_size_rows, image_size_channels])
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=16,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)
    conv2 = tf.layers.conv2d(
        inputs=conv1,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)
    # Two 4x4/stride-4 poolings reduce the 112x112 input to 7x7 feature maps.
    pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[4, 4], strides=4)
    conv3 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[4, 4], strides=4)
    # If you change the input size, adapt this. 7*7 is the pooled feature-map
    # size, 64 the number of filters in conv3.
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dense = tf.layers.dense(inputs=dense, units=512, activation=tf.nn.relu)
    # Dropout is only active in TRAIN mode.
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Use 2 units, as we have 2 classes.
    logits = tf.layers.dense(inputs=dropout, units=2)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        'classes': tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.002)
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predictions['classes'])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(argv):
    """Train and evaluate the CNN estimator, then interactively preview predictions.

    Expects module-level helpers defined elsewhere in this file:
    ``parser`` (argparse), ``cnn_model_fn`` and ``get_dataset``.
    Must run inside a default TF session (see the __main__ block) so that
    ``Tensor.eval()`` below works without an explicit session argument.
    """
    args = parser.parse_args(argv[1:])
    # TODO: change the model dir, if you want to start a new model
    classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir='model/model_01')
    if not args.no_train:
        # train the model
        print('Start training...')
        classifier.train(
            input_fn=lambda: get_dataset('train',
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         repeat=True,
                                         prefetch=500
                                         ),
            steps=args.train_steps)
        # Evaluate the model. Use 5000 steps, as we have that many test images
        eval_result = classifier.evaluate(
            input_fn=lambda: get_dataset('test',
                                         shuffle=False,
                                         batch_size=100,
                                         repeat=False,
                                         prefetch=100
                                         ),
            steps=5000)
        print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    # Interactive preview loop: pull single test images, run them through the
    # trained estimator, and show the predicted class with matplotlib.
    test_dataset = get_dataset('test',
                               shuffle=True,
                               batch_size=1,
                               repeat=False,
                               prefetch=1,
                               buffer_size=5000
                               ).make_one_shot_iterator()
    next_batch = test_dataset.get_next()
    while True:
        try:
            # TODO: This whole thing feels like an ugly hack. But works...
            # Materialize one image via the default session, then feed it back
            # through a numpy input fn for a single prediction.
            image = next_batch[0].eval()
            predict_input_fn = tf.estimator.inputs.numpy_input_fn(
                x=image,
                num_epochs=1,
                shuffle=False)
            predictions = classifier.predict(input_fn=predict_input_fn)
            prediction = predictions.__next__()
            # Drop the leading batch dimension; assumes image is (1, H, W, C)
            # -- TODO confirm against get_dataset.
            image = np.reshape(image, image.shape[-3:])
            # Images have output values from 0-255, matplot expects values from 0-1
            image = image / 255
            plt.imshow(image)
            plt.title('Predicted class: ' + str(prediction['classes']))
            plt.show()
            plt.waitforbuttonpress(timeout=1)
            # if you use scientific mode of pycharm, this might not work. Feel free to find a solution.
            # closes all plots to prevent OOM errors of pycharm.
            plt.close('all')
        except Exception as e:
            # End of the one-shot iterator (OutOfRangeError) or any display
            # failure terminates the preview loop.
            print(e)
            break
if __name__ == '__main__':
    # TODO: if you want to train on your CPU for some reason (eg. not enough RAM)
    # import os
    # os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    # Install a default session so that `next_batch[0].eval()` inside main()
    # can run without an explicit session argument.
    sess = tf.Session()
    with sess.as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        tf.app.run(main)
| null |
fullstack/python/tensorflow/cat-dogs-dataset-with-tensorflow-pfehrmann.py
|
cat-dogs-dataset-with-tensorflow-pfehrmann.py
|
py
| 9,322 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.read_file",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.image.decode_image",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tensorflow.image",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.image.resize_images",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tensorflow.image",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.Dataset.list_files",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.data.ignore_errors",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reshape",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers.conv2d",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.conv2d",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.max_pooling2d",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.conv2d",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.max_pooling2d",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reshape",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.dropout",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.layers.dense",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.argmax",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.softmax",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.EstimatorSpec",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.losses.sparse_softmax_cross_entropy",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "tensorflow.losses",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.MomentumOptimizer",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.get_global_step",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.EstimatorSpec",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.metrics.accuracy",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "tensorflow.metrics",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.EstimatorSpec",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.Estimator",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator.inputs.numpy_input_fn",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "tensorflow.estimator",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "numpy.reshape",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.waitforbuttonpress",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "tensorflow.Session",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging.set_verbosity",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.app.run",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "tensorflow.app",
"line_number": 247,
"usage_type": "attribute"
}
] |
314570330
|
# Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide session level fixtures."""
from datetime import datetime
import pytest
from jose import jwt
from warehouse import models
from warehouse.app import app as app_
from warehouse.app import init_app
from warehouse.models import db as db_
@pytest.fixture(scope="session")
def app():
    """Provide an initialized Flask for use in certain test cases."""
    init_app(app_)
    # Keep an application context open for the whole test session so that
    # extensions bound to the app (e.g. the database) are usable everywhere.
    with app_.app_context():
        yield app_
@pytest.fixture(scope="session")
def client(app):
    """Provide a Flask test client to be used by almost all test cases."""
    # Session-scoped: the same client instance is shared across tests.
    with app.test_client() as client:
        yield client
@pytest.fixture(scope="session")
def reset_tables(app):
    """Ensure a clean database.

    Runs once per session; per-test isolation is provided by the
    transaction rollback in the ``session`` fixture below.
    """
    # Clean up anything that might reside in the testing database.
    db_.session.remove()
    db_.drop_all()
    # Re-create tables.
    db_.create_all()
@pytest.fixture(scope="session")
def connection():
    """
    Use a connection such that transactions can be used.

    Notes
    -----
    Follows a transaction pattern described in the following:
    http://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#session-begin-nested
    """
    # A single shared connection for the whole session; each test wraps it
    # in its own transaction (see the `session` fixture).
    with db_.engine.connect() as connection:
        yield connection
@pytest.fixture(scope="function")
def session(reset_tables, connection):
    """
    Create a transaction and session per test unit.

    Rolling back a transaction removes even committed rows
    (``session.commit``) from the database.

    https://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites
    """
    # Remember the flask-sqlalchemy session so it can be restored after the
    # test; the scoped session below is bound to the external transaction.
    flask_sqlalchemy_session = db_.session
    transaction = connection.begin()
    db_.session = db_.create_scoped_session(
        options={"bind": connection, "binds": {}}
    )
    yield db_.session
    # Teardown: close the test session, discard everything the test wrote
    # (even committed rows), and restore the original session object.
    db_.session.close()
    transaction.rollback()
    db_.session = flask_sqlalchemy_session
@pytest.fixture(scope="session")
def tokens(app):
    """Provide read, write and admin JWT claims to project 1."""
    # One token per access role, all signed with the app's private RS512 key.
    signing_key = app.config["JWT_PRIVATE_KEY"]
    return {
        role: jwt.encode({"prj": {1: role}}, signing_key, "RS512")
        for role in ("read", "write", "admin")
    }
@pytest.fixture(scope="function")
def data_fixtures(session):
    """Insert a small, fully linked set of model rows and return them by name."""
    organism = models.Organism(project_id=1, name="E. coli fixture")
    strain = models.Strain(
        project_id=1,
        name="Strain fixture",
        organism=organism,
        parent=None,
        genotype="Lorem ipsum",
    )
    medium = models.Medium(name="Medium fixture")
    medium_compound = models.MediumCompound(
        medium=medium,
        compound_name="Medium compound fixture",
        compound_identifier="M1234",
        compound_namespace="custom",
    )
    experiment = models.Experiment(
        project_id=1, name="Experiment fixture", description="Lorem ipsum"
    )
    condition = models.Condition(
        experiment=experiment,
        strain=strain,
        medium=medium,
        name="Condition fixture",
    )
    sample = models.Sample(
        condition=condition,
        name="Sample fixture",
        start_time=datetime(2019, 10, 28, 14, 00),
        end_time=None,
    )
    # Registry of all fixture rows, in dependency order; insertion order of
    # the dict doubles as the order the records are added to the session.
    fixtures = {
        "organism": organism,
        "strain": strain,
        "medium": medium,
        "medium_compound": medium_compound,
        "experiment": experiment,
        "condition": condition,
        "sample": sample,
    }
    for record in fixtures.values():
        session.add(record)
    session.commit()
    return fixtures
| null |
tests/conftest.py
|
conftest.py
|
py
| 4,483 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warehouse.app.init_app",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "warehouse.app.app",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "warehouse.app.app.app_context",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "warehouse.app.app",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "warehouse.app.app",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.session.remove",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.session",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "warehouse.models.db.drop_all",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "warehouse.models.db.create_all",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.engine.connect",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.engine",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.session",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "warehouse.models.db.session",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "warehouse.models.db.create_scoped_session",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.session",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "warehouse.models.db.session.close",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "warehouse.models.db.session",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "warehouse.models.db.session",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "warehouse.models.db",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "jose.jwt.encode",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "jose.jwt",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "jose.jwt.encode",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "jose.jwt",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "jose.jwt.encode",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "jose.jwt",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "warehouse.models.Organism",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "warehouse.models.Strain",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "warehouse.models.Medium",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "warehouse.models.MediumCompound",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "warehouse.models.Experiment",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "warehouse.models.Condition",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "warehouse.models.Sample",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "warehouse.models",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 106,
"usage_type": "call"
}
] |
380656250
|
from django.urls import path
from . import views
# URL configuration for the homemap app; names are used for reverse lookups.
app_name = "homemap"

urlpatterns = [
    path("", views.index, name="index"),
    path("create/", views.create, name="create"),
    path("delete/<int:id>", views.delete, name="delete"),
    path("update/<int:id>", views.update, name="update"),
    path("select/", views.select, name="select"),
    path("foodcollection/", views.foodcollection, name="foodcollection"),
    path("about/", views.about, name="about"),
]
| null |
homemap/urls.py
|
urls.py
|
py
| 448 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
}
] |
411491777
|
import re
import json
import os.path
import unicodedata
from urllib import parse, request
import requests
import urllib.request
import sys
import json
def gethouselink(ln):
    """Build an estateblock.com listing URL from one raw address line.

    The address line is slug-ified (separators become hyphens) and, when a
    known city name appears in it, prefixed with that city's search path.

    :param ln: raw address line, e.g. read from a file (may end in '\\n')
    :return: the lower-cased full URL, or just the lower-cased slug when no
             known city is found
    """
    # Bug fix: lines obtained via readlines() keep their trailing newline,
    # which previously ended up inside the generated URL. Strip it first.
    link = ln.strip()
    link = link.replace('- ', '-')
    link = link.replace('? ', '-')
    link = link.replace(', ', '-')
    link = link.replace(' ', '-')
    print(link)
    prefix = 'http://www.estateblock.com/'
    # Map of recognizable city names (post-slug form) to their URL paths.
    locmap = {'Coquitlam': 'coquitlam-real-estate',
              'North-Vancouver': 'north-vancouver-real-estate',
              'Burnaby': 'burnaby-real-estate',
              'Port-Moody': 'port-moody-real-estate',
              'Anmore': 'anmore-real-estate'}
    for location in locmap.keys():
        if location in link:
            link = prefix + locmap[location] + '/' + link
            break
    print(link.lower())
    return link.lower()
def getmlsinfo(mlsid):
    """Look up the listing realtor's phone number on realtor.ca.

    Posts the MLS reference number to realtor.ca's property-search API and
    extracts the first listed individual's first phone number.

    :param mlsid: MLS reference number, e.g. 'r123456'
    :return: area code + phone number as one string, or '' when the lookup
             yields no usable result
    """
    r = requests.post('https://www.realtor.ca/api/Listing.svc/PropertySearch_Post',
                      data={'CultureId': '1', 'ApplicationId': '1', 'ReferenceNumber': mlsid})
    parsed_json = r.json()
    try:
        phone = parsed_json['Results'][0]['Individual'][0]['Phones'][0]['PhoneNumber']
        area = parsed_json['Results'][0]['Individual'][0]['Phones'][0]['AreaCode']
        # Fixed typo in the debug message (was 'hone from json').
        print('phone from json' + area + phone)
        return area + phone
    except Exception:
        # Best-effort lookup: an empty result set or missing keys simply
        # mean "no phone available", not a fatal error.
        pass
    return ''
def format_item(houseinfo):
    """Render one scraped house record as an XML <house> element.

    Listings the user is not interested in are filtered out; returns ''
    when any of these hold:
    - price above $2,000,000
    - a known lot size under 6,000 sq.ft (empty lot field is NOT filtered)
    - road noise not rated 'unlikely'

    Ampersands are stripped from the final string so the output stays
    well-formed XML without full escaping.

    :param houseinfo: dict of string fields as produced by parsehtml()
    :return: the XML fragment, or '' when the listing is filtered out
    """
    price = houseinfo['price'].replace(',', '').replace('$', '')
    if int(price) > 2000000:
        return ''
    lot = houseinfo['lot'].replace(',', '')
    # An empty lot field means "unknown" and passes the filter.
    if lot and int(lot) < 6000:
        return ''
    if 'unlikely' not in houseinfo['roadnoise'].lower():
        return ''
    schools = (
        f"<elementaryname>{houseinfo['elementary_name']}</elementaryname>"
        f"<elementarydist>{houseinfo['elementary_dist']}</elementarydist>"
        f"<elementaryrank>{houseinfo['elementary_rank']}</elementaryrank>"
        f"<middlename>{houseinfo['middle_name']}</middlename>"
        f"<middledist>{houseinfo['middle_dist']}</middledist>"
        f"<middlerank>{houseinfo['middle_rank']}</middlerank>"
        f"<secondaryname>{houseinfo['secondary_name']}</secondaryname>"
        f"<secondarydist>{houseinfo['secondary_dist']}</secondarydist>"
        f"<secondaryrank>{houseinfo['secondary_rank']}</secondaryrank>"
    )
    item = (
        f"<house><address>{houseinfo['address']}</address>"
        f"<type>{houseinfo['type']}</type>"
        f"<link>{houseinfo['houselink']}</link>"
        f"<mlsid>{houseinfo['mlsid']}</mlsid>"
        f"<price>{houseinfo['price']}</price>"
        f"<realtorphone>{houseinfo['realtorphone']}</realtorphone>"
        f"<lot>{houseinfo['lot']}</lot>"
        f"<sqft>{houseinfo['sqft']}</sqft>"
        f"<year>{houseinfo['year']}</year>"
        f"{schools}"
        f"<roadnoise>{houseinfo['roadnoise']}</roadnoise>"
        f"<dayson>{houseinfo['dayson']}</dayson>"
        f"<openinfo>{houseinfo['openinfo']}</openinfo></house>"
    )
    return item.replace('&', '')
def output_content(item):
    # Prepend one formatted <house> fragment to output.xml.
    # The file is (re)created with an empty <report> root when missing or
    # empty; new items are inserted right after the opening <report> tag,
    # so the most recently added listing appears first.
    content=''
    head='<?xml version="1.0" encoding="ISO-8859-1"?><?xml-stylesheet type="text/xsl" href="report.xsl"?>'
    file='output.xml'
    content=head+'<report></report>'
    if not os.path.isfile(file):
        content=head+'<report></report>'
    else:
        with open(file, 'r') as f:
            # NOTE(review): content becomes a *list* of lines here, joined
            # back together below; both branches work because ''.join()
            # accepts a string as well as a list of strings.
            content=f.read().splitlines();
        if not content:
            content=head+'<report></report>'
    #print content
    # Nothing to insert (item filtered out by format_item) -> leave file as is.
    if not item:
        return
    cont=''.join(content).replace('<report>','<report>'+item)
    #content.insert(1,item)
    f=open(file,'w')
    #print 'writing '+cont
    f.write(cont)
    f.close()
def parsehtml(link):
    # Scrape one estateblock.com listing page and extract the listing facts
    # into a flat dict of strings. Every field defaults to '' so downstream
    # formatting (format_item) never hits a missing key.
    res={'address':'',
    'type':'',
    'houselink':'',
    'mlsid':'',
    'realtorphone':'',
    'price':'',
    'lot':'',
    'sqft':'',
    'year':'',
    'elementary_dist':'',
    'elementary_name':'',
    'elementary_rank':'',
    'middle_dist':'',
    'middle_name':'',
    'middle_rank':'',
    'secondary_dist':'',
    'secondary_name':'',
    'secondary_rank':'',
    'roadnoise':'',
    'openinfo':'',
    'dayson':''}
    res['houselink']=link
    #response=urllib2.urlopen(link);
    # Address and MLS id are parsed from the URL itself, not the page.
    saddress=r'/([\d]*[\w\s-]*)-bc'
    maddress=re.findall(saddress,link)
    if maddress:
        res['address']=maddress[0].replace('-',' ')
    smls=r'mls-([\d\w]*)'
    mmls=re.findall(smls,link)
    if mmls:
        res['mlsid']=mmls[0]
        res['realtorphone']=getmlsinfo(res['mlsid'])
    r=requests.get(link);
    #html=r.read()
    try:
        ins=urllib.request.urlopen(link)
    except Exception:
        # Page unreachable: return the mostly-empty record (link/mlsid only).
        res['houselink']=link
        return res
    #for lines in ins.readlines():
    #    if 'Dwelling' in lines.decode('utf-8','ignore'):
    #        print(lines)
    #        print(unicodedata.normalize('NFC',lines.decode('utf-8','ignore')))
    # Flatten the page into a single line so the regexes below do not have
    # to deal with line breaks inside the HTML.
    html=''
    for lines in ins.readlines():
        lines=unicodedata.normalize('NFC',lines.decode('utf-8','ignore'))
        newline=lines.rstrip('\r\n')
        html+=newline
    #htmlBytes=ins.read()
    #html=unicodedata.normalize('NFC',htmlBytes.decode('utf-8', 'ignore'))
    #print(html.encode('gbk','ignore'))
    #html=html.encode('gbk','ignore')
    stype=r'Type of Dwelling</label> *<p>([\w\s\/]*)</p>'
    mtype=re.findall(stype,html)
    if mtype:
        res['type']=mtype[0]
    # <label>List Price</label> <p>$1,288,000</p>
    ptype=r'List Price</label> *<p>([\w\s\/,$]*)</p>'
    ptype=re.findall(ptype,html)
    if ptype:
        res['price']=ptype[0]
    #lot
    # <h4>Lot Sz (Sq.ft)</h4> <p>9,288</p>
    ltype=r'Lot Sz \(Sq\.ft\)</h4> *<p>([\d,]*)</p>'
    ltype=re.findall(ltype,html)
    if ltype:
        res['lot']=ltype[0]
    #sqft
    #<h4>Floor Area Total (Sq.ft)</h4> <p>2,443</p>
    stype=r'Floor Area Total \(Sq\.ft\)</h4> *<p>([\w\s\/\d,$]*)</p>'
    stype=re.findall(stype,html)
    if stype:
        res['sqft']=stype[0]
    #year
    # <h4>Approx Year Built</h4> <p>1946</p>
    ytype=r'Approx Year Built</h4> *<p>([\w\s\/\d,$]*)</p>'
    ytype=re.findall(ytype,html)
    if ytype:
        res['year']=ytype[0]
    #elmentary
    # <h4>Elementary</h4> <p>Rochester Elementary</p> <span class="red">Below Average</span> <em>1.25 km</em>
    # School name, rank and distance are matched incrementally: each later
    # regex embeds the previously matched text (via re.escape) as an anchor.
    eltype=r'Elementary</h4> *<p>([\w\s\/,$]*)</p>'
    eltype=re.findall(eltype,html)
    if eltype:
        res['elementary_name']=eltype[0]
    eltype=re.escape(res['elementary_name'])+r'</p> *<span[ \w=\"]*>([\w\s\/\.\d]*)</span>'
    eltype=re.findall(eltype,html)
    if eltype:
        res['elementary_rank']=eltype[0]
    #</p> <span class="green">
    eltype=re.escape(res['elementary_name'])+r'</p> *<span[ \w=\"]*>[\w\s\/\.\d]*'+re.escape(res['elementary_rank'])+r'</span> *<em>([\w\s\/\.\d]*)</em>'
    print(eltype)
    eltype=re.findall(eltype,html)
    if eltype:
        res['elementary_dist']=eltype[0]
    # <h4>Middle</h4> <p>Maillard Middle School</p> <span class="yellow">Average</span> <em>0.83 km</em> </div>
    eltype=r'Middle</h4> *<p>([\w\s\/,$]*)</p>'
    eltype=re.findall(eltype,html)
    if eltype:
        res['middle_name']=eltype[0]
    # Rank/distance are only looked up when a middle school name was found;
    # otherwise the anchored regexes would match arbitrary spans.
    if res['middle_name']:
        eltype=re.escape(res['middle_name'])+r'</p> *<span[ \w=\"]*>([\w\s\/\.\d]*)</span>'
        eltype=re.findall(eltype,html)
        if eltype:
            res['middle_rank']=eltype[0]
        eltype=re.escape(res['middle_name'])+r'</p> *<span[ \w=\"]*>[\w\s\/\.\d]*'+re.escape(res['middle_rank'])+r'</span> *<em>([\w\s\/\.\d]*)</em>'
        eltype=re.findall(eltype,html)
        if eltype:
            res['middle_dist']=eltype[0]
    #<h4>Secondary</h4> <p>Centennial School</p> <span class="red">Below Average</span> <em>2.38 km<
    eltype=r'Secondary</h4> *<p>([\w\s\/,$]*)</p>'
    eltype=re.findall(eltype,html)
    if eltype:
        res['secondary_name']=eltype[0]
    eltype=re.escape(res['secondary_name'])+r'</p> *<span[ \w=\"]*>([\w\s\/\.\d]*)</span>'
    eltype=re.findall(eltype,html)
    if eltype:
        res['secondary_rank']=eltype[0]
    eltype=re.escape(res['secondary_name'])+r'</p> *<span[ \w=\"]*>[\w\s\/\.\d]*'+re.escape(res['secondary_rank'])+r'</span> *<em>([\w\s\/\.\d]*)</em>'
    eltype=re.findall(eltype,html)
    if eltype:
        res['secondary_dist']=eltype[0]
    #<h4>Road Noise</h4> <span class=\'red\'>Likely</span>
    roadtype=r'Road Noise</h4> *<span[ \w=\\\"\']*>([\w\s\/,$]*)</span>'
    roadtype=re.findall(roadtype,html)
    if roadtype:
        res['roadnoise']=roadtype[0]
    #Open House Sat Jan 23rd 2-4pm.</p>
    opentype=r'[\. \s]+(Open (?!Government)House[\w\s\/,$-\.:]*)</p>'
    opentype=re.findall(opentype,html,re.I)
    if opentype:
        res['openinfo']=opentype[0]
    #<span class="dom-in-green"> 1 Days on the market
    daytype=r'>([\s\w>]+Days) on the market'
    daytype=re.findall(daytype,html,re.I)
    if daytype:
        res['dayson']=daytype[0]
    print(res)
    return res
# ---- Script entry: read address lines, scrape each listing, build output.xml.
# Bug fix: the original indexed sys.argv[1] unconditionally and crashed with
# IndexError when the script was run without arguments; default to the
# bundled houselist.txt in that case.
if len(sys.argv) > 1 and sys.argv[1]:
    inputfile = sys.argv[1]
else:
    inputfile = 'houselist.txt'
date = '20160122'
if date:
    outputfile = 'house' + date + '.xml'
    formateddate = date[:4] + '-' + date[4:6] + '-' + date[6:]
file = 'output.xml'
try:
    # Start from a clean report file; a missing file is fine.
    os.remove(file)
except Exception:
    pass
with open(inputfile, 'r') as f:
    for line in f.readlines():
        jlink = gethouselink(line)
        info = parsehtml(jlink)
        content = format_item(info)
        output_content(content)
jobid = ''
config = ''
| null |
househunter/generatehouses.py
|
generatehouses.py
|
py
| 10,423 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.post",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "unicodedata.normalize",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "os.path.remove",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 278,
"usage_type": "name"
}
] |
369394031
|
import logging

# Module-level logger configuration: reuse an existing root handler when one
# is present (e.g. when the hosting runtime pre-installs one -- presumably a
# serverless environment; confirm against deployment), otherwise install a
# StreamHandler so local runs produce output too.
LOGGER = logging.getLogger()
try:
    HANDLER = LOGGER.handlers[0]
except IndexError:
    # This is required for local testing
    HANDLER = logging.StreamHandler()
    LOGGER.addHandler(HANDLER)
# Timestamped "[time] LEVEL:logger:message" format applied to the handler.
LOGFORMAT = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
HANDLER.setFormatter(logging.Formatter(LOGFORMAT, "%Y-%m-%d %H:%M:%S"))
LOGGER.setLevel(logging.INFO)
| null |
logger/logger_format.py
|
logger_format.py
|
py
| 376 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
}
] |
369176508
|
import tensorflow as tf
import keras
import numpy as np

print(tf.__version__)
# loading train data: 25k/25k train/test reviews, vocabulary capped at the
# 1000 most frequent words.
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=1000)
#Explore the data
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
'''The text of reviews have been converted to integers,
where each integer represents a specific word in a dictionary.
Here's what the first review looks like:
And how did they convert it into numbers?
'''
print(train_data[0])
'''Movie reviews may be different lengths.
The below code shows the number of words in the first and
second reviews. Since inputs to a neural network must be
the same length, we'll need to resolve this later.
'''
print(len(train_data[0]), len(train_data[1]))
'''
The dictionary is used to convert the text to data
'''
# A dictionary mapping words to an integer index
# There is a index for the words for the
word_index = imdb.get_word_index()
# The first indices are reserved
# Shift every index by 3 to make room for the special tokens below.
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
# Inverse mapping (index -> word) used by decode_review below.
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    """Translate a sequence of integer word IDs back into readable text.

    Unknown IDs are rendered as '?'.
    """
    words = [reverse_word_index.get(token, '?') for token in text]
    return ' '.join(words)
decode_review(train_data[0])
# Prepare the data, they need to be tokenized
# Pad/truncate every review to exactly 256 tokens (padding appended at the
# end with the <PAD> index) so all network inputs share one length.
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                       value=word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)
# Review the data: both reviews now have the same length.
print(len(train_data[0]), len(train_data[1]))
| null |
IMDB-textmining.py
|
IMDB-textmining.py
|
py
| 2,091 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.__version__",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "keras.datasets",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "keras.preprocessing.sequence.pad_sequences",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "keras.preprocessing.sequence.pad_sequences",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing",
"line_number": 52,
"usage_type": "attribute"
}
] |
337069339
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 14:45:21 2021
@author: Mike
"""
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard:
"""A class to report scoring information"""
def __init__(self, ai_game):
"""Initialize scorekeeping attributes"""
self.ai_game = ai_game
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.settings = ai_game.settings
self.stats = ai_game.stats
# Font settings for scoring information
self.text_color = (0, 255, 0)
self.font = pygame.font.SysFont(None, 48)
# Prepare the initial score image
self.prep_score()
self.prep_ships()
def prep_score(self):
"""Turn the score into a rendered image"""
score_str = str(self.stats.score)
self.score_image = self.font.render(score_str, True, self.text_color)
# Display the score at the top right of the screen
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def show_score(self):
"""Draw scores and ships left to the screen"""
self.screen.blit(self.score_image, self.score_rect)
self.ships.draw(self.screen)
def prep_ships(self):
"""Show how many ships are left"""
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_game)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
| null |
src/scoreboard.py
|
scoreboard.py
|
py
| 1,693 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.font.font.SysFont",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.font.font",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.font",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "ship.Ship",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "ship.rect",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "ship.rect",
"line_number": 52,
"usage_type": "attribute"
}
] |
313094868
|
# -*- coding: utf-8 -*-
"""
@author: brendan.fitzpatrick
Questions
-------------------------------------------------------------------------------
1. The stakeholder wants the message_blast chart to have two new fields:
total_emails_clicked and
total_emails_opened.
How can we provide this information?
See "Message Blast" sheet in the excel.
2. The stakeholder wants the message_action chart to have two new fields:
total_historical_emails_sent and
total_historical_emails_opened.
How can we provide this information?
See "Message Action" and "Message Action Logs" sheets in the excel.
3. One day, we realize that if a user opens/clicks on an email several days
after it was sent, their message_blast record is sent again! We have
duplicate data in our system. How can we account for this?
Drop all records by "profile_id" and "blast_id" for each message blast
object we process.
4. Sometimes if Sailthru's API is delayed then the stakeolder isn't sure if
she's looking at the most up-to-date data. How can we remedy this?
Add a "processed_time" datetime column to the tables.
"""
import os
import sys
import json
import numpy as np
import pandas as pd
import re
def main(argv):
ofname = 'Data/basic_data_output.xlsx'
writer = pd.ExcelWriter(ofname)
""" message blast """
mb_fname = os.path.join('Data', 'message_blast.20160401.json')
mb_json = read_json(mb_fname)
df_mb = (pd.DataFrame(mb_json)
.assign(total_emails_clicked=lambda x: (x.clicks
.apply(lambda r: totals(r))),
total_emails_opened=lambda x: (x.opens
.apply(lambda r: totals(r))))
.drop(['clicks', 'opens'], axis=1))
df_mb.to_excel(writer, 'Message Blast')
""" message action """
mt_fname = os.path.join('Data', 'message_transactional.20160401.json')
mt_json = read_json(mt_fname)
df_mt = (pd.DataFrame(mt_json)
.assign(total_historical_emails_clicked=lambda x: (x.clicks
.apply(lambda r: totals(r))),
total_historical_emails_opened=lambda x: (x.opens
.apply(lambda r: totals(r))))
.drop(['clicks', 'opens'], axis=1))
df_mt.to_excel(writer, 'Message Action')
""" message action logs """
meta_lst = ['delivery_status', 'device', 'id', 'message_revenue',
'profile_id', 'send_time', 'template']
col_ord = ['ts', 'send_time', 'recordtype', 'id', 'profile_id',
'delivery_status', 'device', 'message_revenue', 'template', 'url']
mt_opens = (pd.io.json.json_normalize(mt_json, 'opens', meta_lst)
.assign(ts=lambda x: extract_date(x.ts),
recordtype='open',
url=np.nan)
.reindex(columns=col_ord))
mt_clicks = (pd.io.json.json_normalize(mt_json, 'clicks', meta_lst)
.assign(ts=lambda x: extract_date(x.ts),
recordtype='click')
.reindex(columns=col_ord))
frames = [mt_opens, mt_clicks]
pd.concat(frames).to_excel(writer, 'Message Action Logs')
""" profile chart """
profile_fname = os.path.join('Data', 'profile.20160401.json')
profile_json = read_json(profile_fname)
df_profile = pd.DataFrame(profile_json)
profile_json_cols = ['browser', 'geo', 'lists_signup', 'optout_reason','vars']
pcols = [x for x in df_profile.columns.tolist() if x not in profile_json_cols]
df_profile = (df_profile.loc[:, pcols]
.set_index('id'))
df_profile.to_excel(writer, 'Profile Lookup')
""" blast chart """
blast_fname = os.path.join('Data', 'blast.20160401.json')
df_blast = (pd.read_json(blast_fname, lines=True)
.set_index('id'))
df_blast.to_excel(writer, 'Blast Lookup')
writer.save()
writer.close()
print('\nSee excel file located at {}'.format(ofname))
def read_json(fname):
json_lst = []
with open(fname) as f:
for line in f:
if re.match(r'^\w*$', line) is None:
json_lst.append(json.loads(line.strip()))
return json_lst
def totals(e):
return len(e) if isinstance(e, list) else 0
def extract_date(x):
return pd.to_datetime(x.apply(lambda r: r['$date']), unit='ms')
if __name__ == '__main__':
main(sys.argv)
| null |
NJOY/basic_data.py
|
basic_data.py
|
py
| 4,332 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.ExcelWriter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pandas.io.json.json_normalize",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pandas.io",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "pandas.io.json.json_normalize",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.io",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_json",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 116,
"usage_type": "attribute"
}
] |
212320947
|
import datetime
import json
class TaskCache(object):
def __init__(self, use_cache, cache_path):
self.use_cache = use_cache
self.cache_path = cache_path or '.asanagh.cache'
def load(self):
if not self.use_cache:
return {}
try:
with open(self.cache_path) as f:
return json.load(f)
except (IOError, ValueError):
return {}
def save(self, cache):
if not self.use_cache:
return
with open(self.cache_path, 'w') as f:
json.dump(cache, f, indent=2)
def includes(self, task):
cache = self.load()
return str(task['id']) in cache
def set(self, task):
cache = self.load()
cache[task['id']] = {
'name': task['name'],
'copied_at': datetime.datetime.now().isoformat(),
'completed_state': task['completed'],
'project': ','.join([p['name'] for p in task['projects']])
}
self.save(cache)
| null |
asana_to_github/taskcache.py
|
taskcache.py
|
py
| 1,023 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "attribute"
}
] |
545857024
|
import pytest
from triangle import triangle_type
many_triangles = [
(90, 60, 30, "right"),
(100, 40, 40, "obtuse"),
(60, 60, 60, "acute"),
(0, 0, 0, "invalid")
]
def idfn(a_triangle):
a, b, c, expected = a_triangle
return f'{a}-{b}-{c}-{expected}'
@pytest.fixture(params=many_triangles, ids=idfn)
def a_triangle(request):
return request.param
def test_fix(a_triangle):
a, b, c, expected = a_triangle
assert triangle_type(a, b, c) == expected
| null |
2020/pycon_2020/code/test_11.py
|
test_11.py
|
py
| 486 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.fixture",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "triangle.triangle_type",
"line_number": 23,
"usage_type": "call"
}
] |
296662924
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 3-clause BSD License
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import sys
import six
from six.moves import map, range, zip
import warnings
import astropy.constants as const
import astropy.units as units
from astropy.units import Quantity
from astropy.time import Time
from astropy.coordinates import EarthLocation
from pyuvdata import UVData, UVBeam
import pyuvdata.utils as uvutils
from . import profiling
from .antenna import Antenna
from .baseline import Baseline
from .telescope import Telescope
from . import utils as simutils
from . import simsetup
from . import mpi
__all__ = ['UVTask', 'UVEngine', 'uvdata_to_task_list', 'run_uvsim', 'initialize_uvdata', 'serial_gather']
class UVTask(object):
# holds all the information necessary to calculate a single src, t, f, bl, array
# need the array because we need an array location for mapping to local alt/az
def __init__(self, source, time, freq, baseline, telescope):
self.time = time
self.freq = freq
self.source = source
self.baseline = baseline
self.telescope = telescope
self.visibility_vector = None
self.uvdata_index = None # Where to add the visibility in the uvdata object.
def __eq__(self, other):
return (np.isclose(self.time, other.time, atol=1e-4)
and np.isclose(self.freq, other.freq, atol=1e-4)
and (self.source == other.source)
and (self.baseline == other.baseline)
and (self.visibility_vector == other.visibility_vector)
and (self.uvdata_index == other.uvdata_index)
and (self.telescope == other.telescope))
def __gt__(self, other):
blti0, _, fi0 = self.uvdata_index
blti1, _, fi1 = other.uvdata_index
if self.baseline == other.baseline:
if fi0 == fi1:
return blti0 > blti1
return fi0 > fi1
return self.baseline > other.baseline
def __ge__(self, other):
blti0, _, fi0 = self.uvdata_index
blti1, _, fi1 = other.uvdata_index
if self.baseline == other.baseline:
if fi0 == fi1:
return blti0 >= blti1
return fi0 >= fi1
return self.baseline >= other.baseline
def __lt__(self, other):
return not self.__ge__(other)
def __le__(self, other):
return not self.__gt__(other)
class UVEngine(object):
def __init__(self, task): # task_array = list of tuples (source,time,freq,uvw)
# self.rank
self.task = task
# Time and freq are scattered as floats.
# Convert them to astropy Quantities
if isinstance(self.task.time, float):
self.task.time = Time(self.task.time, format='jd')
if isinstance(self.task.freq, float):
self.task.freq = self.task.freq * units.Hz
@profile
def apply_beam(self):
""" Get apparent coherency from jones matrices and source coherency. """
baseline = self.task.baseline
source = self.task.source
# coherency is a 2x2 matrix
# [ |Ex|^2, Ex* Ey, Ey* Ex |Ey|^2 ]
# where x and y vectors along the local alt/az axes.
# Apparent coherency gives the direction and polarization dependent baseline response to a source.
beam1_jones = baseline.antenna1.get_beam_jones(self.task.telescope,
source.alt_az_calc(self.task.time,
self.task.telescope.location),
self.task.freq)
beam2_jones = baseline.antenna2.get_beam_jones(self.task.telescope,
source.alt_az_calc(self.task.time,
self.task.telescope.location),
self.task.freq)
this_apparent_coherency = np.dot(beam1_jones,
source.coherency_calc(self.task.time,
self.task.telescope.location))
this_apparent_coherency = np.dot(this_apparent_coherency,
(beam2_jones.conj().T))
self.apparent_coherency = this_apparent_coherency
@profile
def make_visibility(self):
""" Visibility contribution from a single source """
assert(isinstance(self.task.freq, Quantity))
self.apply_beam()
pos_lmn = self.task.source.pos_lmn(self.task.time, self.task.telescope.location)
if pos_lmn is None:
return np.array([0., 0., 0., 0.], dtype=np.complex128)
# need to convert uvws from meters to wavelengths
uvw_wavelength = self.task.baseline.uvw / const.c * self.task.freq.to('1/s')
fringe = np.exp(2j * np.pi * np.dot(uvw_wavelength, pos_lmn))
vij = self.apparent_coherency * fringe
# Reshape to be [xx, yy, xy, yx]
vis_vector = [vij[0, 0], vij[1, 1], vij[0, 1], vij[1, 0]]
return np.array(vis_vector)
@profile
def uvdata_to_task_list(input_uv, sources, beam_list, beam_dict=None):
"""
Create task list from pyuvdata compatible input file.
Args:
input_uv (UVData): UVData object to use
sources: array of Source objects
beam_list: (list of UVBeam or AnalyticBeam objects
beam_dict (dict, optional): dict mapping antenna number to beam index in beam_list
Returns:
List of task parameters to be send to UVEngines with the task
parameters defined in UVTask objectself.
This function extracts time, freq, Antenna1, Antenna2.
"""
if not isinstance(input_uv, UVData):
raise TypeError("input_uv must be UVData object")
if not isinstance(sources, np.ndarray):
raise TypeError("sources must be a numpy array")
freq = input_uv.freq_array[0, :] # units.Hz
telescope = Telescope(input_uv.telescope_name,
EarthLocation.from_geocentric(*input_uv.telescope_location, unit='m'),
beam_list)
if len(beam_list) > 1 and beam_dict is None:
raise ValueError('beam_dict must be supplied if beam_list has more than one element.')
times = input_uv.time_array
antpos_ENU, _ = input_uv.get_ENU_antpos(center=False)
antenna_names = input_uv.antenna_names
antenna_numbers = input_uv.antenna_numbers
antennas = []
for num, antname in enumerate(antenna_names):
if beam_dict is None:
beam_id = 0
else:
beam_id = beam_dict[antname]
antennas.append(Antenna(antname, antenna_numbers[num], antpos_ENU[num], beam_id))
baselines = []
print('Generating Baselines')
for count, antnum1 in enumerate(input_uv.ant_1_array):
antnum2 = input_uv.ant_2_array[count]
index1 = np.where(input_uv.antenna_numbers == antnum1)[0][0]
index2 = np.where(input_uv.antenna_numbers == antnum2)[0][0]
baselines.append(Baseline(antennas[index1], antennas[index2]))
baselines = np.array(baselines)
blts_index = np.arange(input_uv.Nblts)
frequency_index = np.arange(input_uv.Nfreqs)
source_index = np.arange(len(sources))
print('Making Meshgrid')
blts_ind, freq_ind, source_ind = np.meshgrid(blts_index, frequency_index, source_index)
print('Raveling')
blts_ind = blts_ind.ravel()
freq_ind = freq_ind.ravel()
source_ind = source_ind.ravel()
uvtask_list = []
print('Making Tasks')
print('Number of tasks:', len(blts_ind))
count = 0
tot = len(blts_ind)
pbar = simutils.progsteps(maxval=tot)
for (bl, freqi, t, source, blti, fi) in zip(baselines[blts_ind],
freq[freq_ind], times[blts_ind],
sources[source_ind], blts_ind,
freq_ind):
task = UVTask(source, t, freqi, bl, telescope)
task.uvdata_index = (blti, 0, fi) # 0 = spectral window index
uvtask_list.append(task)
count += 1
pbar.update(count)
return uvtask_list
@profile
def initialize_uvdata(uvtask_list, source_list_name, uvdata_file=None,
obs_param_file=None, telescope_config_file=None,
antenna_location_file=None):
"""
Initialize an empty uvdata object to fill with simulation.
Args:
uvtask_list: List of uvtasks to simulate.
source_list_name: Name of source list file or mock catalog.
uvdata_file: Name of input UVData file or None if initializing from
config files.
obs_param_file: Name of observation parameter config file or None if
initializing from a UVData file.
telescope_config_file: Name of telescope config file or None if
initializing from a UVData file.
antenna_location_file: Name of antenna location file or None if
initializing from a UVData file.
"""
if not isinstance(source_list_name, str):
raise ValueError('source_list_name must be a string')
if uvdata_file is not None:
if not isinstance(uvdata_file, str):
raise ValueError('uvdata_file must be a string')
if (obs_param_file is not None or telescope_config_file is not None
or antenna_location_file is not None):
raise ValueError('If initializing from a uvdata_file, none of '
'obs_param_file, telescope_config_file or '
'antenna_location_file can be set.')
elif (obs_param_file is None or telescope_config_file is None
or antenna_location_file is None):
if not isinstance(obs_param_file, str):
raise ValueError('obs_param_file must be a string')
if not isinstance(telescope_config_file, str):
raise ValueError('telescope_config_file must be a string')
if not isinstance(antenna_location_file, str):
raise ValueError('antenna_location_file must be a string')
raise ValueError('If not initializing from a uvdata_file, all of '
'obs_param_file, telescope_config_file or '
'antenna_location_file must be set.')
# Version string to add to history
history = simutils.get_version_string()
history += ' Sources from source list: ' + source_list_name + '.'
if uvdata_file is not None:
history += ' Based on UVData file: ' + uvdata_file + '.'
else:
history += (' Based on config files: ' + obs_param_file + ', '
+ telescope_config_file + ', ' + antenna_location_file)
history += ' Npus = ' + str(mpi.get_Npus()) + '.'
task_freqs = []
task_bls = []
task_times = []
task_antnames = []
task_antnums = []
task_antpos = []
task_uvw = []
ant_1_array = []
ant_2_array = []
telescope_name = uvtask_list[0].telescope.name
telescope_location = uvtask_list[0].telescope.location.geocentric
source_0 = uvtask_list[0].source
freq_0 = uvtask_list[0].freq
for task in uvtask_list:
if not task.source == source_0:
continue
task_freqs.append(task.freq)
if task.freq == freq_0:
task_bls.append(task.baseline)
task_times.append(task.time)
task_antnames.append(task.baseline.antenna1.name)
task_antnames.append(task.baseline.antenna2.name)
ant_1_array.append(task.baseline.antenna1.number)
ant_2_array.append(task.baseline.antenna2.number)
task_antnums.append(task.baseline.antenna1.number)
task_antnums.append(task.baseline.antenna2.number)
task_antpos.append(task.baseline.antenna1.pos_enu)
task_antpos.append(task.baseline.antenna2.pos_enu)
task_uvw.append(task.baseline.uvw)
antnames, ant_indices = np.unique(task_antnames, return_index=True)
task_antnums = np.array(task_antnums)
task_antpos = np.array(task_antpos)
antnums = task_antnums[ant_indices]
antpos = task_antpos[ant_indices]
freqs = np.unique(task_freqs)
uv_obj = UVData()
# add pyuvdata version info
history += uv_obj.pyuvdata_version_str
uv_obj.telescope_name = telescope_name
uv_obj.telescope_location = np.array([tl.to('m').value for tl in telescope_location])
uv_obj.instrument = telescope_name
uv_obj.Nfreqs = freqs.size
uv_obj.Ntimes = np.unique(task_times).size
uv_obj.Nants_data = antnames.size
uv_obj.Nants_telescope = uv_obj.Nants_data
uv_obj.Nblts = len(ant_1_array)
uv_obj.antenna_names = antnames.tolist()
uv_obj.antenna_numbers = antnums
antpos_ecef = uvutils.ECEF_from_ENU(antpos, *uv_obj.telescope_location_lat_lon_alt) - uv_obj.telescope_location
uv_obj.antenna_positions = antpos_ecef
uv_obj.ant_1_array = np.array(ant_1_array)
uv_obj.ant_2_array = np.array(ant_2_array)
uv_obj.time_array = np.array(task_times)
uv_obj.uvw_array = np.array(task_uvw)
uv_obj.baseline_array = uv_obj.antnums_to_baseline(ant_1_array, ant_2_array)
uv_obj.Nbls = np.unique(uv_obj.baseline_array).size
if uv_obj.Nfreqs == 1:
uv_obj.channel_width = 1. # Hz
else:
uv_obj.channel_width = np.diff(freqs)[0]
if uv_obj.Ntimes == 1:
uv_obj.integration_time = np.ones_like(uv_obj.time_array, dtype=np.float64) # Second
else:
# Note: currently only support a constant spacing of times
uv_obj.integration_time = (np.ones_like(uv_obj.time_array, dtype=np.float64)
* np.diff(np.unique(task_times))[0])
uv_obj.set_lsts_from_time_array()
uv_obj.zenith_ra = uv_obj.lst_array
uv_obj.zenith_dec = np.repeat(uv_obj.telescope_location_lat_lon_alt[0], uv_obj.Nblts) # Latitude
uv_obj.object_name = 'zenith'
uv_obj.set_drift()
uv_obj.vis_units = 'Jy'
uv_obj.polarization_array = np.array([-5, -6, -7, -8])
uv_obj.spw_array = np.array([0])
uv_obj.freq_array = np.array([freqs])
uv_obj.Nspws = uv_obj.spw_array.size
uv_obj.Npols = uv_obj.polarization_array.size
uv_obj.data_array = np.zeros((uv_obj.Nblts, uv_obj.Nspws, uv_obj.Nfreqs, uv_obj.Npols), dtype=np.complex)
uv_obj.flag_array = np.zeros((uv_obj.Nblts, uv_obj.Nspws, uv_obj.Nfreqs, uv_obj.Npols), dtype=bool)
uv_obj.nsample_array = np.ones_like(uv_obj.data_array, dtype=float)
uv_obj.history = history
uv_obj.check()
return uv_obj
def serial_gather(uvtask_list, uv_out):
"""
Loop over uvtask list, acquire visibilities and add to uvdata object.
"""
for task in uvtask_list:
blt_ind, spw_ind, freq_ind = task.uvdata_index
uv_out.data_array[blt_ind, spw_ind, freq_ind, :] += task.visibility_vector
return uv_out
@profile
def run_uvsim(input_uv, beam_list, beam_dict=None, catalog_file=None,
mock_keywords=None,
uvdata_file=None, obs_param_file=None,
telescope_config_file=None, antenna_location_file=None):
"""
Run uvsim
Arguments:
input_uv: An input UVData object, containing baseline/time/frequency information.
beam_list: A list of UVBeam and/or AnalyticBeam objects
beam_dict: Dictionary of {antenna_name : beam_ID}, where beam_id is an index in
the beam_list. This assigns beams to antennas.
Default: All antennas get the 0th beam in the beam_list.
catalog_file: Catalog file name.
Default: Create a mock catalog
mock_keywords: Settings for a mock catalog (see keywords of create_mock_catalog)
uvdata_file: Name of input UVData file if running from a file.
obs_param_file: Parameter filename if running from config files.
telescope_config_file: Telescope configuration file if running from config files.
antenna_location_file: antenna_location file if running from config files.
"""
mpi.start_mpi()
rank = mpi.get_rank()
if not isinstance(input_uv, UVData):
raise TypeError("input_uv must be UVData object")
# The Head node will initialize our simulation
# Read input file and make uvtask list
uvtask_list = []
if rank == 0:
print('Nblts:', input_uv.Nblts)
print('Nfreqs:', input_uv.Nfreqs)
if catalog_file is None or catalog_file == 'mock':
# time, arrangement, array_location, save, Nsrcs, max_za
if mock_keywords is None:
mock_keywords = {}
if 'array_location' not in mock_keywords:
array_loc = EarthLocation.from_geocentric(*input_uv.telescope_location, unit='m')
mock_keywords['array_location'] = array_loc
if 'time' not in mock_keywords:
mock_keywords['time'] = input_uv.time_array[0]
if "array_location" not in mock_keywords:
warnings.warn("Warning: No array_location given for mock catalog. Defaulting to HERA site")
if 'time' not in mock_keywords:
warnings.warn("Warning: No julian date given for mock catalog. Defaulting to first of input_UV object")
time = mock_keywords.pop('time')
catalog, mock_keywords = simsetup.create_mock_catalog(time, **mock_keywords)
mock_keyvals = [str(key) + str(val) for key, val in six.iteritems(mock_keywords)]
source_list_name = 'mock_' + "_".join(mock_keyvals)
elif isinstance(catalog_file, str):
source_list_name = catalog_file
if catalog_file.endswith("txt"):
catalog = simsetup.read_text_catalog(catalog_file)
elif catalog_file.endswith('vot'):
catalog = simsetup.read_votable_catalog(catalog_file)
catalog = np.array(catalog)
print('Nsrcs:', len(catalog))
uvtask_list = uvdata_to_task_list(input_uv, catalog, beam_list, beam_dict=beam_dict)
if 'obs_param_file' in input_uv.extra_keywords:
obs_param_file = input_uv.extra_keywords['obs_param_file']
telescope_config_file = input_uv.extra_keywords['telescope_config_file']
antenna_location_file = input_uv.extra_keywords['antenna_location_file']
uvdata_file_pass = None
else:
uvdata_file_pass = uvdata_file
uv_container = initialize_uvdata(uvtask_list, source_list_name,
uvdata_file=uvdata_file_pass,
obs_param_file=obs_param_file,
telescope_config_file=telescope_config_file,
antenna_location_file=antenna_location_file)
# To split into PUs make a list of lists length NPUs
print("Splitting Task List")
uvtask_list = np.array_split(uvtask_list, mpi.get_Npus())
uvtask_list = [list(tl) for tl in uvtask_list]
print("Sending Tasks To Processing Units")
sys.stdout.flush()
# Scatter the task list among all available PUs
comm = mpi.get_comm()
local_task_list = comm.scatter(uvtask_list, root=0)
if rank == 0:
print("Tasks Received. Begin Calculations.")
sys.stdout.flush()
# UVBeam objects don't survive the scatter with prop_fget() working. This fixes it on each rank.
if not len(local_task_list) == 0:
for i, bm in enumerate(local_task_list[0].telescope.beam_list):
if isinstance(bm, UVBeam):
uvb = UVBeam()
uvb = bm
local_task_list[0].telescope.beam_list[i] = bm
summed_task_dict = {}
if rank == 0:
tot = len(local_task_list) * mpi.get_Npus()
print("Tasks: ", tot)
sys.stdout.flush()
pbar = simutils.progsteps(maxval=tot)
for count, task in enumerate(local_task_list):
engine = UVEngine(task)
if task.uvdata_index not in summed_task_dict.keys():
summed_task_dict[task.uvdata_index] = task
if summed_task_dict[task.uvdata_index].visibility_vector is None:
summed_task_dict[task.uvdata_index].visibility_vector = engine.make_visibility()
else:
summed_task_dict[task.uvdata_index].visibility_vector += engine.make_visibility()
if rank == 0:
pbar.update(count * mpi.get_Npus())
if rank == 0:
pbar.finish()
if rank == 0:
print("Calculations Complete.")
# All the sources in this summed list are foobar-ed
# Source are summed over but only have 1 name
# Some source may be correct
summed_local_task_list = list(summed_task_dict.values())
# gather all the finished local tasks into a list of list of len NPUs
# gather is a blocking communication, have to wait for all PUs
full_tasklist = comm.gather(summed_local_task_list, root=0)
# Concatenate the list of lists into a flat list of tasks
if rank == 0:
uvtask_list = sum(full_tasklist, [])
uvdata_out = serial_gather(uvtask_list, uv_container)
return uvdata_out
| null |
pyuvsim/uvsim.py
|
uvsim.py
|
py
| 21,498 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.isclose",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "astropy.time.Time",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "astropy.units.Hz",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "baseline.antenna1.get_beam_jones",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "baseline.antenna1",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "baseline.antenna2.get_beam_jones",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "baseline.antenna2",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "astropy.units.Quantity",
"line_number": 121,
"usage_type": "argument"
},
{
"api_name": "numpy.array",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.complex128",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants.c",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pyuvdata.UVData",
"line_number": 154,
"usage_type": "argument"
},
{
"api_name": "numpy.ndarray",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "telescope.Telescope",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.EarthLocation.from_geocentric",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.EarthLocation",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "antenna.Antenna",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "baseline.Baseline",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "six.moves.zip",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "pyuvdata.UVData",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "pyuvdata.utils.ECEF_from_ENU",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "pyuvdata.utils",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones_like",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "numpy.diff",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "numpy.complex",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pyuvdata.UVData",
"line_number": 415,
"usage_type": "argument"
},
{
"api_name": "astropy.coordinates.EarthLocation.from_geocentric",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.EarthLocation",
"line_number": 431,
"usage_type": "name"
},
{
"api_name": "warnings.warn",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 478,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "pyuvdata.UVBeam",
"line_number": 489,
"usage_type": "argument"
},
{
"api_name": "pyuvdata.UVBeam",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 499,
"usage_type": "attribute"
}
] |
371815025
|
import logging
from pprint import pformat
LOGGER = logging.getLogger(__name__)
class APIItems:
"""Base class for a map of API Items."""
def __init__(self, raw, request, path, item_cls):
self._request = request
self._path = path
self._item_cls = item_cls
self._items = {}
self.process_raw(raw)
LOGGER.debug(pformat(raw))
async def update(self):
raw = await self._request('get', self._path)
self.process_raw(raw)
def process_raw(self, raw):
for raw_item in raw:
mac = raw_item['mac']
obj = self._items.get(mac)
if obj is not None:
obj.raw = raw_item
else:
self._items[mac] = self._item_cls(raw_item, self._request)
def values(self):
return self._items.values()
def __getitem__(self, obj_id):
try:
return self._items[obj_id]
except KeyError:
LOGGER.error("Couldn't find key: '{}'".format(obj_id))
def __iter__(self):
return iter(self._items)
| null |
aiounifi/api.py
|
api.py
|
py
| 1,087 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 17,
"usage_type": "call"
}
] |
254806145
|
#!/usr/bin/python
"""
for testing run:
(python3 gloss_links.py test_data/gloss_key.txt test_data --lf
"test_data/gloss_links_inp1.txt" "test_data/gloss_links_inp2.txt")
--------- for understanding code --------
Each file is opened only once now.
The data structure used in new version
is a bit complicated:
-> keyword_context is a dictionary where key
is key/glossary from gloss_list and value is another dictionary
-> in the nested dictionary key is name of
files and value is list of context for that file.
{keyword: {filenm:[context,...,contxt],..., filenm:
[context,...,contxt]},.....,keyword:
{filenm:[context,contxt],..., filenm:[context,context]}}
--------- for understanding code ---------
"""
import argparse
import re
ARG_ERROR = 1 # type: int
def process_file(filenm, keyword_context, gloss_list):
"""
Args: filenm and contexts_per_file
returns: None
"""
try:
with open(filenm, 'r') as txt:
for keyword in gloss_list:
for line in txt:
# splits into a list
if keyword in line:
# line=re.sub(r'[^\w\s]','',str(line.strip())).split()
line = line.strip().split(" ")
context = None
index_list = []
for index, word in enumerate(line):
word = re.sub(r'[^\w\s]', '', str(word))
if keyword == word:
index_list.append(index)
for index in index_list:
# if keyword appears more than once in a line
key_index = index
if 0 < key_index < len(line) - 1:
context = (line[key_index-1] + " " +
line[key_index] + " " +
line[key_index+1])
elif key_index == 0:
if len(line) > 1:
context = (line[key_index] + " " +
line[key_index+1])
else:
context = line[key_index]
elif key_index == len(line) - 1:
context = (line[key_index - 1] + " " +
line[key_index])
if keyword not in keyword_context:
keyword_context[keyword] = {}
file_per_keyword = keyword_context[keyword]
if filenm not in file_per_keyword:
keyword_context[keyword][filenm] = []
keyword_context[keyword][filenm].append(context)
txt.seek(0)
except IOError as ioe:
print("Error opening file: %s; exception: %s", (filenm, str(ioe)))
def process_args():
"""
Parses command line args and returns:
keyword_file, file_list
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("gloss_key")
arg_parser.add_argument("outdir")
arg_parser.add_argument(
"--lf", # you need to add "--lf" flag in command line
nargs="*",
type=str,
default=[],
)
args = arg_parser.parse_args()
return (args.gloss_key, args.outdir, args.lf)
def output_context(outdir, keyword_context):
"""
output context of a keyword
Args: outdir, keyword, context
Returns: None
"""
for keyword in keyword_context:
output_name = outdir + "/" + keyword + ".txt"
with open(output_name, 'w') as files:
files.write(keyword + " found in: \n")
temp = keyword_context[keyword]
for filenm, context_list in temp.items():
for context in context_list:
files.write(" " + filenm + ": " + context + "\n")
files.write("\n")
if __name__ == '__main__':
# get command line params:
(KEYWORD_FILE_LIST, OUTDIR, FILE_LIST) = process_args()
GLOSS_LISTS = []
KEYWORD_CONTEXTS = {}
# first get all the gloss keywords
try:
with open(KEYWORD_FILE_LIST, 'r') as f:
for line in f:
# tab delimited
key = line.strip().split("\t")
GLOSS_LISTS.append(key[0])
except IOError:
print("Couldn't read " + KEYWORD_FILE_LIST)
exit(1)
for filename in FILE_LIST: # look for keywords in all files
process_file(filename, KEYWORD_CONTEXTS, GLOSS_LISTS)
output_context(OUTDIR, KEYWORD_CONTEXTS)
""""
from html.parser import HTMLParser
import urllib.request as urllib2
class MyHTMLParser(HTMLParser):
#Initializing lists
lsStartTags = list()
lsEndTags = list()
lsStartEndTags = list()
lsComments = list()
#HTML Parser Methods
def handle_starttag(self, startTag, attrs):
self.lsStartTags.append(startTag)
def handle_endtag(self, endTag):
self.lsEndTags.append(endTag)
def handle_startendtag(self,startendTag, attrs):
self.lsStartEndTags.append(startendTag)
"""
| null |
gloss_links.py
|
gloss_links.py
|
py
| 5,306 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.sub",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 88,
"usage_type": "call"
}
] |
169599884
|
# coding=utf-8
# !/usr/bin/python
"""
INFO:
DESC:
script options
--------------
--param : parameter list
Created by Samujjwal_Ghosh on 11-Apr-17.
__author__ : Samujjwal Ghosh
__version__ = ": 1 $"
__date__ = "$"
__copyright__ = "Copyright (c) 2017 Samujjwal Ghosh"
__license__ = "Python"
Supervised approaches:
SVM,
Features:
# 1. Unigrams, bigrams
# 2. count of words like (lakh,lakhs,millions,thousands)
# 3. count of units present (litre,kg,gram)
# 4. k similar tweets class votes
# 5. k closest same class distance avg
# 6. count of frequent words of that class (unique to that class)
# 7. Length related features.
"""
import os,sys,json,math
import numpy as np
from collections import OrderedDict
import platform
if platform.system() == 'Windows':
sys.path.append('D:\GDrive\Dropbox\IITH\\0 Research')
else:
sys.path.append('/home/cs16resch01001/codes')
# print(platform.system(),"os detected.")
import my_modules as mm
date_time_tag = mm.get_date_time_tag(current_file_name=os.path.basename(__file__))
np.set_printoptions(threshold=np.inf,precision=4,suppress=True)
# change here START------------------------------
n_classes = 4 # number of classes
result_file = "smerp17_"
# change here END--------------------------------
dataset_file = result_file+'labeled_' # Dataset file name
grid_flag = False # Sets the flag to use grid search
n_grams = 2 # bigrams
min_df = 1 # min count for each term to be considered
class_names = ['RESOURCES AVAILABLE',
'RESOURCES REQUIRED',
]
dataset_path = mm.get_dataset_path()
def main(result_all):
print(dataset_file)
train,validation,test = mm.read_labeled(dataset_file)
train = mm.merge_dicts(train,validation)
print("Training data:",mm.count_class([val["classes"] for id,val in train.items()],n_classes))
print("Testing data:",mm.count_class([val["classes"] for id,val in test.items()],n_classes))
vec,train_tfidf_matrix_1 = mm.vectorizer([vals["parsed_tweet"] for twt_id,vals in train.items()],n_grams,min_df)
test_tfidf_matrix_1 = vec.transform([vals["parsed_tweet"] for twt_id,vals in test.items()])
# test_names = ["alphabeta","alpha","mul","add","iw"]
test_names = ["mul","add","iw"]
alphas = [0.0001,0.001,0.01,0.1,0.3,0.5,0.7,0.9,1]
betas = [0.0001,0.001,0.01,0.1,0.3,0.5,0.7,0.9,1]
ks = [0.0001,0.001,0.01,0.1,1,2,5,10]
for test_name in test_names:
matrices = OrderedDict()
if test_name == "alphabeta":
for beta in betas:
for alpha in alphas:
run_name = result_file+" "+test_name+" "+str(alpha)+" "+str(beta)
print("-------------------------------------------------------------------")
print("TEST:",run_name)
print("-------------------------------------------------------------------")
matrices = mm.class_tfidf_CNE(train,vec,train_tfidf_matrix_1,n_classes,alpha,beta,test_name)
assert(train_tfidf_matrix_1.shape == matrices[0].shape)
for cls in range(n_classes):
result_all[cls],predictions,probabilities = mm.supervised_bin(train,test,matrices[cls],test_tfidf_matrix_1.todense(),2,class_id=cls,metric=True,grid=grid_flag)
print(run_name,json.dumps(result_all,indent=4))
mm.save_json(result_all,run_name,tag=False)
if test_name == "alpha":
for alpha in alphas:
run_name = result_file+" "+test_name+" "+str(alpha)
print("-------------------------------------------------------------------")
print("TEST:",run_name)
print("-------------------------------------------------------------------")
matrices = mm.class_tfidf_CNE(train,vec,train_tfidf_matrix_1,n_classes,alpha,test_name)
assert(train_tfidf_matrix_1.shape == matrices[0].shape)
for cls in range(n_classes):
result_all,predictions,probabilities = mm.supervised_bin(train,test,matrices[cls],test_tfidf_matrix_1.todense(),2,class_id=cls,metric=True,grid=grid_flag)
print(run_name,json.dumps(result_all,indent=4))
mm.save_json(result_all,run_name,tag=False)
else:
for k in ks:
run_name = result_file+" "+test_name+" "+str(k)
print("-------------------------------------------------------------------")
print("TEST:",run_name)
print("-------------------------------------------------------------------")
matrices = mm.class_tfidf_CNE(train,vec,train_tfidf_matrix_1,n_classes,k,test_name)
assert(train_tfidf_matrix_1.shape == matrices[0].shape)
for cls in range(n_classes):
result_all,predictions,probabilities = mm.supervised_bin(train,test,matrices[cls],test_tfidf_matrix_1.todense(),2,class_id=cls,metric=True,grid=grid_flag)
print(run_name,json.dumps(result_all,indent=4))
mm.save_json(result_all,run_name,tag=False)
return
if __name__ == "__main__":
result = OrderedDict()
main(result)
print("MAIN: ",json.dumps(result,indent=4))
| null |
SMERP17_CNE.py
|
SMERP17_CNE.py
|
py
| 5,498 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "platform.system",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "my_modules.get_date_time_tag",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "my_modules.get_dataset_path",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "my_modules.read_labeled",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "my_modules.merge_dicts",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "my_modules.count_class",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "my_modules.count_class",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "my_modules.vectorizer",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "my_modules.class_tfidf_CNE",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "my_modules.supervised_bin",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "my_modules.save_json",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "my_modules.class_tfidf_CNE",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "my_modules.supervised_bin",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "my_modules.save_json",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "my_modules.class_tfidf_CNE",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "my_modules.supervised_bin",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "my_modules.save_json",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 120,
"usage_type": "call"
}
] |
180891043
|
from django.urls import path
from . import views
from .my_views import state_views
from .my_views import old_func_views
from .my_views import old_func_async_views
from .my_views import analyze_cfg_view
from .my_views import parse_cfg_view
from .my_views import task_view
from .my_views import pack_view
from .my_views import variable_views
from .my_views import taint_view
urlpatterns = [
# =========================================================================
# 正式指令部分
# 启动 cfg 分析任务,并保存分析结果到数据库
path('task/analyze_cfg', analyze_cfg_view.analyze_cfg, name='task_analyze_cfg'),
# 读取任务结果
path('task/query', task_view.get_task_result, name='task_query'),
# 读取全部任务结果
path('task/query_all', task_view.get_all_task_result, name='task_query_all'),
# 读取组件任务结果
path('task/query_component', task_view.query_component, name='task_query_component'),
# 停止任务
path('task/stop', task_view.stop_task, name='task_stop'),
# 读取指定 pack 的任务
path('task/search_by_pack', task_view.search_tasks_by_pack, name='search_tasks_by_pack'),
# 读取指定 文件 的任务
path('task/search_by_file', task_view.search_tasks_by_file, name='search_tasks_by_file'),
# 获取函数列表
path('cfg/func_list', parse_cfg_view.cfg_func_list, name='parse_cfg_func_list'),
# 获取脆弱性函数列表
path('cfg/vulner_func_list', parse_cfg_view.vulner_func_list, name='parse_vulner_func_list'),
# 获取污点函数列表
path('cfg/taint_func_list', parse_cfg_view.taint_func_list, name='parse_taint_func_list'),
# 获取指定函数的 call-graph
path('cfg/call_graph_a', parse_cfg_view.call_graph_a, name='parse_cfg_call_graph_a'),
# 获取指定函数的 control_flow_graph
path('cfg/cfg_graph', parse_cfg_view.control_flow_graph, name='parse_cfg_control_flow_graph'),
# 获取指定函数的 control_dependence_graph
path('cfg/cdg_graph', parse_cfg_view.control_dependence_graph, name='parse_cfg_control_dependence_graph'),
# 获取函数信息,包含诸如:汇编代码、中间代码、后继调用等
path('cfg/func_info', parse_cfg_view.function_info, name='parse_cfg_function_info'),
# 获取函数属性,包含:参数、返回、地址等
path('cfg/func_props', parse_cfg_view.function_props, name='parse_cfg_function_props'),
# 查询所有固件包信息
path('pack/all', pack_view.all_packs_info, name='query_all_packs_info'),
# 查询指定固件包信息
path('pack/info', pack_view.pack_info, name='query_pack_info'),
# 编辑指定固件包信息 厂商 型号
path('pack/edit', pack_view.pack_edit, name='query_pack_edit'),
# 删除指定固件包
path('pack/delete', pack_view.pack_delete, name='query_pack_info'),
# 查询指定固件包中所含的执行文件目录树
path('pack/exec_files_tree', pack_view.pack_exec_files_tree, name='query_pack_exec_files_tree'),
# 查询所有组件文件目录树
path('pack/com_files_tree', pack_view.com_files_tree, name='query_com_files_tree'),
# 9.9 组件源码文件目录树
path('pack/com_sourcecode_files_tree', pack_view.com_sourcecode_files_tree, name='query_com_sourcecode_files_tree'),
# 查询所有组件文件列表
path('pack/com_files_list', pack_view.com_files_list, name='query_com_files_list'),
# 提取解析变量
path('vars/extract', variable_views.analyze_extract_vars, name='query_analyze_extract_vars'),
# 组件自动漏洞关联
path('com/auto_vuler_association', pack_view.auto_vuler_association, name='async_com_vuler_association'),
# 检测溢出漏洞
path('task/detect_vulner', parse_cfg_view.detect_vulner, name='task_detect_vulner'),
# 自定义污点源-函数增加
path('taint/add', taint_view.taintadd, name='task_taintadd'),
# 自定义污点源-函数删除
path('taint/del', taint_view.taintdel, name='task_taintdel'),
# 自定义污点源-函数修改
path('taint/update', taint_view.taintmodify, name='task_taintmodify'),
# 自定义污点源-
path('taint/list', taint_view.list, name='task_taintlist'),
# =========================================================================
# 以下部分均为测试指令
# 启动 cfg 分析任务,并保存分析结果到数据库
path('task/analyze_cfg_auto', analyze_cfg_view.analyze_cfg_auto, name='task_analyze_cfg'),
path('', views.index, name='index'),
# 固件文件头自动解码或解析
path('decode', views.binwalk_scan_signature, name='binwalk_scan_signature'),
path('decodeEx', views.binwalk_scan_signatureEx, name='binwalk_scan_signature'),
# 架构识别
path('arch', views.binwalk_scan_opcodes, name='binwalk_scan_opcodes'),
# 抽取文件
path('extract', views.binwalk_file_extract, name='binwalk_file_extract'),
path('extractEx', views.binwalk_file_extractEx, name='binwalk_file_extract'),
# 测试 binwalk
path('test_binwalk', views.binwalk_file_test, name='binwalk_file_test'),
# 转换成中间代码
path('convert_code', views.angr_convert_code, name='angr_convert_code'),
# 转换成汇编代码
# path('/convert_asm', views.angr_convert_asm, name='angr_convert_asm'),
# 函数识别
path('recognize_func', views.angr_recognize_func, name='angr_recognize_func'),
#
# 函数及状态机基本分析:同步调用接口
#
# 状态机信息
path('state', state_views.entry_state_info, name='entry_state_info'),
# 函数列表
path('functions', old_func_views.fw_functions_list, name='fw_functions_list'),
# 函数后继调用
path('functions/successors', old_func_views.func_successors, name='func_successors'),
# 指定函数的汇编代码
path('functions/asm', old_func_views.func_asm, name='func_asm'),
# 指定函数的中间代码
path('functions/vex', old_func_views.func_vex, name='func_vex'),
#
# 函数分析:异步调用接口
#
# 异步获取函数列表
path('async_funcs/list', old_func_async_views.async_fw_functions_list, name='async_fw_functions_list'),
# 异步获取函数信息,包含诸如:汇编代码、中间代码、后继调用等
path('async_funcs/func_info', old_func_async_views.async_function_info, name='async_function_info'),
# 异步绘制函数调用关系图
path('async_funcs/call_graph', old_func_async_views.async_function_call_graph, name='async_function_call_graph'),
]
| null |
fw_analyze/urls.py
|
urls.py
|
py
| 6,650 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "my_views.analyze_cfg_view.analyze_cfg",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "my_views.analyze_cfg_view",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "my_views.task_view.get_task_result",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "my_views.task_view",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "my_views.task_view.get_all_task_result",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "my_views.task_view",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "my_views.task_view.query_component",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "my_views.task_view",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "my_views.task_view.stop_task",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "my_views.task_view",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "my_views.task_view.search_tasks_by_pack",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "my_views.task_view",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "my_views.task_view.search_tasks_by_file",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "my_views.task_view",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.cfg_func_list",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.vulner_func_list",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.taint_func_list",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.call_graph_a",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.control_flow_graph",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.control_dependence_graph",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.function_info",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.function_props",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.all_packs_info",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.pack_info",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.pack_edit",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.pack_delete",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.pack_exec_files_tree",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.com_files_tree",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.com_sourcecode_files_tree",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.com_files_list",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "my_views.variable_views.analyze_extract_vars",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "my_views.variable_views",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "my_views.pack_view.auto_vuler_association",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "my_views.pack_view",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "my_views.parse_cfg_view.detect_vulner",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "my_views.parse_cfg_view",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "my_views.taint_view.taintadd",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "my_views.taint_view",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "my_views.taint_view.taintdel",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "my_views.taint_view",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "my_views.taint_view.taintmodify",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "my_views.taint_view",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "my_views.taint_view.list",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "my_views.taint_view",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "my_views.analyze_cfg_view.analyze_cfg_auto",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "my_views.analyze_cfg_view",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "my_views.state_views.entry_state_info",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "my_views.state_views",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_views.fw_functions_list",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_views",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_views.func_successors",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_views",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_views.func_asm",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_views",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_views.func_vex",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_views",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_async_views.async_fw_functions_list",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_async_views",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_async_views.async_function_info",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_async_views",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "my_views.old_func_async_views.async_function_call_graph",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "my_views.old_func_async_views",
"line_number": 171,
"usage_type": "name"
}
] |
57798440
|
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import os
from tqdm import tqdm
import torch
import cv2
import numpy as np
class VideoDataset(Dataset):
def __init__(self, data_dir, split):
print('Initialize VideoDataset...')
self.data_dir = os.path.join(data_dir, split)
self.ano_video_paths, self.nor_video_paths = self.__get_video_path_list()
self.ano_num = len(self.ano_video_paths)
self.nor_num = len(self.nor_video_paths)
self.get_anomaly = True
print('Initialization Done.')
def __len__(self):
return self.ano_num + self.nor_num
def __getitem__(self, index):
if self.get_anomaly:
idx = np.random.randint(self.ano_num)
video_path = self.ano_video_paths[idx]
else:
idx = np.random.randint(self.nor_num)
video_path = self.nor_video_paths[idx]
label = 1 if self.get_anomaly else 0
self.get_anomaly = not self.get_anomaly
features = torch.load(video_path)
return features, torch.tensor(label)
def __get_video_path_list(self):
video_paths = []
for classname in os.listdir(self.data_dir):
classdir = os.path.join(self.data_dir, classname)
for videoname in os.listdir(classdir):
video_paths.append(os.path.join(classdir, videoname))
ano_video_paths = []
nor_video_paths = []
for pth in video_paths:
if 'Normal' in pth:
nor_video_paths.append(pth)
else:
ano_video_paths.append(pth)
return ano_video_paths, nor_video_paths
if __name__ == '__main__':
split = 'train'
feat_dir = '/home/yangzehua/RADetection/features'
dataset = VideoDataset(data_dir=feat_dir, split=split)
video_loader = DataLoader(dataset=dataset, batch_size=60, num_workers=1, shuffle=False)
for segs, label in tqdm(video_loader):
print(segs.size(), label)
| null |
SpatialAttention/VideoDataset.py
|
VideoDataset.py
|
py
| 2,061 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 56,
"usage_type": "call"
}
] |
316601380
|
import json
from datetime import datetime
from django.http import HttpResponse
from oauth2_provider.decorators import protected_resource
from oauth2_provider.models import get_access_token_model
from .util import get_member_data
from core import librato
from user.models import UserOauthActiveLog
AccessToken = get_access_token_model()
@protected_resource(scopes=['read'])
def membership_data(request):
    """Return the OAuth-authenticated user's membership data as JSON.

    As a side effect, best-effort records when (and via which OAuth client)
    the user's data was last accessed; any failure in that bookkeeping is
    deliberately swallowed so the data response is never blocked.
    """
    librato.increment('sherpa.api.oauth.membership.request')
    try:
        # Resolve the OAuth application behind the bearer token.
        token = request.META['HTTP_AUTHORIZATION'].replace('Bearer ', '')
        at = AccessToken.objects.select_related('application').get(token=token)
        client_id = at.application.id
        client_name = at.application.name  # NOTE(review): unused here; kept for parity with the sibling view
        # Stamp the user's last OAuth activity.
        # NOTE(review): naive datetime.now() — confirm the project stores naive local time.
        request.user.last_active_oauth_date = datetime.now()
        request.user.save()
        # Upsert a per-(user, client) access-log row.
        log, created = UserOauthActiveLog.objects.get_or_create(
            user=request.user,
            oauth_client_id=client_id,
            defaults={
                'last_data_date': request.user.last_active_oauth_date,
            })
        if not created:
            log.last_data_date = request.user.last_active_oauth_date
            log.save()
    except Exception as e:
        # Best-effort bookkeeping only — never let it break the response.
        pass
    return HttpResponse(json.dumps(get_member_data(request.user)))
@protected_resource(scopes=['read'])
def membership_household_data(request):
    """Return membership data for the user's whole household as JSON.

    The payload always has the shape
    ``{'hovedmedlem': <main member data or None>, 'husstandsmedlemmer': [...]}``.
    As in ``membership_data``, the OAuth-client activity bookkeeping is
    best-effort and never blocks the response.
    """
    try:
        # Resolve the OAuth application behind the bearer token.
        token = request.META['HTTP_AUTHORIZATION'].replace('Bearer ', '')
        at = AccessToken.objects.select_related('application').get(token=token)
        client_id = at.application.id
        client_name = at.application.name
        # NOTE(review): naive datetime.now() — kept consistent with membership_data.
        request.user.last_active_oauth_date = datetime.now()
        request.user.save()
        # Bugfix: the manager lookup was missing `.objects`
        # (UserOauthActiveLog.get_or_create raised AttributeError), so this
        # bookkeeping silently never ran; now matches membership_data.
        log, created = UserOauthActiveLog.objects.get_or_create(
            user=request.user,
            oauth_client_id=client_id,
            defaults={
                'last_data_date': request.user.last_active_oauth_date,
            })
        if not created:
            log.last_data_date = request.user.last_active_oauth_date
            log.save()
    except Exception:
        # Best-effort bookkeeping only; a bare `except:` would also have
        # swallowed SystemExit/KeyboardInterrupt.
        pass
    if not request.user.is_member():
        # Not a member
        user_data = {
            'hovedmedlem': get_member_data(request.user),
            'husstandsmedlemmer': [],
        }
    elif not request.user.is_related_member():
        # A main member
        user_data = {
            'hovedmedlem': get_member_data(request.user),
            'husstandsmedlemmer': [get_member_data(u) for u in request.user.get_children()],
        }
    else:
        # A household member
        if request.user.get_parent() is not None:
            user_data = {
                'hovedmedlem': get_member_data(request.user.get_parent()),
                'husstandsmedlemmer': [get_member_data(u) for u in request.user.get_parent().get_children()],
            }
        else:
            # A household member without a parent, send it as such
            user_data = {
                'hovedmedlem': None,
                'husstandsmedlemmer': [get_member_data(request.user)],
            }
    return HttpResponse(json.dumps(user_data))
| null |
apps/api/resources_oauth.py
|
resources_oauth.py
|
py
| 3,094 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "oauth2_provider.models.get_access_token_model",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "core.librato.increment",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "core.librato",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "user.models.UserOauthActiveLog.objects.get_or_create",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "user.models.UserOauthActiveLog.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "user.models.UserOauthActiveLog",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "util.get_member_data",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "oauth2_provider.decorators.protected_resource",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "user.models.UserOauthActiveLog.get_or_create",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "user.models.UserOauthActiveLog",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "util.get_member_data",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "util.get_member_data",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "util.get_member_data",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "util.get_member_data",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "util.get_member_data",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "util.get_member_data",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "oauth2_provider.decorators.protected_resource",
"line_number": 43,
"usage_type": "call"
}
] |
370528522
|
# Tiny interactive CLI demo: option "1" draws a sine curve; any other
# choice falls through to a message (options 2 and 3 are not implemented).
import numpy as np
import matplotlib.pyplot as plt
print("Select an option")
print("Option 1")
print("Option 2")
print("Option 3")
c=input("Select ")
if c=="1":
    # 100 sample points over [1, 100]
    x=np.linspace(1,100,100)
    y=np.sin(x)
    plt.plot(x,y)
    plt.show()
else:
    # Spanish: "selected another option"
    print("selecciono otro")
| null |
main_REMOTE_2214.py
|
main_REMOTE_2214.py
|
py
| 267 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
}
] |
274493342
|
import logging
from smartcard.CardType import AnyCardType
from smartcard.CardRequest import CardRequest
from smartcard.sw.SWExceptions import SWException
from smartcard.Exceptions import CardRequestTimeoutException
class SmartCardFuzzer():
    """Brute-force fuzzer for ISO 7816 APDU headers (class/ins/p1/p2).

    Connects to the first card found in a reader and enumerates which
    class bytes, instructions and parameters the card accepts, based on
    the status words (sw1, sw2) it returns.
    """

    # Constants for logging a succes
    class Status():
        SUCCES = 0
        FAILED = 1
        PARAM_FAIL = 2

    # Constants for determining succes from the status words
    BAD_INSTRUCTIONS = [0x20, 0x24]  # PIN verify/change — skipped so we never lock the card
    SUCCESS_LIST_RESP = [
        0x90,  # Success
        0x61,  # More Data
        0x67,  # Wrong Length
        0x6c,  # Wrong Length
        0x6a,  # Referenced Data not found
    ]
    SUCCESS_BAD_PARAM_RESP = [(0x6a, 0x86)]  # Incorrect Parameters
    SUCCESS_FAIL_RESP = [(0x6a, 0x81)]  # Function not supported
    UNSUPPORTED_RESP = [(0x6E, 0x00)]  # Class not supported

    def __init__(self, timeout=30, log_file='smart_fuzzer.log'):
        logging.basicConfig(filename=log_file, level=logging.DEBUG)
        self.timeout = timeout
        self.cardservice = self.__get_card()

    def __get_card(self):
        '''
        This method will get the first card from the cardreader
        Afterwards it will connect to the card and returns its service

        returns:
            cardservice: The cardservice which has a connection with the card
        raises:
            A timeout exception if no card was found
        '''
        cardtype = AnyCardType()
        cardrequest = CardRequest(timeout=self.timeout, cardType=cardtype)
        cardservice = cardrequest.waitforcard()
        cardservice.connection.connect()
        return cardservice

    def __send_apdu(self, _class, instruction, p1, p2):
        '''
        This will send and log an apdu command to the card

        returns:
            response: The response of the command
            sw1: The first status word
            sw2: The second status word
        '''
        apdu = [_class, instruction, p1, p2]
        logging.info(f'Send: {str(apdu)}')
        response, sw1, sw2 = self.cardservice.connection.transmit(apdu)
        logging.info(f'Returned: {response} {sw1} {sw2}')
        return response, sw1, sw2

    def __get_succes(self, sw1, sw2):
        '''
        A function to determine if we encountered a Succes

        args:
            sw1: The first status word
            sw2: The second status word
        returns:
            a Status constant
        '''
        if sw1 in self.SUCCESS_LIST_RESP \
                and (sw1, sw2) not in self.SUCCESS_FAIL_RESP \
                and (sw1, sw2) not in self.SUCCESS_BAD_PARAM_RESP:
            logging.info('Apdu command succes!')
            return self.Status.SUCCES
        elif (sw1, sw2) in self.SUCCESS_BAD_PARAM_RESP:
            logging.info('Got partial succes, bruteforce all the params!')
            return self.Status.PARAM_FAIL
        else:
            logging.info(f'Apdu command failed!')
            return self.Status.FAILED

    def _class_fuzzer(self):
        '''
        This will fuzz all the valid classes in the card

        yields:
            _class: If the response was supported
        '''
        for _class in range(0xFF + 1):
            # Set as default failure, in case of exception
            sw1, sw2 = self.UNSUPPORTED_RESP[0]
            try:
                response, sw1, sw2 = self.__send_apdu(_class, 0x00, 0x00, 0x00)
            except SWException as e:
                logging.info(f'Got SWException {e}')
            except Exception as e:
                logging.warning(f'{e}\nSomething went horribly wrong!')
            # If it is supported we call it a succes!
            if (sw1, sw2) not in self.UNSUPPORTED_RESP:
                yield _class

    def _instruction_fuzzer(self, _class):
        '''
        This will fuzz all the valid instructions in the card

        args:
            _class: A valid class byte
        yields:
            A succesful apdu instruction
            (_class, instruction, param1, param2)
        '''
        for instruction in range(0xFF + 1):
            if instruction in self.BAD_INSTRUCTIONS:
                # We don't want to lock up the card ;)
                continue
            # Bugfix: result variable was misspelled 'respsonse'.
            response, sw1, sw2 = self.__send_apdu(_class, instruction, 0x00, 0x00)
            succes = self.__get_succes(sw1, sw2)
            if succes == self.Status.SUCCES:
                yield (_class, instruction, 0x00, 0x00)
            elif succes == self.Status.PARAM_FAIL:
                # Bugfix: the method is named _param_fuzzer; the original call
                # to the non-existent self.param_fuzzer raised AttributeError.
                yield from self._param_fuzzer(_class, instruction)

    def _param_fuzzer(self, _class, instruction):
        '''
        This will fuzz all the possible parameters for an instruction

        args:
            _class: A valid class byte
            instruction: A valid instruction
        yields:
            A succesful apdu instruction
            (_class, instruction, param1, param2)
        '''
        for p1 in range(0xff + 1):
            for p2 in range(0xff + 1):
                response, sw1, sw2 = self.__send_apdu(_class, instruction, p1, p2)
                succes = self.__get_succes(sw1, sw2)
                if succes == self.Status.SUCCES:
                    # Bugfix: 'ins' was an undefined name (NameError);
                    # the loop variable is 'instruction'.
                    yield (_class, instruction, p1, p2)
def main():
    # Connect to the first inserted card (blocks up to the default timeout)
    # and report every APDU header that elicited a "supported" response.
    smart_fuzzer = SmartCardFuzzer()
    for apdu in smart_fuzzer.fuzz():
        print(f"Found valid apdu command {apdu}")


if __name__ == '__main__':
    main()
| null |
card_fuzzer.py
|
card_fuzzer.py
|
py
| 5,914 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "smartcard.CardType.AnyCardType",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "smartcard.CardRequest.CardRequest",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "smartcard.sw.SWExceptions.SWException",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 104,
"usage_type": "call"
}
] |
146135795
|
# Qualitative evaluation of a trained Modified3DUNet on BraTS 2019 training
# samples: visualises predicted vs. ground-truth segmentations and prints
# SimpleITK overlap metrics.
import SimpleITK as sitk
import numpy as np
import matplotlib.pyplot as plt
from data_loaders_old import Dataset
from models import Modified3DUNet
import os
import torch
from enum import Enum

# Get paths and names (IDS) of folders that store the multimodal training data
data_path = r'C:\Users\artur\Desktop\UCL\Brats2019\Data\MICCAI_BraTS_2019_Data_Training\data'
folder_paths = []
folder_IDS = []
for subdir in os.listdir(data_path):
    folder_paths.append(os.path.join(data_path,subdir))
    folder_IDS.append(subdir)

train_set = Dataset(folder_paths, folder_IDS)

# Modified3DUNet(4, 4, 16) — NOTE(review): presumably (in_channels=4 MRI
# modalities, n_classes=4, base filters=16); confirm against models.py.
model = Modified3DUNet(4, 4, 16)
#checkpoint = torch.load("../KFold_Cross_Validation/Fold_1_Epoch_125.tar")
model.load_state_dict(torch.load("pretrained_models/Modified3DUNet_Epoch_200.pt"))
#model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

# Evaluate a few hand-picked training samples.
for idx in [12,55,142,22]:
    scans, labels = train_set[idx]
    scans = scans.unsqueeze(0)  # add a batch dimension
    output, seg_layer = model(scans)
    seg_layer = seg_layer.squeeze()
    # Argmax over the class channel -> predicted label map.
    _, indices = seg_layer.max(0)
    labels = labels.numpy()
    #print(np.unique(labels))
    indices = indices.numpy()
    #print(np.unique(indices))
    # Dice between predicted and ground-truth label maps via SimpleITK.
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    overlap_measures_filter.Execute(sitk.GetImageFromArray(indices), sitk.GetImageFromArray(labels))
    dice = overlap_measures_filter.GetDiceCoefficient()
    # Side-by-side view of one slice (index 50 along the last axis).
    plt.figure()
    plt.subplot(1,2,1)
    plt.imshow(indices[:,:,50])
    plt.title('Model output')
    plt.subplot(1,2,2)
    plt.imshow(labels[:,:,50])
    plt.title('Ground truth')
    plt.suptitle("Dice score = " + str(dice))
    plt.show()

# Full set of overlap metrics — computed for the LAST evaluated sample only,
# since the filter is re-run inside the loop.
measures = {}
measures['dice'] = overlap_measures_filter.GetDiceCoefficient()
measures['jaccard'] = overlap_measures_filter.GetJaccardCoefficient()
measures['volume similarity'] = overlap_measures_filter.GetVolumeSimilarity()
measures['false negatives'] = overlap_measures_filter.GetFalseNegativeError()
measures['false positives'] = overlap_measures_filter.GetFalsePositiveError()
for key, item in measures.items():
    print(key, "\t", item)
| null |
segmentation_evaluation.py
|
segmentation_evaluation.py
|
py
| 2,104 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "data_loaders_old.Dataset",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Modified3DUNet",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "SimpleITK.LabelOverlapMeasuresImageFilter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetImageFromArray",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
}
] |
423676558
|
# This script retrieves tweets previously stored in Redis.
# The stored tweets use the topic "Viva México".
# Tweets are organised and displayed by reading Redis hashmaps.
# You can type a keyword and retrieve the tweets that contain it,
# or a username prefixed with "@" to retrieve that user's tweets.

# Import the required libraries
import redis
import tweepy

# Connection handle to Redis.
# NOTE(review): this rebinds the module name `redis` to the client instance,
# shadowing the library module; it works here but is fragile.
redis = redis.StrictRedis(host='localhost', port=6379, db=0)

print("Bienvenido!")
palabra = input("Introduce una palabra clave (eje: pozole) o un nombre de usuario (eje: @tim_cook) para ordenar los tweets:")
print("")

def sortTweets(palabra):
    """Print every stored tweet matching `palabra`.

    If `palabra` contains "@" it is treated as a username filter; otherwise
    it is matched as a substring of each tweet's text.
    """
    # Counter of how many tweets matched
    numeroOcurrencias = 0
    # An "@" means the user wants tweets from a specific account
    # (e.g. @tim_cook); otherwise we search for a word inside the tweets
    # (e.g. pozole).
    if ("@" in palabra):
        # Iterate over the Redis keys with the "twiterA5_user:" prefix
        for key in redis.keys(pattern="twiterA5_user:*"):
            # Fetch the stored username to compare against the requested one.
            # Original author's note (translated): this seemed to return a list
            # of users when a single name was expected — known issue, unfixed.
            user_names = redis.hget(key, "username").decode("utf-8")
            palabra = palabra.replace("@", "")
            # Check whether this stored tweet belongs to the requested user
            if (palabra in user_names):
                # One more tweet for this user
                numeroOcurrencias = numeroOcurrencias + 1
                # Retrieve the tweet fields from Redis
                id_key = (key.split(b':')[1]).decode("utf-8")
                user_name = redis.hget(key, "username").decode("utf-8")
                date = redis.hget(key, "date").decode("utf-8")
                rt_count = redis.hget(key, "rtcount").decode("utf-8")
                language = redis.hget(key, "language").decode("utf-8")
                content = redis.hget(key, "content").decode("utf-8")
                # Print the tweet fields
                print("id: " + id_key)
                print("user_name: " + user_name)
                print("date: " + date)
                print("retweet count: " + rt_count)
                print("language: " + language)
                print("content: " + content + "\n")
        if (numeroOcurrencias == 0):
            # The requested user was not found among the stored tweets
            print("No se encontraron tweets para @" + palabra + "\n")
        else:
            # Report how many tweets the user posted
            print("Se encontraron " + str(numeroOcurrencias) + " tweets para @" + palabra + "\n")
    else:
        # Iterate over the Redis keys with the "twiterA5_user:" prefix
        for key in redis.keys(pattern="twiterA5_user:*"):
            # Fetch the tweet text
            content = redis.hget(key, "content").decode("utf-8")
            # Check whether the tweet contains the requested word
            if (palabra in content):
                # One more tweet mentioning the word
                numeroOcurrencias = numeroOcurrencias + 1
                # Retrieve the tweet fields from Redis
                id_key = (key.split(b':')[1]).decode("utf-8")
                user_name = redis.hget(key, "username").decode("utf-8")
                date = redis.hget(key, "date").decode("utf-8")
                rt_count = redis.hget(key, "rtcount").decode("utf-8")
                language = redis.hget(key, "language").decode("utf-8")
                # Print the tweet fields
                print("id: " + id_key)
                print("user_name: " + user_name)
                print("date: " + date)
                print("retweet count: " + rt_count)
                print("language: " + language)
                print("content: " + content + "\n")
        if (numeroOcurrencias == 0):
            # The word was not found in any stored tweet
            print("No se encontro la palabra \"" + palabra + "\" dentro de los tweets almacenados!\n")
        else:
            # Report how many times the word was mentioned
            print("La palabra \"" + palabra + "\" fue mencionada " + str(numeroOcurrencias) + " veces!\n")

sortTweets(palabra)
| null |
Redis/Actividad5/sort_tweets.py
|
sort_tweets.py
|
py
| 4,818 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "redis.StrictRedis",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "redis.keys",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "redis.keys",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "redis.hget",
"line_number": 78,
"usage_type": "call"
}
] |
57233834
|
from datetime import datetime
from BacktestMarket import BacktestMarket
from FronttestMarket import FronttestMarket
import time
class Trader:
    """Base strategy runner.

    Subclasses override action()/postAction()/finalAction(); this class
    provides three drivers: trade() against a supplied live market,
    backtest() against historical data, and fronttest() against a paper
    market — all share the same per-timeframe callback contract.
    """

    def __init__(self, portfolio, logger, log_level):
        self.portfolio = portfolio
        self.market = None
        self.logger = logger
        self.log_level = log_level

    def _run_realtime(self, end_date):
        """Shared wall-clock loop for trade() and fronttest().

        Fires action()/postAction() once per self.timeframe until end_date,
        then calls finalAction(). Extracted because both callers previously
        duplicated this loop verbatim.
        """
        # Sentinel far in the past so the first tick fires immediately.
        last_action = datetime(2000, 1, 1)
        self.curr_time = datetime.now()
        while datetime.now() <= end_date:
            if datetime.now() - last_action >= self.timeframe:
                last_action = datetime.now()
                self.curr_time = datetime.now()
                self.action()
                self.postAction()
            # Sleep just under one timeframe between checks.
            time.sleep(self.timeframe.total_seconds() - 1)
        self.finalAction()

    def trade(self, market, end_date, timeframe):
        """Run the strategy against an externally supplied live market until end_date."""
        self.timeframe = timeframe
        self.market = market
        self.market.setLogger(self.logger, self.log_level)
        self.market.setPortfolio(self.portfolio)
        self._run_realtime(end_date)

    def backtest(self, start_date, end_date, timeframe):
        """Replay the strategy over historical data in simulated time."""
        self.timeframe = timeframe
        self.market = BacktestMarket(start_date, end_date, self.timeframe)
        self.market.setTransactionFees(0.0001)
        self.market.setLogger(self.logger, self.log_level)
        self.market.setPortfolio(self.portfolio)
        self.curr_time = start_date
        while self.curr_time < end_date:
            # Advance the simulated clock one timeframe per iteration.
            self.market.setDate(self.curr_time)
            self.action()
            self.postAction()
            self.curr_time = self.curr_time + self.timeframe
        self.finalAction()

    def fronttest(self, apiKey, privKey, end_date, timeframe):
        """Run the strategy in real time against a paper-trading market."""
        self.timeframe = timeframe
        self.market = FronttestMarket(self.timeframe, apiKey, privKey)
        self.market.setTransactionFees(0.0001)
        self.market.setLogger(self.logger, self.log_level)
        self.market.setPortfolio(self.portfolio)
        self._run_realtime(end_date)

    def action(self):
        # Strategy hook: called once per timeframe.
        pass

    def postAction(self):
        # Strategy hook: called right after action() each timeframe.
        pass

    def finalAction(self):
        # Default teardown: log completion and the final portfolio state.
        self.logger.log('Done.', self.log_level)
        self.logger.log(str(self.portfolio), self.log_level)
| null |
src/Trader.py
|
Trader.py
|
py
| 2,321 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "BacktestMarket.BacktestMarket",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "FronttestMarket.FronttestMarket",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 72,
"usage_type": "call"
}
] |
580435353
|
#coding=utf-8
from django.core.management.base import BaseCommand, CommandError
from main.models import *
class Command(BaseCommand):
    """Fix data integrity of share_count of Item model.

    Recomputes each Item's ``share_count`` from the actual number of
    User_Item rows and rewrites it when stale. With no arguments, all
    items are fixed; otherwise only the given item ids.
    """
    args = '<item_id item_id ...>'
    help = 'Fix data integrity'

    def handle(self, *args, **options):
        self.stdout.write('Start fixing\n')
        # Simplification: the original accumulated totals through
        # single-element lists and zip(*...), which obscured a plain sum.
        fixed_count = 0
        # With no explicit ids, fix every item.
        item_ids = args if args else Item.objects.values_list('pk', flat=True)
        for item_id in item_ids:
            fixed_count += fix_for_one_item(item_id)[0]
        self.stdout.write('Finished fixing. Fixed ' + str(fixed_count) + ' share_count\n')


def fix_for_one_item(item_id):
    """Recount share_count for one item.

    Returns a one-element list ([1] if the count was corrected, [0]
    otherwise) to stay backward compatible with existing callers.
    """
    one_fixed_count = [0]
    item = Item.objects.get(pk=item_id)
    share_count = User_Item.objects.filter(item__id=item_id).count()
    if item.share_count != share_count:
        item.share_count = share_count
        item.save()
        one_fixed_count[0] += 1
    return one_fixed_count
| null |
main/management/commands/fix_item_data.py
|
fix_item_data.py
|
py
| 1,365 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 5,
"usage_type": "name"
}
] |
47352914
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 17:29:04 2018

@author: Ashlin

Random-forest regression demo on the Position_Salaries dataset: fits
salary vs. position level and plots the stepwise prediction curve.
"""
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
import numpy as np

mydata = pd.read_csv("Position_Salaries.csv")
X = mydata.iloc[:, 1:2]   # position level, sliced 1:2 to stay 2-D for sklearn
y = mydata.iloc[:, -1]    # salary

regressor = RandomForestRegressor(max_features='sqrt', n_estimators=300, criterion='mse', random_state=0)
regressor.fit(X, y)

plt.title("Regression")
plt.xlabel("X")
plt.ylabel("Predicted Value")
# Dense grid so the step-shaped forest prediction renders smoothly.
X_grid = np.arange(min(X.values), max(X.values), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color='blue', label="Actual")
plt.plot(X_grid, regressor.predict(X_grid), color='red', label="RFR")
plt.legend()
plt.show()

# Bugfix: predict() requires a 2-D sample array; the original predict(6.5)
# raises ValueError on modern scikit-learn. Also index the single-element
# result explicitly instead of relying on deprecated array-to-float coercion.
prediction = regressor.predict([[6.5]])
print("The predicted value for the Salary is %0.4f" % (prediction[0]))
| null |
Random Forest Regression.py
|
Random Forest Regression.py
|
py
| 840 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
}
] |
620129715
|
from copy import error
from django.http.response import HttpResponse
from django.shortcuts import render
from django.http import HttpResponse, request
from rest_framework import serializers, viewsets
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import CardSerializer, CreateCardSerializer, UpdateCardSerializer, TagSerializer
from .models import Card, Tag
from django.utils import timezone
# Create your views here.
class CardView(viewsets.ModelViewSet):
    """Standard CRUD endpoints for Card, provided by DRF's ModelViewSet."""
    serializer_class = CardSerializer
    queryset = Card.objects.all()
class CreateCardView(APIView):
    """POST creates a Card, resolving its tag by *name* (created on demand)."""
    serializer_class = CreateCardSerializer

    def post(self, request, format=None):
        # Work on a mutable copy so the tag name can be replaced by its id
        # before serializer validation. NOTE(review): relies on form-encoded
        # POST data (request.POST), not JSON bodies — confirm client usage.
        updated_request = request.POST.copy()
        tag = Tag.objects.get_or_create(name=updated_request['tags'])  # (instance, created)
        updated_request['tags'] = tag[0].id
        serializer = CreateCardSerializer(data=updated_request)
        if serializer.is_valid():
            instance = Tag.objects.get(id=serializer.data['tags'])
            new_card = Card.objects.create(texto=serializer.data['texto'], tags=instance)
            new_card.save()  # NOTE(review): redundant — objects.create() already saves
            new_serializer = CreateCardSerializer(new_card)
            return Response(new_serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UpdateDeleteCardView(APIView):
    """PUT updates a card (re-tagging by tag name); DELETE removes it."""
    serializer_class = CreateCardSerializer

    def put(self, request, pk, format=None):
        # NOTE(review): an unknown pk raises Card.DoesNotExist (HTTP 500);
        # a 404 response may be preferable — confirm intended behaviour.
        card_object = Card.objects.get(pk=pk)
        req_copy = request.POST.copy()
        # Translate the client-supplied tag *name* into a Tag id, creating
        # the tag on first use (get_or_create returns (instance, created)).
        tag_object = Tag.objects.get_or_create(name=req_copy['tags'])
        req_copy['tags'] = tag_object[0].id
        serializer = UpdateCardSerializer(card_object, data=req_copy)
        if serializer.is_valid():
            data_modificacao = timezone.now()  # modification timestamp
            instance = Tag.objects.get(id=tag_object[0].id)
            # Override validated fields before saving the update.
            serializer.validated_data['texto'] = request.data['texto']
            serializer.validated_data['data_modificacao'] = data_modificacao
            serializer.validated_data['tags'] = instance
            serializer.save()
            res = serializer.data
            return Response(data=res, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        card_object = Card.objects.get(pk=pk)
        card_object.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class TagView(APIView):
    """CRUD endpoints for Tag; POST is idempotent on the tag name."""
    serializer_class = TagSerializer

    def get(self, request, format=None):
        # List all tags.
        queryset = Tag.objects.all()
        serializer = TagSerializer(queryset, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        serializer = TagSerializer(data=request.data)
        if serializer.is_valid():
            # EAFP: return the existing tag (200) or create it (201).
            try:
                tag_exists = Tag.objects.get(name=serializer.data['name'])
                res = {
                    'id': tag_exists.id,
                    'name': tag_exists.name
                }
                return Response(res, status=status.HTTP_200_OK)
            except Tag.DoesNotExist:
                new_tag = Tag.objects.create(name=serializer.data['name'])
                new_tag.save()  # NOTE(review): redundant — objects.create() already saves
                serialized = TagSerializer(new_tag)
                return Response(serialized.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def put(self, request, pk, format=None):
        # NOTE(review): unknown pk raises Tag.DoesNotExist (HTTP 500).
        tag = Tag.objects.get(pk=pk)
        serializer = TagSerializer(data=request.data)
        if serializer.is_valid():
            tag.name = serializer.validated_data['name']
            tag.save()
            res = {
                'id': tag.id,
                'name': tag.name
            }
            return Response(data=res, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        tag_object = Tag.objects.get(pk=pk)
        tag_object.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| null |
back_end_globo/card/views.py
|
views.py
|
py
| 4,347 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "serializers.CardSerializer",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.Card.objects.all",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Card.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "models.Card",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "serializers.CreateCardSerializer",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.http.request.POST.copy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.http.request.POST",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.http.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.get_or_create",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "serializers.CreateCardSerializer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "models.Card.objects.create",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.Card.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.Card",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "serializers.CreateCardSerializer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "serializers.CreateCardSerializer",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.Card.objects.get",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.Card.objects",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "models.Card",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.http.request.POST.copy",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.http.request.POST",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.http.request",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.get_or_create",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "serializers.UpdateCardSerializer",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.get",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.http.request.data",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "django.http.request",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_202_ACCEPTED",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "models.Card.objects.get",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "models.Card.objects",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "models.Card",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_204_NO_CONTENT",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "serializers.TagSerializer",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.all",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "serializers.TagSerializer",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "serializers.TagSerializer",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "django.http.request.data",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "django.http.request",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.get",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "models.Tag.DoesNotExist",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.create",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "serializers.TagSerializer",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.get",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "serializers.TagSerializer",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "django.http.request.data",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "django.http.request",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "models.Tag.objects.get",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "models.Tag.objects",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "models.Tag",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_204_NO_CONTENT",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 135,
"usage_type": "name"
}
] |
274772895
|
from gensim.models import Word2Vec
from gensim.test.utils import common_texts
from gensim.models import Phrases
import pandas as pd
import re
pd_title = pd.read_excel('./excel_sample/마버리타이틀.xlsx')
raw_title_list = pd_title.bd_title_ko.tolist()
raw_title_list
def refined_special_symbol(str_list):
# 특수문자 제끼기
return [re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', obj) for obj in str_list]
title_list = refined_special_symbol(raw_title_list)
corpus_list = [obj.split() for obj in title_list]
bigram_transformer = Phrases(corpus_list)
model = Word2Vec(bigram_transformer[corpus_list], size=2, window=1, min_count=0, workers=4, negative=5)
# size 가운데 동그라미 히든 3개
#
# model.save('./wordvec.model')
# model.hashfxn()
model.most_similar('바삭한')
model.wv.most_similar(positive=['페스티벌', '거리'], negative=['페스티벌'])
model.wv['해리포터']
model.wv.syn0.shape #weight
model.get_weights()
# word_vectors = model.wv
# word_vectors
# model = Word2Vec(bigram_transformer[common_texts], min_count=1)
| null |
gensim.py
|
gensim.py
|
py
| 1,098 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_excel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "gensim.models.Phrases",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 20,
"usage_type": "call"
}
] |
594396117
|
import asyncio
import json
import logging
from abc import abstractmethod
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.agent.types import BaseAgent
from llama_index.callbacks.base import CallbackManager
from llama_index.chat_engine.types import (
AGENT_CHAT_RESPONSE_TYPE,
AgentChatResponse,
ChatResponseMode,
StreamingAgentChatResponse,
)
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.llms.base import LLM, ChatMessage, ChatResponse, MessageRole
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai_utils import is_function_calling_model
from llama_index.memory import BaseMemory, ChatMemoryBuffer
from llama_index.schema import BaseNode, NodeWithScore
from llama_index.tools import BaseTool, ToolOutput, adapt_to_async_tool
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
DEFAULT_MAX_FUNCTION_CALLS = 5
DEFAULT_MODEL_NAME = "gpt-3.5-turbo-0613"
def get_function_by_name(tools: List[BaseTool], name: str) -> BaseTool:
"""Get function by name."""
name_to_tool = {tool.metadata.name: tool for tool in tools}
if name not in name_to_tool:
raise ValueError(f"Tool with name {name} not found")
return name_to_tool[name]
def call_function(
tools: List[BaseTool], function_call: dict, verbose: bool = False
) -> Tuple[ChatMessage, ToolOutput]:
"""Call a function and return the output as a string."""
name = function_call["name"]
arguments_str = function_call["arguments"]
if verbose:
print("=== Calling Function ===")
print(f"Calling function: {name} with args: {arguments_str}")
tool = get_function_by_name(tools, name)
argument_dict = json.loads(arguments_str)
output = tool(**argument_dict)
if verbose:
print(f"Got output: {str(output)}")
print("========================")
return (
ChatMessage(
content=str(output),
role=MessageRole.FUNCTION,
additional_kwargs={
"name": function_call["name"],
},
),
output,
)
async def acall_function(
tools: List[BaseTool], function_call: dict, verbose: bool = False
) -> Tuple[ChatMessage, ToolOutput]:
"""Call a function and return the output as a string."""
name = function_call["name"]
arguments_str = function_call["arguments"]
if verbose:
print("=== Calling Function ===")
print(f"Calling function: {name} with args: {arguments_str}")
tool = get_function_by_name(tools, name)
async_tool = adapt_to_async_tool(tool)
argument_dict = json.loads(arguments_str)
output = await async_tool.acall(**argument_dict)
if verbose:
print(f"Got output: {str(output)}")
print("========================")
return (
ChatMessage(
content=str(output),
role=MessageRole.FUNCTION,
additional_kwargs={
"name": function_call["name"],
},
),
output,
)
def resolve_function_call(function_call: Union[str, dict] = "auto") -> Union[str, dict]:
"""Resolve function call.
If function_call is a function name string, return a dict with the name.
"""
if isinstance(function_call, str) and function_call not in ["none", "auto"]:
return {"name": function_call}
return function_call
class BaseOpenAIAgent(BaseAgent):
def __init__(
self,
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool,
max_function_calls: int,
callback_manager: Optional[CallbackManager],
):
self._llm = llm
self._verbose = verbose
self._max_function_calls = max_function_calls
self.prefix_messages = prefix_messages
self.memory = memory
self.callback_manager = callback_manager or CallbackManager([])
self.sources: List[ToolOutput] = []
@property
def chat_history(self) -> List[ChatMessage]:
return self.memory.get_all()
@property
def all_messages(self) -> List[ChatMessage]:
return self.prefix_messages + self.memory.get()
@property
def latest_function_call(self) -> Optional[dict]:
return self.memory.get_all()[-1].additional_kwargs.get("function_call", None)
def reset(self) -> None:
self.memory.reset()
@abstractmethod
def _get_tools(self, message: str) -> List[BaseTool]:
"""Get tools."""
pass
def _should_continue(
self, function_call: Optional[dict], n_function_calls: int
) -> bool:
if n_function_calls > self._max_function_calls:
return False
if not function_call:
return False
return True
def init_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> Tuple[List[BaseTool], List[dict]]:
if chat_history is not None:
self.memory.set(chat_history)
self.sources = []
self.memory.put(ChatMessage(content=message, role=MessageRole.USER))
tools = self._get_tools(message)
functions = [tool.metadata.to_openai_function() for tool in tools]
return tools, functions
def _process_message(self, chat_response: ChatResponse) -> AgentChatResponse:
ai_message = chat_response.message
self.memory.put(ai_message)
return AgentChatResponse(response=str(ai_message.content), sources=self.sources)
def _get_stream_ai_response(
self, **llm_chat_kwargs: Any
) -> StreamingAgentChatResponse:
chat_stream_response = StreamingAgentChatResponse(
chat_stream=self._llm.stream_chat(**llm_chat_kwargs),
sources=self.sources,
)
# Get the response in a separate thread so we can yield the response
thread = Thread(
target=chat_stream_response.write_response_to_history,
args=(self.memory,),
)
thread.start()
# Wait for the event to be set
chat_stream_response._is_function_not_none_thread_event.wait()
# If it is executing an openAI function, wait for the thread to finish
if chat_stream_response._is_function:
thread.join()
# if it's false, return the answer (to stream)
return chat_stream_response
async def _get_async_stream_ai_response(
self, **llm_chat_kwargs: Any
) -> StreamingAgentChatResponse:
chat_stream_response = StreamingAgentChatResponse(
achat_stream=await self._llm.astream_chat(**llm_chat_kwargs),
sources=self.sources,
)
# create task to write chat response to history
asyncio.create_task(
chat_stream_response.awrite_response_to_history(self.memory)
)
# wait until openAI functions stop executing
await chat_stream_response._is_function_false_event.wait()
# return response stream
return chat_stream_response
def _call_function(self, tools: List[BaseTool], function_call: dict) -> None:
function_message, tool_output = call_function(
tools, function_call, verbose=self._verbose
)
self.sources.append(tool_output)
self.memory.put(function_message)
async def _acall_function(self, tools: List[BaseTool], function_call: dict) -> None:
function_message, tool_output = await acall_function(
tools, function_call, verbose=self._verbose
)
self.sources.append(tool_output)
self.memory.put(function_message)
def _get_llm_chat_kwargs(
self, functions: List[dict], function_call: Union[str, dict] = "auto"
) -> Dict[str, Any]:
llm_chat_kwargs: dict = dict(messages=self.all_messages)
if functions:
llm_chat_kwargs.update(
functions=functions, function_call=resolve_function_call(function_call)
)
return llm_chat_kwargs
def _get_agent_response(
self, mode: ChatResponseMode, **llm_chat_kwargs: Any
) -> AGENT_CHAT_RESPONSE_TYPE:
if mode == ChatResponseMode.WAIT:
chat_response: ChatResponse = self._llm.chat(**llm_chat_kwargs)
return self._process_message(chat_response)
elif mode == ChatResponseMode.STREAM:
return self._get_stream_ai_response(**llm_chat_kwargs)
else:
raise NotImplementedError
async def _get_async_agent_response(
self, mode: ChatResponseMode, **llm_chat_kwargs: Any
) -> AGENT_CHAT_RESPONSE_TYPE:
if mode == ChatResponseMode.WAIT:
chat_response: ChatResponse = await self._llm.achat(**llm_chat_kwargs)
return self._process_message(chat_response)
elif mode == ChatResponseMode.STREAM:
return await self._get_async_stream_ai_response(**llm_chat_kwargs)
else:
raise NotImplementedError
def _chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
mode: ChatResponseMode = ChatResponseMode.WAIT,
) -> AGENT_CHAT_RESPONSE_TYPE:
tools, functions = self.init_chat(message, chat_history)
n_function_calls = 0
# Loop until no more function calls or max_function_calls is reached
while True:
llm_chat_kwargs = self._get_llm_chat_kwargs(functions, function_call)
agent_chat_response = self._get_agent_response(mode=mode, **llm_chat_kwargs)
if not self._should_continue(self.latest_function_call, n_function_calls):
logger.debug("Break: should continue False")
break
assert isinstance(self.latest_function_call, dict)
self._call_function(tools, self.latest_function_call)
n_function_calls += 1
return agent_chat_response
async def _achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
mode: ChatResponseMode = ChatResponseMode.WAIT,
) -> AGENT_CHAT_RESPONSE_TYPE:
tools, functions = self.init_chat(message, chat_history)
n_function_calls = 0
# Loop until no more function calls or max_function_calls is reached
while True:
llm_chat_kwargs = self._get_llm_chat_kwargs(functions, function_call)
agent_chat_response = await self._get_async_agent_response(
mode=mode, **llm_chat_kwargs
)
if not self._should_continue(self.latest_function_call, n_function_calls):
break
assert isinstance(self.latest_function_call, dict)
await self._acall_function(tools, self.latest_function_call)
n_function_calls += 1
return agent_chat_response
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> AgentChatResponse:
chat_response = self._chat(
message, chat_history, function_call, mode=ChatResponseMode.WAIT
)
assert isinstance(chat_response, AgentChatResponse)
return chat_response
async def achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> AgentChatResponse:
chat_response = await self._achat(
message, chat_history, function_call, mode=ChatResponseMode.WAIT
)
assert isinstance(chat_response, AgentChatResponse)
return chat_response
def stream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> StreamingAgentChatResponse:
chat_response = self._chat(
message, chat_history, function_call, mode=ChatResponseMode.STREAM
)
assert isinstance(chat_response, StreamingAgentChatResponse)
return chat_response
async def astream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> StreamingAgentChatResponse:
chat_response = await self._achat(
message, chat_history, function_call, mode=ChatResponseMode.STREAM
)
assert isinstance(chat_response, StreamingAgentChatResponse)
return chat_response
class OpenAIAgent(BaseOpenAIAgent):
def __init__(
self,
tools: List[BaseTool],
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
self._tools = tools
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
llm: Optional[LLM] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
**kwargs: Any,
) -> "OpenAIAgent":
tools = tools or []
chat_history = chat_history or []
llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
if not isinstance(llm, OpenAI):
raise ValueError("llm must be a OpenAI instance")
memory = memory or memory_cls.from_defaults(chat_history, llm=llm)
if not is_function_calling_model(llm.model):
raise ValueError(
f"Model name {llm.model} does not support function calling API. "
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
tools=tools,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
def _get_tools(self, message: str) -> List[BaseTool]:
"""Get tools."""
return self._tools
class RetrieverOpenAIAgent(BaseOpenAIAgent):
"""Retriever OpenAI Agent.
This agent specifically performs retrieval on top of functions
during query-time.
NOTE: this is a beta feature, function interfaces might change.
NOTE: this is also a too generally named, a better name is
FunctionRetrieverOpenAIAgent
TODO: add a native OpenAI Tool Index.
"""
def __init__(
self,
retriever: BaseRetriever,
node_to_tool_fn: Callable[[BaseNode], BaseTool],
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
self._retriever = retriever
self._node_to_tool_fn = node_to_tool_fn
@classmethod
def from_retriever(
cls,
retriever: BaseRetriever,
node_to_tool_fn: Callable[[BaseNode], BaseTool],
llm: Optional[OpenAI] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
) -> "RetrieverOpenAIAgent":
chat_history = chat_history or []
llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
if not isinstance(llm, OpenAI):
raise ValueError("llm must be a OpenAI instance")
memory = memory or memory_cls.from_defaults(chat_history, llm=llm)
if not is_function_calling_model(llm.model):
raise ValueError(
f"Model name {llm.model} does not support function calling API. "
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
retriever=retriever,
node_to_tool_fn=node_to_tool_fn,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
def _get_tools(self, message: str) -> List[BaseTool]:
retrieved_nodes_w_scores: List[NodeWithScore] = self._retriever.retrieve(
message
)
retrieved_nodes = [node.node for node in retrieved_nodes_w_scores]
retrieved_tools: List[BaseTool] = [
self._node_to_tool_fn(n) for n in retrieved_nodes
]
return retrieved_tools
| null |
llama_index/agent/openai_agent.py
|
openai_agent.py
|
py
| 18,383 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.MessageRole.FUNCTION",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "llama_index.llms.base.MessageRole",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.ToolOutput",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.adapt_to_async_tool",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.MessageRole.FUNCTION",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "llama_index.llms.base.MessageRole",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.ToolOutput",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "llama_index.agent.types.BaseAgent",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "llama_index.callbacks.base.CallbackManager",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "llama_index.callbacks.base.CallbackManager",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.ToolOutput",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.MessageRole.USER",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "llama_index.llms.base.MessageRole",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatResponse",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.AgentChatResponse",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "llama_index.chat_engine.types.AgentChatResponse",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "asyncio.create_task",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.WAIT",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatResponse",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.STREAM",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.AGENT_CHAT_RESPONSE_TYPE",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.WAIT",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatResponse",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.STREAM",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.AGENT_CHAT_RESPONSE_TYPE",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.WAIT",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.AGENT_CHAT_RESPONSE_TYPE",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.WAIT",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.AGENT_CHAT_RESPONSE_TYPE",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.WAIT",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.AgentChatResponse",
"line_number": 307,
"usage_type": "argument"
},
{
"api_name": "llama_index.chat_engine.types.AgentChatResponse",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.WAIT",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.AgentChatResponse",
"line_number": 319,
"usage_type": "argument"
},
{
"api_name": "llama_index.chat_engine.types.AgentChatResponse",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.STREAM",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 331,
"usage_type": "argument"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode.STREAM",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "llama_index.chat_engine.types.ChatResponseMode",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 343,
"usage_type": "argument"
},
{
"api_name": "llama_index.chat_engine.types.StreamingAgentChatResponse",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "llama_index.callbacks.base.CallbackManager",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.LLM",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "llama_index.callbacks.base.CallbackManager",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.ChatMemoryBuffer",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 386,
"usage_type": "argument"
},
{
"api_name": "llama_index.llms.openai_utils.is_function_calling_model",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "llama_index.indices.base_retriever.BaseRetriever",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "llama_index.schema.BaseNode",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "llama_index.callbacks.base.CallbackManager",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "llama_index.indices.base_retriever.BaseRetriever",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "llama_index.schema.BaseNode",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.BaseMemory",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "llama_index.callbacks.base.CallbackManager",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "llama_index.memory.ChatMemoryBuffer",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.openai.OpenAI",
"line_number": 473,
"usage_type": "argument"
},
{
"api_name": "llama_index.llms.openai_utils.is_function_calling_model",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "llama_index.llms.base.ChatMessage",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 503,
"usage_type": "name"
},
{
"api_name": "llama_index.schema.NodeWithScore",
"line_number": 503,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 507,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 507,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "llama_index.tools.BaseTool",
"line_number": 502,
"usage_type": "name"
}
] |
623640422
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(User):
    """Site user extending Django's built-in ``User`` with an avatar.

    NOTE(review): this is multi-table inheritance from ``User``;
    ``AbstractUser`` is imported at module level but unused here.
    """

    avatar = models.ImageField(upload_to='avatar/%Y/%m/%d', null=True, verbose_name='Аватар', default='avatar.jpg')

    class Meta:
        verbose_name = 'Userprofile'
        verbose_name_plural = 'Userprofiles'

    def name(self):
        """Return the user's display name: "<first_name> <last_name>"."""
        return " ".join([self.first_name, self.last_name])

    def __str__(self):
        return "{:d}: \n\t{}".format(self.id, self.username)
class Tag(models.Model):
    """A label attachable to many questions.

    The M2M is declared on the Tag side, so Question reaches its tags
    through the reverse accessor (``Question.objects.filter(tag__name=...)``
    in QuestionManager).
    """
    name = models.CharField(max_length=100, verbose_name=u'Тег')
    # Owning side of the tag <-> question many-to-many relation.
    questions = models.ManyToManyField('Question')

    class Meta:
        verbose_name = 'Tag'
        verbose_name_plural = 'Tags'

    def __str__(self):
        return self.name
# class Like(models.Model):
# author = models.ForeignKey('UserProfile', on_delete=models.CASCADE)
# like_date_time = models.DateTimeField(auto_now_add=True)
# like_target_question = models.ForeignKey('Question', on_delete=models.CASCADE)
# like_target_answer = models.ForeignKey('Answer', on_delete=models.CASCADE)
# unique для 3 полей
# две модели лайков для вопросов и ответов
# две связи к вопросу и к ответу одна из них нулл
class QuestionManager(models.Manager):
    """Manager returning questions annotated with transient attributes:
    ``likes`` (sum of LikeQuestion.status), ``tags`` (list of Tag) and
    ``number_answers`` (count of Answer rows).

    NOTE(review): every helper issues one query per question (N+1 query
    pattern) — acceptable for small data sets, consider DB aggregation
    otherwise.
    """

    @staticmethod
    def add_likes(all_questions):
        # Attach ``likes`` = sum of vote statuses over all LikeQuestion
        # rows targeting each question.
        for question in all_questions:
            all_likes = LikeQuestion.objects.filter(like_target_question=question)
            question.likes = 0
            for like in all_likes:
                question.likes += like.status
        return all_questions

    @staticmethod
    def add_tags(all_questions):
        # Attach ``tags`` = list of Tag objects linked to each question.
        for question in all_questions:
            question_tags = list(Tag.objects.filter(questions__id=int(question.id)))
            question.tags = question_tags
        return all_questions

    @staticmethod
    def add_numbers_answers(all_questions):
        # Attach ``number_answers`` = number of answers each question has.
        for question in all_questions:
            all_answers = Answer.objects.filter(question=question)
            question.number_answers = len(all_answers)
        return all_questions

    # newest questions first
    def recent_questions(self):
        """All questions ordered by creation date (newest first), annotated."""
        all_questions = list(super(QuestionManager, self).get_queryset().order_by('-create_date'))
        self.add_likes(all_questions)
        self.add_tags(all_questions)
        self.add_numbers_answers(all_questions)
        return all_questions

    # questions carrying a given tag
    def questions_by_tag(self, tag):
        """Questions whose tag name equals *tag*, annotated.

        NOTE(review): unlike the other helpers this returns a lazy
        queryset, not a list — the transient attributes are lost if the
        queryset is re-evaluated; verify callers iterate it only once.
        """
        all_questions = Question.objects.filter(tag__name=tag)
        self.add_tags(all_questions)
        self.add_likes(all_questions)
        self.add_numbers_answers(all_questions)
        return all_questions

    # most popular questions
    def questions_with_high_rating(self):
        """All questions sorted by like total, highest first."""
        all_questions = Question.objects.all()
        self.add_likes(all_questions)
        self.add_tags(all_questions)
        self.add_numbers_answers(all_questions)
        result = list(all_questions)
        result.sort(key=lambda question: question.likes, reverse=True)
        return result

    # fetch all questions with tags (and likes) attached
    def get_all_with_tags(self):
        all_questions = list(Question.objects.all())
        self.add_tags(all_questions)
        self.add_likes(all_questions)
        return all_questions

    # fetch one question by id with tags and like total attached
    def get_with_tags(self, question_id):
        question = Question.objects.get(id=question_id)
        question_tags = list(Tag.objects.filter(questions__id=int(question_id)))
        question.tags = question_tags
        all_likes = LikeQuestion.objects.filter(like_target_question=question)
        question.likes = 0
        for like in all_likes:
            question.likes += like.status
        return question
class Question(models.Model):
    """A question posted by a user; tags are attached via Tag.questions."""
    title = models.CharField(max_length=200, verbose_name=u'Заголовок вопроса')
    text = models.TextField(verbose_name=u'Тело вопроса')
    author = models.ForeignKey('UserProfile', on_delete=models.CASCADE, verbose_name=u'Автор')
    create_date = models.DateField(auto_now_add=True, verbose_name=u'Дата создания')
    # Custom manager adds like/tag/answer-count annotation helpers.
    objects = QuestionManager()

    class Meta:
        verbose_name = 'Question'
        verbose_name_plural = 'Questions'

    def __str__(self):
        return "%s: \n\t%s" % (self.title, self.text)
class AnswerManager(models.Manager):
    """Custom manager for Answer with like-count annotation."""

    def get_with_likes(self, question_id):
        """Fetch every answer to question *question_id* and attach a
        transient ``likes`` attribute holding the number of LikeAnswer
        rows targeting that answer.
        """
        answers = Answer.objects.filter(question=int(question_id))
        for answer in answers:
            likes_qs = LikeAnswer.objects.filter(like_target_answer=answer)
            answer.likes = len(likes_qs)
        return answers
class Answer(models.Model):
    """An answer posted to a Question."""
    text = models.TextField(verbose_name=u'Тело ответа')
    # presumably flags the accepted answer — TODO confirm against views
    is_correct = models.BooleanField(default=False)
    question = models.ForeignKey('Question', on_delete=models.CASCADE)
    author = models.ForeignKey('UserProfile', on_delete=models.CASCADE)
    create_date = models.DateField(auto_now_add=True, verbose_name=u'Дата создания')
    # Custom manager adds get_with_likes().
    objects = AnswerManager()

    class Meta:
        verbose_name = 'Answer'
        verbose_name_plural = 'Answers'

    def __str__(self):
        return "%d: \n\t%s" % (self.id, self.text)
class LikeQuestion(models.Model):
    """A vote on a question; at most one per (author, question)."""
    author = models.ForeignKey('UserProfile', on_delete=models.CASCADE)
    like_date_time = models.DateTimeField(auto_now_add=True)
    like_target_question = models.ForeignKey('Question', on_delete=models.CASCADE)
    # Vote value summed by QuestionManager.add_likes (presumably +1/-1 —
    # TODO confirm against the views that create these rows).
    status = models.IntegerField(default=0, verbose_name=u'Статус лайк или дизлайк')

    class Meta:
        unique_together = ('author', 'like_target_question',)
        verbose_name = 'Like for question'
        verbose_name_plural = 'Like for questions'

    def __str__(self):
        return "%s: \n\t%d" % (self.author, self.status)
class LikeAnswer(models.Model):
    """A vote on an answer; at most one per (author, answer)."""
    author = models.ForeignKey('UserProfile', on_delete=models.CASCADE)
    like_date_time = models.DateTimeField(auto_now_add=True)
    like_target_answer = models.ForeignKey('Answer', on_delete=models.CASCADE)
    # Vote value; NOTE(review): AnswerManager.get_with_likes counts rows
    # instead of summing status — verify which semantics is intended.
    status = models.IntegerField(default=0, verbose_name=u'Статус лайк или дизлайк')

    class Meta:
        unique_together = ('author', 'like_target_answer',)
        verbose_name = 'Like for answer'
        verbose_name_plural = 'Like for answers'

    def __str__(self):
        return "%s: \n\t%d" % (self.author, self.status)
| null |
ask_app/models.py
|
models.py
|
py
| 6,969 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.auth.models.User",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.ManyToManyField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.Manager",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.DateField",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "django.db.models.Manager",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.DateField",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 183,
"usage_type": "name"
}
] |
165555383
|
import multiprocessing as mp
import csv
import nodes
import setup_state
import steady_state
import plt
def main():
    """Entry point: fan fdrc() out over a pool of 6 worker processes.

    The ``__name__`` guard lives *inside* main() so that worker processes,
    which re-import this module, do not recursively spawn pools; when the
    module is merely imported this function is a no-op.
    """
    if __name__ == "__main__":
        # Context manager guarantees the pool is terminated/cleaned up even
        # if a worker raises (the original code leaked the pool).
        with mp.Pool(6) as pool:
            pool.map(fdrc, range(int(input("Simulation times: "))))
def fdrc(k):
    """Run one full FDRC simulation (replication index *k*) and write
    per-round statistics to one CSV file per relay distance.

    Sweeps relay distance rd over 10..80 m in 5 m steps.  For each rd a
    fresh network of `density` nodes is simulated round by round until a
    node dies (len(node) != density), then the collected rows are dumped
    to "<name>.csv".
    """
    # rd = [10, 80] m, step by 5 m
    for rd in range(10, 81, 5):
        density = 250
        fuzzy = False
        tvalue = 0.5
        node, base = nodes.generate_node(density, tvalue)
        name = "D" + str(density) + "RD" + str(rd) + ("FUZZY" if fuzzy else "EC" + str(tvalue)) + "R" + str(k + 1)
        data = list()
        count = 1
        while len(node) == density:
            # setup phase: cluster-head election / cluster formation
            cch = setup_state.phase1(node)
            ch, cm = setup_state.phase2(node, cch, rd)
            setup_state.phase3(node, ch, cm, rd)
            setup_state.phase4(node, cm)
            setup_state.phase5(node, ch, rd, density, fuzzy)
            # steady state phase: data transmission round
            steady_state.steady(node, base, ch, cm)
            # average cluster size over elected cluster heads (0 if none)
            sizeavg = 0 if len(ch) == 0 else sum(node[i].size for i in ch) / len(ch)
            # nodes left without a cluster-head pointer
            # (bug fix: compare to None with `is`, not `==`)
            noptr = sum(1 for i in node if i.CHptr is None and i.role != "CH")
            data.append([str(count), str(sizeavg), str(noptr)])
            nodes.outofenergy(node)
            for sensor in node:
                sensor.reset()
            count += 1
        print("Simulation time: {} {}".format(k + 1, mp.current_process()))
        with open(name + ".csv", "w", newline="") as csv_file:
            fieldnames = ["round", "cluster size avg", "no CH ptr"]
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            writer.writeheader()
            for line in data:
                writer.writerow({fieldnames[0]: line[0], fieldnames[1]: line[1], fieldnames[2]: line[2]})
main()
| null |
main.py
|
main.py
|
py
| 1,816 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "multiprocessing.Pool",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "nodes.generate_node",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "setup_state.phase1",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "setup_state.phase2",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "setup_state.phase3",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "setup_state.phase4",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "setup_state.phase5",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "steady_state.steady",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nodes.outofenergy",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 49,
"usage_type": "call"
}
] |
14250168
|
import networkx as nx
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
from shapely.geometry.polygon import Polygon
from descartes import PolygonPatch
from matplotlib import animation
from matplotlib.animation import FuncAnimation
from aircraft import Aircraft
import time
class Simulator(object):
def __init__(self, airport_graph, test_ac, obs_acs=[], timestep=1, sim_time=0, interval=0.1, until='goal', num_random_obs=0):
self.test_ac = test_ac
self.obs_acs = obs_acs
self.timestep = timestep
self.airport_graph = airport_graph
self.until = until
self.sim_time = sim_time
self.interval = interval
self.initialize_map()
self.airport_map = self.initialize_map()
self.num_random_obs = num_random_obs
if self.num_random_obs>0:
self.obs_acs.extend(self.obs_acs_gen2(self.num_random_obs))
self.test_ac.obs_acs.extend(self.obs_acs)
    def initialize_map(self):
        """Load the KBOS airport geometry from .mat files, draw it on a
        two-axes matplotlib figure and return a Map wrapper.

        Side effects: creates figure 1 with two stacked axes (ax1 =
        full-airport overview, ax2 = zoom window set later by simulate())
        and adds layout/taxiway/runway polygon patches to both.

        NOTE(review): ``Map`` is assumed to be defined elsewhere in this
        module — confirm.
        """
        # initialize map for visualization. Up to now, simply load mat files
        KBOS_layout = scipy.io.loadmat('KBOS_layout')
        KBOS_taxiways = scipy.io.loadmat('KBOS_taxiways')
        KBOS_runways = scipy.io.loadmat('KBOS_runways')
        # unwrap the array stored under the same variable name in each file
        KBOS_layout = KBOS_layout['KBOS_layout']
        KBOS_taxiways = KBOS_taxiways['KBOS_taxiways']
        KBOS_runways = KBOS_runways['KBOS_runways']
        layout = []
        # running bounding box of the whole layout (used for ax1 limits)
        layout_x_min = 10**10
        layout_x_max = -10**10
        layout_y_min = 10**10
        layout_y_max = -10**10
        for i in KBOS_layout:
            poly_temp = []
            for j in range(np.size(i[0],0)):
                poly_temp.append((i[0][j,0], i[0][j,1]))
                if i[0][j,0] < layout_x_min:
                    layout_x_min = i[0][j,0]
                if i[0][j,0] > layout_x_max:
                    layout_x_max = i[0][j,0]
                if i[0][j,1] < layout_y_min:
                    layout_y_min = i[0][j,1]
                if i[0][j,1] > layout_y_max:
                    layout_y_max = i[0][j,1]
            # close the polygon by repeating its first vertex
            poly_temp.append((i[0][0,0], i[0][0,1]))
            layout.append(Polygon(poly_temp))
        taxiways = []
        for i in KBOS_taxiways:
            poly_temp = []
            for j in range(np.size(i[0],0)):
                poly_temp.append((i[0][j,0], i[0][j,1]))
            poly_temp.append((i[0][0,0], i[0][0,1]))
            taxiways.append(Polygon(poly_temp))
        runways = []
        for i in KBOS_runways:
            poly_temp = []
            for j in range(np.size(i[0],0)):
                poly_temp.append((i[0][j,0], i[0][j,1]))
            poly_temp.append((i[0][0,0], i[0][0,1]))
            runways.append(Polygon(poly_temp))
        # figure with two stacked axes: overview (ax1) and zoom (ax2)
        fig = plt.figure(1, figsize=(15,15), dpi=90)
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        ax = [ax1, ax2]
        # draw layout (white), taxiways (red), runways (green) on ax1 ...
        for i in range(len(layout)):
            patch_temp = PolygonPatch(layout[i],fc='white', ec='black', alpha=0.05)
            ax1.add_patch(patch_temp)
        for i in range(len(taxiways)):
            patch_temp = PolygonPatch(taxiways[i],fc='red', ec='black', alpha=0.05)
            ax1.add_patch(patch_temp)
        for i in range(len(runways)):
            patch_temp = PolygonPatch(runways[i],fc='green', ec='black', alpha=0.05)
            ax1.add_patch(patch_temp)
        # ... and the same three layers on ax2
        for i in range(len(layout)):
            patch_temp = PolygonPatch(layout[i],fc='white', ec='black', alpha=0.05)
            ax2.add_patch(patch_temp)
        for i in range(len(taxiways)):
            patch_temp = PolygonPatch(taxiways[i],fc='red', ec='black', alpha=0.05)
            ax2.add_patch(patch_temp)
        for i in range(len(runways)):
            patch_temp = PolygonPatch(runways[i],fc='green', ec='black', alpha=0.05)
            ax2.add_patch(patch_temp)
        # only the overview axes get fixed limits here; ax2 is re-windowed
        # around the ego aircraft every frame in simulate()
        xrange = [layout_x_min, layout_x_max]
        yrange = [layout_y_min, layout_y_max]
        ax1.set_xlim(*xrange)
        ax1.set_ylim(*yrange)
        airport_map = Map(layout, runways, taxiways, fig, ax)
        return airport_map
    def simulate(self):
        """Main visual simulation loop.

        Each iteration: advance all obstacle aircraft and the ego
        aircraft by one timestep, redraw both axes (overview + zoom
        centered on the ego aircraft), plot the ego's current path and
        its predicted obstacle trajectories, and pace the loop to
        roughly ``self.interval`` wall-clock seconds per frame.

        Terminates when the ego aircraft reaches its goal node
        (``until == 'goal'``) or when ``sim_time`` exceeds the numeric
        ``until`` limit.
        """
        edges = self.airport_graph.digraph.edges()
        ax = self.airport_map.draw_map()
        ax1 = ax[0]
        ax2 = ax[1]
        scat1 = []
        scat2 = []
        path_plot = []
        t0 = time.time()
        while(True):
            # advance every aircraft one simulation step
            for obs_ac in self.obs_acs:
                obs_ac.sim_move(self.timestep)
            self.test_ac.ego_sim_move(self.timestep)
            # pace the loop: sleep out the remainder of the frame interval
            t1 = time.time()
            sleep_time = t1-t0
            if self.interval - sleep_time > 0.001:
                plt.pause(self.interval - sleep_time)
            else:
                print("interval is too short to simulate!")
            t0 = time.time()
            # remove last frame's scatter markers and path lines
            for i in scat1:
                i.remove()
            for i in scat2:
                i.remove()
            while len(ax1.lines)>0:
                del ax1.lines[0]
            # redraw aircraft positions: ego in blue, obstacles in red
            scat1 = []
            scat1.append(ax1.scatter(self.test_ac.current_position[0], self.test_ac.current_position[1],color='blue'))
            for obs_ac in self.obs_acs:
                scat1.append(ax1.scatter(obs_ac.current_position[0], obs_ac.current_position[1],color='red'))
            scat2 = []
            scat2.append(ax2.scatter(self.test_ac.current_position[0], self.test_ac.current_position[1],color='blue'))
            for obs_ac in self.obs_acs:
                scat2.append(ax2.scatter(obs_ac.current_position[0], obs_ac.current_position[1],color='red'))
            # plot the ego aircraft's remaining planned path (black)
            waypoints_x = []
            waypoints_y = []
            for i in range(len(self.test_ac.current_path)-1):
                waypoints_x.extend(edges[str(self.test_ac.current_path[i]), str(self.test_ac.current_path[i+1])]['waypoints'][:,0])
                waypoints_y.extend(edges[str(self.test_ac.current_path[i]), str(self.test_ac.current_path[i+1])]['waypoints'][:,1])
            current_edge_x = edges[str(self.test_ac.current_edge[0]), str(self.test_ac.current_edge[1])]['waypoints'][:,0]
            current_edge_y = edges[str(self.test_ac.current_edge[0]), str(self.test_ac.current_edge[1])]['waypoints'][:,1]
            ax1.plot(waypoints_x, waypoints_y,color='black')
            ax1.plot(current_edge_x, current_edge_y,color='black')
            # plot predicted obstacle trajectories in red; line width scales
            # with value[-1] (presumably a probability/confidence — confirm)
            for key, value in self.test_ac.traj_pred.items():
                start, end, _ = key
                obs_wpts = edges[start,end]['waypoints']
                ax1.plot(obs_wpts[:,0],obs_wpts[:,1],'r',linewidth=value[-1]*2)
            # re-center the zoom axes on the ego aircraft
            xrange = [self.test_ac.current_position[0]-0.005, self.test_ac.current_position[0]+0.005]
            yrange = [self.test_ac.current_position[1]-0.005, self.test_ac.current_position[1]+0.005]
            ax2.set_xlim(*xrange)
            ax2.set_ylim(*yrange)
            plt.show(block=False)
            # termination: goal reached, or time limit exceeded
            if self.until=='goal':
                if self.test_ac.current_edge[1]==self.test_ac.goal_node and np.size(self.test_ac.current_wpts)==0:
                    break
            else:
                if self.sim_time >= self.until:
                    break
            self.sim_time = self.sim_time + self.timestep
def obs_acs_gen2(self, num_random_obs):
G = self.airport_graph.digraph
obs_acs = []
# good case: prediction horizon as 300
# p1 = [36, 35, 33, 24, 21, 12, 11, 14, 16, 19, 20, 23, 21, 12, 11, 14, 16, 19, 22, 26, 30, 31, 43, 45, 47, 49, 61, 65, 67, 68]
# p2 = [93, 95, 103, 102, 106, 77, 78, 81, 82, 85, 88, 89, 84, 83, 80, 75, 74, 66, 62, 52, 51, 46, 45, 47, 49, 61, 65, 67, 69, 76]
# p3 = [44, 45, 47, 50, 51, 46, 44, 34, 32, 31, 43, 46, 51, 50, 49, 61, 65, 67, 68, 72, 70, 56, 57, 63, 65, 67, 68, 72, 70, 56]
# p4 = [11, 12, 21, 23, 20, 22, 26, 30, 32, 35, 36, 24, 28, 32, 34, 44, 45, 47, 48, 41, 42, 55, 57, 63, 61, 49, 48, 41, 39, 107]
# p1=[63, 65, 67, 68, 72, 70, 56, 57, 63, 64, 62, 52, 50, 48, 41, 42, 55, 56, 70, 71, 94, 95, 103, 102, 106, 77, 74, 66, 64, 63]
# p2=[64, 62, 52, 51, 46, 43, 31, 28, 23, 20, 19, 16, 14, 11, 12, 21, 23, 20, 19, 16, 14, 11, 12, 21, 28, 30, 26, 29, 40, 41]
# p3=[103, 102, 106, 77, 76, 69, 66, 64, 63, 57, 55, 42, 40, 29, 22, 20, 23, 28, 32, 35, 36, 24, 21, 12, 11, 14, 16, 19, 20, 23]
# p4=[63, 57, 56, 70, 71, 94, 95, 103, 102, 106, 77, 78, 81, 83, 84, 89, 88, 85, 82, 80, 75, 76, 69, 68, 72, 70, 56, 57, 63, 61]
# p1=[66, 74, 76, 69, 74, 75, 80, 82, 85, 88, 89, 84, 82, 81, 78, 77, 106, 102, 103, 95, 94, 71, 70, 56, 57, 63, 65, 67, 68, 72]
# p2=[54, 56, 70, 72, 68, 69, 76, 74, 66, 62, 52, 50, 48, 41, 40, 29, 26, 30, 28, 24, 36, 35, 34, 44, 46, 51, 52, 62, 64, 61]
# p3=[17, 20, 23, 21, 12, 11, 14, 16, 19, 20, 23, 21, 12, 11, 14, 16, 19, 20, 23, 21, 12, 11, 14, 16, 19, 20, 23, 28, 32, 33]
# p4=[74, 77, 106, 102, 103, 95, 94, 71, 70, 56, 57, 63, 65, 67, 69, 76, 74, 69, 76, 78, 81, 82, 85, 88, 89, 84, 83, 81, 78, 77]
p1=[97, 77, 74, 66, 62, 52, 51, 46, 43, 31, 32, 35, 36, 24, 28, 32, 35, 36, 24, 28, 32, 34, 44, 45, 47, 49, 61, 63, 57, 54]
p2=[68, 69, 76, 78, 81, 83, 84, 89, 88, 85, 82, 81, 78, 77, 106, 102, 103, 95, 94, 71, 70, 56, 57, 63, 64, 66, 74, 77, 106, 102]
p3=[113, 108, 107, 39, 40, 29, 26, 30, 31, 43, 46, 51, 50, 47, 45, 46, 51, 52, 62, 66, 74, 75, 80, 83, 84, 89, 88, 85, 82, 80]
p4=[74, 66, 62, 52, 50, 47, 45, 46, 51, 50, 49, 61, 65, 67, 69, 76, 77, 106, 102, 103, 95, 94, 71, 70, 56, 57, 63, 64, 62, 52]
# p1 = [26, 22, 20, 23, 21, 12, 11, 14, 16, 19, 22, 29, 40, 41, 48, 50, 52, 62, 66, 74, 76, 69, 74, 76, 69, 67, 65, 61, 49, 48]
# p2 = [77, 78, 81, 82, 85, 88, 89, 84, 83, 80, 75, 76, 69, 74, 77, 106, 102, 103, 95, 94, 71, 70, 56, 55, 42, 40, 29, 22, 19, 16]
# p3 = [30, 31, 43, 46, 51, 50, 48, 41, 42, 55, 57, 63, 64, 62, 52, 51, 46, 43, 31, 32, 35, 36, 24, 21, 12, 11, 14, 16, 19, 20]
# p4 = [46, 45, 47, 49, 61, 65, 67, 69, 76, 77, 106, 102, 103, 95, 94, 71, 70, 56, 55, 42, 40, 29, 26, 30, 28, 24, 36, 35, 32, 30]
# p1 = [96, 95, 103, 102, 106, 77, 78, 81, 82, 85, 88, 89, 84, 83, 81, 78, 76, 69, 74, 76, 69, 66, 64, 65, 67, 69, 76, 77, 106, 102]
# p2 = [18, 16, 14, 11, 12, 21, 24, 36, 35, 33, 24, 23, 20, 19, 16, 14, 11, 12, 21, 28, 30, 26, 22, 19, 16, 14, 11, 12, 21, 24]
# p3 = [77, 78, 81, 82, 84, 89, 88, 85, 82, 80, 75, 74, 69, 76, 74, 66, 64, 65, 67, 69, 76, 77, 106, 102, 103, 95, 94, 71, 70, 56]
# p4 = [116, 114, 120, 25, 26, 30, 31, 43, 45, 47, 50, 52, 62, 64, 63, 57, 55, 42, 40, 29, 22, 20, 23, 28, 30, 26, 22, 19, 16, 14]
# obs_acs.append(Aircraft(current_position=G.edges[str(p1[0]),str(p1[1])]['waypoints'][3,0:2],
# current_heading=G.edges[str(p1[0]),str(p1[1])]['waypoints'][3,2],
# current_edge=(str(p1[0]),str(p1[1]), G.edges[str(p1[0]),str(p1[1])]),
# airport_graph=self.airport_graph, callsign='OBS_1',
# current_path=np.array(p1), current_wpts=G.edges[str(p1[0]),str(p1[1])]['waypoints'][3,0:3],
# risk_bound=1, path_fixed=1, traj_fixed=0))
# obs_acs.append(Aircraft(current_position=G.edges[str(p2[0]),str(p2[1])]['waypoints'][3,0:2],
# current_heading=G.edges[str(p2[0]),str(p2[1])]['waypoints'][3,2],
# current_edge=(str(p2[0]),str(p2[1]), G.edges[str(p2[0]),str(p2[1])]),
# airport_graph=self.airport_graph, callsign='OBS_2',
# current_path=np.array(p2), current_wpts=G.edges[str(p2[0]),str(p2[1])]['waypoints'][3,0:3],
# risk_bound=1, path_fixed=1, traj_fixed=0))
# obs_acs.append(Aircraft(current_position=G.edges[str(p3[0]),str(p3[1])]['waypoints'][3,0:2],
# current_heading=G.edges[str(p3[0]),str(p3[1])]['waypoints'][3,2],
# current_edge=(str(p3[0]),str(p3[1]), G.edges[str(p3[0]),str(p3[1])]),
# airport_graph=self.airport_graph, callsign='OBS_3',
# current_path=np.array(p3), current_wpts=G.edges[str(p3[0]),str(p3[1])]['waypoints'][3,0:3],
# risk_bound=1, path_fixed=1, traj_fixed=0))
obs_acs.append(Aircraft(current_position=G.edges[str(p4[0]),str(p4[1])]['waypoints'][3,0:2],
current_heading=G.edges[str(p4[0]),str(p4[1])]['waypoints'][3,2],
current_edge=(str(p4[0]),str(p4[1]), G.edges[str(p4[0]),str(p4[1])]),
airport_graph=self.airport_graph, callsign='OBS_4',
current_path=np.array(p4), current_wpts=G.edges[str(p4[0]),str(p4[1])]['waypoints'][3,0:3],
risk_bound=1, path_fixed=1, traj_fixed=0))
return obs_acs
    def obs_acs_gen(self, num_random_obs):
        """Generate `num_random_obs` obstacle aircraft on random, conflict-free paths.

        Each aircraft gets a random 30-node taxi path whose consecutive edges
        are heading-compatible (see is_connectable). A candidate path is
        rejected when its edge-occupancy intervals overlap those of any
        previously accepted aircraft, so the generated traffic is deconflicted
        in time. Time unit is "waypoints traversed" (one step per waypoint row).

        Args:
            num_random_obs: number of obstacle aircraft to create.

        Returns:
            list: accepted Aircraft objects, one per non-conflicting path.
        """
        G = self.airport_graph.digraph
        Edges = G.edges()
        Edges_list = list(G.edges.data())
        obs_acs = []
        ac_num = 0
        # One occupancy schedule per accepted aircraft; used to reject overlaps.
        occupancy_time_list = []
        while(len(obs_acs)<num_random_obs):
            flag = 0  # becomes 1 when the candidate path conflicts with an accepted one
            ac_num=ac_num+1
            path = []
            # Seed the path with a random edge (node pair).
            rand_edge = Edges_list[np.random.randint(len(Edges_list))]
            path.append(int(rand_edge[0]))
            path.append(int(rand_edge[1]))
            k=0  # counts connection attempts; guards against dead-end thrashing
            while(len(path)<30):
                adj_edges = list(G.adj[str(path[-1])])
                while(len(adj_edges)>0):
                    k=k+1
                    if k>200:
                        break
                    # Try a random successor; keep it only if the turn is feasible.
                    adj_edge = adj_edges[np.random.randint(len(adj_edges))]
                    if self.is_connectable(G.edges[str(path[-2]),str(path[-1])], G.edges[str(path[-1]),str(adj_edge)]):
                        path.append(int(adj_edge))
                        break
                    else:
                        adj_edges.remove(adj_edge)
                if len(adj_edges)==0:
                    # Dead end: back up one node and try a different branch.
                    del path[-1]
                if len(path)==1 or k>200:
                    # Path collapsed or too many failed attempts: restart fresh.
                    k=0
                    path = []
                    rand_edge = Edges_list[np.random.randint(len(Edges_list))]
                    path.append(int(rand_edge[0]))
                    path.append(int(rand_edge[1]))
            # Build the occupancy schedule: [edge, [enter, exit]] entries for BOTH
            # travel directions of each edge, so head-on conflicts are caught too.
            occupancy_time = []
            time = 0
            for i in range(len(path)-1):
                edge_identifier = (str(path[i]), str(path[i+1]))
                edge_length = np.size(Edges[edge_identifier[0], edge_identifier[1]]['waypoints'],0)
                if i < len(path)-2:
                    occupancy_time.append([edge_identifier, [time, time + edge_length]])
                    occupancy_time.append([(edge_identifier[1],edge_identifier[0]), [time, time + edge_length]])
                    time = time + edge_length
                else:
                    # Final edge: the aircraft parks there, so occupy it "forever".
                    occupancy_time.append([edge_identifier, [time, 10**10]])
                    occupancy_time.append([(edge_identifier[1],edge_identifier[0]), [time, 10**10]])
            # Reject the candidate if any of its edge intervals overlaps an
            # accepted aircraft's interval on the same (directed) edge.
            for occupancy in occupancy_time_list:
                for i in occupancy_time:
                    for j in occupancy:
                        if i[0] == j[0]:
                            # print(i)
                            # print(j)
                            # Interval-overlap test between [i0, i1] and [j0, j1].
                            if (j[1][0] <= i[1][0] and i[1][0] <= j[1][1]) \
                                or (j[1][0] <= i[1][1] and i[1][1] <= j[1][1]) \
                                or (i[1][0] <= j[1][0] and j[1][0] <= i[1][1]) \
                                or (i[1][0] <= j[1][1] and j[1][1] <= i[1][1]):
                                flag = 1
                                ac_num = ac_num-1  # reuse this callsign number next try
                                break
                    if flag == 1:
                        break
                if flag == 1:
                    break
            if flag == 0:
                print(path)
                obs_acs.append(Aircraft(current_position=G.edges[str(path[0]),str(path[1])]['waypoints'][3,0:2],
                                        current_heading=G.edges[str(path[0]),str(path[1])]['waypoints'][3,2],
                                        current_edge=(str(path[0]),str(path[1]), G.edges[str(path[0]),str(path[1])]),
                                        airport_graph=self.airport_graph, callsign='RND_'+str(ac_num+1),
                                        current_path=np.array(path), current_wpts=G.edges[str(path[0]),str(path[1])]['waypoints'][3,0:3],
                                        risk_bound=1, path_fixed=1, traj_fixed=0))
                occupancy_time_list.append(occupancy_time)
        return obs_acs
def is_connectable(self, edge1, edge2):
angle1 = edge1['waypoints'][-1,2]
angle2 = edge2['waypoints'][0,2]
if angle1<0:
angle1 = angle1+360
if angle2<0:
angle2 = angle2+360
angle_diff = abs(angle1-angle2)
if angle_diff > 180:
angle_diff = 360 - angle_diff
elif angle_diff < -180:
angle_diff = 360 + angle_diff
if angle_diff <= 90:
return True
else:
return False
class Map(object):
    """Static airport map (layout, runways, taxiways) bound to a matplotlib figure."""

    def __init__(self, layout, runways, taxiways, fig, ax):
        """Store the map geometry and the figure/axes used to display it.

        Args:
            layout: overall airport layout geometry.
            runways: runway geometry.
            taxiways: taxiway geometry.
            fig: matplotlib figure the map is drawn on.
            ax: matplotlib axes within `fig` used for drawing.
        """
        self.layout = layout
        self.runways = runways
        self.taxiways = taxiways
        self.fig = fig
        self.ax = ax

    def draw_map(self):
        """Show the figure without blocking and return the drawing axes.

        Returns:
            The matplotlib axes stored on this instance.
        """
        # The unused local aliases of self.fig/self.ax were removed; the
        # figure stays alive on the instance.
        plt.show(block=False)
        return self.ax
| null |
simulator.py
|
simulator.py
|
py
| 17,938 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scipy.io.io.loadmat",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "scipy.io.io.loadmat",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "scipy.io.io.loadmat",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.size",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.polygon.Polygon",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.polygon.Polygon",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.polygon.Polygon",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "descartes.PolygonPatch",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "descartes.PolygonPatch",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "descartes.PolygonPatch",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "descartes.PolygonPatch",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "descartes.PolygonPatch",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "descartes.PolygonPatch",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "numpy.size",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "aircraft.Aircraft",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "numpy.size",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "aircraft.Aircraft",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 390,
"usage_type": "name"
}
] |
347938655
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, render_to_response,redirect
from django.contrib.auth import authenticate, login
from ter.models import Way
from shutil import rmtree, copy2, copytree
import os
from os.path import isdir, isfile
import codecs
import platform
import ter.extension
if platform.system()=='Windows':
import ctypes
kdll = ctypes.windll.LoadLibrary("kernel32.dll")
# Create your views here.
def base(request):
if 'img' in os.listdir(os.path.join(os.path.abspath(os.curdir),'nav/static')):
for fi in os.listdir(os.path.join(os.path.abspath(os.curdir),'nav/static/img')):
os.remove(os.path.join(os.path.abspath(os.curdir),'nav/static/img/'+fi))
if 'vid' in os.listdir(os.path.join(os.path.abspath(os.curdir),'nav/static')):
for fi in os.listdir(os.path.join(os.path.abspath(os.curdir),'nav/static/vid')):
os.remove(os.path.join(os.path.abspath(os.curdir),'nav/static/vid/'+fi))
if 'auth' in request.COOKIES:
if "way" in request.COOKIES:
way=os.path.abspath(request.COOKIES["way"])
else:
#way=os.path.abspath(os.curdir)
pass
#if platform.system()=='Windows':
# way=os.path.abspath(os.curdir)#[0]+'://'+os.path.abspath(os.curdir).replace('\\','/')[3:]
if "cr_name" in request.POST:
if request.POST["cr_type"]=="dir":
if request.POST["cr_name"]!="" and not isfile(os.path.join(way,request.POST["cr_name"])):
os.mkdir(os.path.join(way,request.POST["cr_name"]))
elif request.POST["cr_type"]=="cr_file":
if request.POST["cr_name"]!="" and not isfile(os.path.join(way,request.POST["cr_name"])):
file = open(os.path.join(way,request.POST["cr_name"]), "w")
file.close()
if 'is_past' in request.POST:
if request.POST['is_past']=='true':
copy_file=request.COOKIES['copy_file'].split(',')
copy_dir=request.COOKIES['copy_dir'].split(',')
if copy_file[0]!='':
for x in copy_file:
if x!='':
copy2(x,way)
if copy_dir[0]!='':
for x in copy_dir:
if x!='':
copytree(x,way)
if 'new_path' in request.POST and os.path.abspath(request.POST["new_path"])!=os.path.abspath(way):
if isdir(os.path.abspath(request.POST["new_path"])):
way=os.path.abspath(request.POST["new_path"])
if "rm" in request.POST:
if isfile(os.path.abspath(request.POST["rm"])):
os.remove(os.path.abspath(request.POST["rm"]))
elif isdir(os.path.abspath(request.POST["rm"])):
rmtree(os.path.abspath(request.POST["rm"]))
if 'file_dir' in request.POST:
if request.POST['action']=='delite':
for x in request.POST.getlist('file_dir'):
if isfile(os.path.join(way,x)):
os.remove(os.path.join(way,x))
elif isdir(os.path.join(way,x)):
rmtree(os.path.join(way,x))
elif request.POST['action']=='copy':
copy_file=','
copy_dir=','
for x in request.POST.getlist('file_dir'):
if isfile(os.path.join(way,x)):
copy_file+=os.path.join(way,x)+","
elif isdir(os.path.join(way,x)):
copy_dir+=os.path.join(way,x)+","
files = os.listdir(way)
dirs=[]
d_files=[]
d_pic=[]
d_video=[]
for x in files:
w=os.path.join(way,x)
if isfile(w):
for ext in ter.extension.img:
if x.lower().endswith(ext):
if platform.system()=='Linux':
st=u'ln '+w+' '+os.path.abspath(os.curdir)+'/nav/static/img/'+x
os.system(st.encode("utf-8"))
d_pic.append(x.replace(' ','%'))
elif platform.system()=='Windows':
os.system('mklink '+os.path.abspath(os.path.join(os.curdir,'nav/static/img/'+x.replace(' ','%')))+' '+os.path.abspath(w.replace('%',' ')))
d_pic.append(x.replace(' ','%'))
#elif x.endswith('.mp4') or x.endswith('.avi') or x.endswith('.MP4') or x.endswith('.AVI') or x.endswith('.MKV') or x.endswith('.mkv'):
for ext in ter.extension.vid:
if x.lower().endswith(ext):
if platform.system()=='Linux':
st=u'ln '+w+' '+os.path.join(os.path.abspath(os.curdir),'nav/static/vid/'+x)
#os.system(st.encode("utf-8"))
d_video.append(x.replace(' ','%'))
elif platform.system()=='Windows':
os.system('mklink '+os.path.abspath(os.path.join(os.curdir,'/nav/static/vid/'+x.replace(' ','%')))+' '+os.path.abspath(w.replace('%',' ')))
d_video.append(x.replace(' ','%'))
else:
x=x.replace(" ","%")
d_files.append(x)
else:
x=x.replace(" ","%")
dirs.append(x)
auth=True
response=render_to_response('template.html',{'files':d_files,'pics':sorted(d_pic),'videos':sorted(d_video),
'dirs':sorted(dirs),'way':way[1:],'way_length':len(way),'select':sorted(files),})
response.set_cookie("way",way)
if 'action' in request.POST:
if request.POST['action']=='copy':
response.set_cookie("copy_file",copy_file)
response.set_cookie("copy_dir",copy_dir)
return response
else:
if 'username' in request.POST and 'password' in request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
response=redirect('/')
if user is not None:
if user.is_active:
login(request, user)
response=redirect('/')
response.set_cookie("auth",True)
return response
else:
return response
else:
return response
else:
return render_to_response('auth.html',{})
def ch_dir(request,n_dir):
o_way=request.COOKIES["way"]
if n_dir=="never_use_this_name_for_dir":
o_way=o_way.split("/")
way="/".join(o_way[:-1])
if way=='' or way=='/' or way==':/' or way.endswith(':'):
way="/"
if platform.system()!='Linux':
way=os.path.abspath(os.curdir)[0]+':/'
else:
n_dir=n_dir.replace("%"," ")
way=os.path.join(o_way,n_dir)
response=redirect('/')
response.set_cookie("way",way)#sthash.B2QcCZQv.dpuf)
#print('way: '+way)
return response
def ch_file(request,f_name):
if "changes" in request.POST:
f = codecs.open(os.path.join(os.path.abspath(request.COOKIES["way"]),f_name),'w','utf-8')
stri=request.POST["changes"]
f.write(stri)
f.close()
response=redirect('/')
return response
#f=codecs.open(unicode(request.COOKIES["way"],'UTF-8')+'/'+f_name)
if f_name.endswith('.html'):
lang='html'
elif f_name.endswith('.js'):
lang='javascript'
elif f_name.endswith('.css'):
lang='css'
elif f_name.endswith('.py'):
lang='python'
elif f_name.endswith('.php'):
lang='php'
elif f_name.endswith('.rb'):
lang='ruby'
elif f_name.endswith('.c') or f_name.endswith('.cpp'):
lang='c_cpp'
elif f_name.endswith('.pl'):
lang='perl'
elif f_name.endswith('.lua'):
lang='lua'
else:
lang='html'
f=open(os.path.join(os.path.abspath(request.COOKIES["way"]),f_name,))# encoding="utf8")
try:
fi=f.read().encode("utf-8", "replace")
except UnicodeDecodeError:
response=redirect('/')
return response
response=render_to_response('editor.html',{'file':fi,'f_name':f_name,'lang':lang})
f.close()
return response
| null |
ter/views.py
|
views.py
|
py
| 7,061 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "platform.system",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ctypes.windll.LoadLibrary",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ctypes.windll",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy2",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "shutil.copytree",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "ter.models.extension",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "ter.models",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "platform.system",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.curdir",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "ter.models.extension",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "ter.models",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "platform.system",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "platform.system",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.curdir",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "os.curdir",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 199,
"usage_type": "call"
}
] |
647937047
|
from django.shortcuts import render, redirect
from .models import Article, Comment
from .form import ArticleForm, CommentForm
# Create your views here.
def index(request):
articles = Article.objects.all()
context = {
'articles': articles
}
return render(request, 'index.html', context)
def new(request):
if request.method == 'POST':
article_form = ArticleForm(request.POST)
if article_form.is_valid():
article_form.save()
return redirect('/start/')
else:
article_form = ArticleForm()
context = {
'article_form': article_form
}
return render(request, 'form.html', context)
def detail(request, article_pk):
article = Article.objects.get(pk=article_pk)
comment_form = CommentForm()
comments = Comment.objects.all()
context = {
'article': article,
'comment_form': comment_form,
'comments': comments
}
return render(request, 'detail.html', context)
def update(request, article_pk):
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
article_form = ArticleForm(request.POST, instance=article)
if article_form.is_valid():
article = article_form.save()
return redirect(f'/start/detail/{article_pk}')
else:
article_form = ArticleForm(instance=article)
context = {
'article_form': article_form
}
return render(request, 'update.html', context)
def delete(request, article_pk):
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
article.delete()
return redirect('/start/')
else:
return redirect(f'/start/detail/{article_pk}')
def commentcreate(request, article_pk):
if request.method == 'POST':
article = Article.objects.get(pk=article_pk)
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
comment = comment_form.save(commit=False)
comment.article = article
comment.save()
return redirect(f'/start/detail/{article_pk}')
def commentdelete(request, article_pk, comment_pk):
comment = Comment.objects.get(pk=comment_pk)
comment.delete()
return redirect(f'/start/detail/{article_pk}')
| null |
django_pair/start/views.py
|
views.py
|
py
| 2,319 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Article.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Article.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Article",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "form.ArticleForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "form.ArticleForm",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.Article.objects.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Article.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "models.Article",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "form.CommentForm",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.Comment.objects.all",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Comment.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "models.Comment",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.Article.objects.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.Article.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "models.Article",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "form.ArticleForm",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "form.ArticleForm",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Article.objects.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.Article.objects",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "models.Article",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.Article.objects.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "models.Article.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "models.Article",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "form.CommentForm",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "models.Comment.objects.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "models.Comment.objects",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "models.Comment",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 73,
"usage_type": "call"
}
] |
641215895
|
import logging
import unittest
from mock import Mock
from stompest.config import StompConfig
from stompest.error import StompConnectionError, StompProtocolError
from stompest.protocol import StompFrame, StompSpec, commands
from stompest.sync import Stomp
logging.basicConfig(level=logging.DEBUG)
HOST = 'fakeHost'
PORT = 61613
CONFIG = StompConfig('tcp://%s:%s' % (HOST, PORT), check=False)
class SimpleStompTest(unittest.TestCase):
def _get_transport_mock(self, receive=None, config=None):
stomp = Stomp(config or CONFIG)
stomp._transport = Mock()
if receive:
stomp._transport.receive.return_value = receive
return stomp
def _get_connect_mock(self, receive=None, config=None):
stomp = Stomp(config or CONFIG)
stomp._transportFactory = Mock()
transport = stomp._transportFactory.return_value = Mock()
transport.host = 'mock'
transport.port = 0
if receive:
transport.receive.return_value = receive
return stomp
def test_receiveFrame(self):
frame_ = StompFrame('MESSAGE', {'x': 'y'}, 'testing 1 2 3')
stomp = self._get_transport_mock(frame_)
frame = stomp.receiveFrame()
self.assertEquals(frame_, frame)
self.assertEquals(1, stomp._transport.receive.call_count)
def test_canRead_raises_exception_before_connect(self):
stomp = Stomp(CONFIG)
self.assertRaises(Exception, stomp.canRead)
def test_send_raises_exception_before_connect(self):
stomp = Stomp(CONFIG)
self.assertRaises(StompConnectionError, stomp.send, '/queue/foo', 'test message')
def test_subscribe_raises_exception_before_connect(self):
stomp = Stomp(CONFIG)
self.assertRaises(Exception, stomp.subscribe, '/queue/foo')
def test_disconnect_raises_exception_before_connect(self):
stomp = Stomp(CONFIG)
self.assertRaises(Exception, stomp.disconnect)
def test_connect_raises_exception_for_bad_host(self):
stomp = Stomp(StompConfig('tcp://nosuchhost:2345'))
self.assertRaises(Exception, stomp.connect)
def test_error_frame_after_connect_raises_StompProtocolError(self):
stomp = self._get_connect_mock(StompFrame('ERROR', body='fake error'))
self.assertRaises(StompProtocolError, stomp.connect)
self.assertEquals(stomp._transport.receive.call_count, 1)
def test_connect_when_connected_raises_StompConnectionError(self):
stomp = self._get_transport_mock()
self.assertRaises(StompConnectionError, stomp.connect)
def test_connect_writes_correct_frame(self):
login = 'curious'
passcode = 'george'
stomp = self._get_connect_mock(StompFrame('CONNECTED', {StompSpec.SESSION_HEADER: '4711'}))
stomp._config.login = login
stomp._config.passcode = passcode
stomp.connect()
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('CONNECT', {'login': login, 'passcode': passcode}), sentFrame)
def test_send_writes_correct_frame(self):
destination = '/queue/foo'
message = 'test message'
headers = {'foo': 'bar', 'fuzz': 'ball'}
stomp = self._get_transport_mock()
stomp.send(destination, message, headers)
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('SEND', {StompSpec.DESTINATION_HEADER: destination, 'foo': 'bar', 'fuzz': 'ball'}, message), sentFrame)
def test_subscribe_writes_correct_frame(self):
destination = '/queue/foo'
headers = {'foo': 'bar', 'fuzz': 'ball'}
stomp = self._get_transport_mock()
stomp.subscribe(destination, headers)
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('SUBSCRIBE', {StompSpec.DESTINATION_HEADER: destination, 'foo': 'bar', 'fuzz': 'ball'}, ''), sentFrame)
def test_subscribe_matching_and_corner_cases(self):
destination = '/queue/foo'
headers = {'foo': 'bar', 'fuzz': 'ball'}
stomp = self._get_transport_mock()
token = stomp.subscribe(destination, headers)
self.assertEquals(token, (StompSpec.DESTINATION_HEADER, destination))
self.assertEquals(stomp.message(StompFrame(StompSpec.MESSAGE, {StompSpec.MESSAGE_ID_HEADER: '4711', StompSpec.DESTINATION_HEADER: destination})), token)
self.assertRaises(StompProtocolError, stomp.message, StompFrame(StompSpec.MESSAGE, {StompSpec.MESSAGE_ID_HEADER: '4711', StompSpec.DESTINATION_HEADER: 'unknown'}))
self.assertRaises(StompProtocolError, stomp.message, StompFrame(StompSpec.MESSAGE, {StompSpec.DESTINATION_HEADER: destination}))
def test_stomp_version_1_1(self):
destination = '/queue/foo'
stomp = self._get_transport_mock(config=StompConfig('tcp://%s:%s' % (HOST, PORT), version='1.1', check=False))
stomp._transport = Mock()
frame = StompFrame(StompSpec.MESSAGE, {StompSpec.MESSAGE_ID_HEADER: '4711', StompSpec.DESTINATION_HEADER: destination})
self.assertRaises(StompProtocolError, stomp.nack, frame)
frame = StompFrame(StompSpec.MESSAGE, {StompSpec.MESSAGE_ID_HEADER: '4711', StompSpec.DESTINATION_HEADER: destination, StompSpec.SUBSCRIPTION_HEADER: '0815'})
stomp.nack(frame, receipt='123')
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(commands.nack(frame, receipt='123', version='1.1'), sentFrame)
def test_ack_writes_correct_frame(self):
id_ = '12345'
stomp = self._get_transport_mock()
stomp.ack(StompFrame('MESSAGE', {StompSpec.MESSAGE_ID_HEADER: id_}, 'blah'))
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('ACK', {StompSpec.MESSAGE_ID_HEADER: id_}), sentFrame)
def test_transaction_writes_correct_frames(self):
transaction = '4711'
stomp = self._get_transport_mock()
for (method, command) in [
(stomp.begin, 'BEGIN'), (stomp.commit, 'COMMIT'),
(stomp.begin, 'BEGIN'), (stomp.abort, 'ABORT')
]:
method(transaction)
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame(command, {'transaction': transaction}), sentFrame)
with stomp.transaction(transaction):
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('BEGIN', {'transaction': transaction}), sentFrame)
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('COMMIT', {'transaction': transaction}), sentFrame)
try:
with stomp.transaction(transaction):
raise
except:
args, _ = stomp._transport.send.call_args
sentFrame = args[0]
self.assertEquals(StompFrame('ABORT', {'transaction': transaction}), sentFrame)
if __name__ == '__main__':
unittest.main()
| null |
stompest/tests/sync_client_test.py
|
sync_client_test.py
|
py
| 7,165 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "stompest.config.StompConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "stompest.error.StompConnectionError",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "stompest.sync.Stomp",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "stompest.config.StompConfig",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "stompest.error.StompProtocolError",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "stompest.error.StompConnectionError",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.SESSION_HEADER",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE_ID_HEADER",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "stompest.error.StompProtocolError",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE_ID_HEADER",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "stompest.error.StompProtocolError",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "stompest.config.StompConfig",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE_ID_HEADER",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "stompest.error.StompProtocolError",
"line_number": 117,
"usage_type": "argument"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE_ID_HEADER",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec.DESTINATION_HEADER",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec.SUBSCRIPTION_HEADER",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.commands.nack",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.commands",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE_ID_HEADER",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompSpec.MESSAGE_ID_HEADER",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "stompest.protocol.StompSpec",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "stompest.protocol.StompFrame",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 162,
"usage_type": "call"
}
] |
640023408
|
import pickle
import re
from pathlib import Path
from string import punctuation
from typing import IO, Any, Dict, List, NamedTuple, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from spacy import displacy
from .indexing import PaperIndexer
from .jsonformatter import generate_clean_df
# NOTE: This needs to be removed.
Cord19Paths = NamedTuple(
'Cord19Paths', [
('readme', Path), ('metadata', Path), ('dirs', List[Path]),
('pmc_custom_license', Path),
('biorxiv_medrxiv', Path),
('comm_use_subset', Path),
('noncomm_use_subset', Path), ])
def clean_punctuation(text: str) -> str:
punct = re.compile("[{}]".format(re.escape(punctuation)))
tokens = word_tokenize(text)
text = " ".join(filter(lambda t: punct.sub("", t), tokens))
return normalize_whitespace(text)
def normalize_whitespace(string: str) -> str:
"""Normalize excessive whitespace."""
linebreak = re.compile(r"(\r\n|[\n\v])+")
nonebreaking_space = re.compile(r"[^\S\n\v]+", flags=re.UNICODE)
return nonebreaking_space.sub(" ", linebreak.sub(r"\n", string)).strip()
def clean_tokenization(sequence: str) -> str:
"""Clean up spaces before punctuations and abbreviated forms."""
return (
sequence.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" do not", " don't")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
.replace(" / ", "/")
.replace(" )", ")")
.replace("( ", "(")
.replace("[ ", "[")
.replace(" ]", "]")
.replace(" ;", ";")
.replace(" - ", "-")
)
def split_dataset(dataset: List[Any],
subset: float = 0.8,
samples: int = None,
seed: int = 12345) -> Tuple[List[Any], List[Any]]:
"""Split an iterable dataset into a train and evaluation sets."""
np.random.seed(seed)
np.random.shuffle(dataset)
maxlen = len(dataset)
if not samples or samples > maxlen:
samples = maxlen
split = int(subset * samples)
train_data = dataset[:split]
test_data = dataset[split:samples]
return train_data, test_data
class DataIO:
@staticmethod
def save_data(file_path: str, data_obj: Any) -> IO:
file_path = Path(file_path)
if file_path.is_dir():
if not file_path.exists():
file_path.mkdir(parents=True)
with file_path.open("wb") as pkl:
pickle.dump(data_obj, pkl, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load_data(file_path: str) -> Any:
file_path = Path(file_path)
with file_path.open("rb") as pkl:
return pickle.load(pkl)
def load_dataset_paths(basedir: str) -> Cord19Paths:
"""Return an organized representation of all paths in the dataset.
```python
basedir = "path/to/CORD-19-research-challenge/2020-03-13/"
load_dataset_paths(basedir)._fields
...
('readme', 'metadata', 'dirs',
'pmc_custom_license',
'biorxiv_medrxiv',
'comm_use_subset',
'noncomm_use_subset')
```
"""
basedir = Path(basedir)
paths, filesdir = {}, []
for p in basedir.iterdir():
if p.suffix == '.csv':
paths['metadata'] = p
elif p.suffix == '.readme':
paths['readme'] = p
elif p.is_dir():
dirdir = p.joinpath(p.name)
if dirdir.is_dir():
filesdir.append(dirdir)
paths['dirs'] = filesdir
for p in filesdir:
paths[p.name] = p
return Cord19Paths(**paths)
def render(question: str, prediction: Dict[str, str], jupyter=True,
return_html=False, style="ent", manual=True, label='ANSWER'):
"""Spacy displaCy visualization util for the question answering model."""
options = {"compact": True, "bg": "#ed7118", "color": '#000000'}
display_data = {}
start, end = 0, 0
match = re.search(prediction["answer"], prediction["context"])
if match and match.span() is not None:
start, end = match.span()
display_data["ents"] = [{'start': start, 'end': end, 'label': label}]
options['ents'] = [label]
options['colors'] = {label: "linear-gradient(90deg, #aa9cfc, #fc9ce7)"}
if len(prediction['context']) > 1:
display_data['text'] = prediction['context']
display_data['title'] = f'Q : {question}\n'
if return_html:
return displacy.render([display_data], style=style,
jupyter=False, options=options, manual=manual)
else:
displacy.render([display_data], style=style,
page=False, minify=True,
jupyter=jupyter, options=options, manual=manual)
def papers_to_csv(sources: Union[str, Cord19Paths],
dirs: Tuple[Sequence[str]] = ('all',),
out_dir='data') -> None:
"""Convert one or more directories with json files into a csv file(s).
`sources`: Path to the `CORD-19-research-challenge/2020-03-13/` dataset
directory, or an instance of `Cord19Paths`.
`dirs`: Use `all` for all available directories or a sequence of the names.
You can pass the full name or the first three characters e.g., `('pmc
', 'bio', 'com', 'non')`
`out_dir`: Directory where the csv files will be saved.
"""
if isinstance(sources, str):
if not Path(sources).exists():
raise ValueError("Invalid path, got {sources}")
sources = load_dataset_paths(sources)
assert isinstance(sources, Cord19Paths)
out_dir = Path(out_dir)
if not out_dir.is_dir():
out_dir.mkdir(parents=True)
metadata = pd.read_csv(sources.metadata)
has_full_text = metadata.loc[metadata["has_full_text"] == True, ["sha"]]
has_full_text = list(set(has_full_text["sha"].to_list()))
def with_full_text(index: PaperIndexer) -> List[int]:
# filter only paper-id's if full-text is available in the metadata
indices = []
for paper_id in has_full_text:
if paper_id in index.paper_index:
paper_id = index[paper_id]
if paper_id in indices:
continue
indices.append(paper_id)
return indices
if len(dirs) == 4 or 'all' in dirs:
sources = sources.dirs
else:
sources = [d for d in sources.dirs if d.name[:3] in dirs]
for path in sources:
index = PaperIndexer(path)
papers = index.load_papers(with_full_text(index))
df = generate_clean_df(papers)
filepath = out_dir.joinpath(f"{index.source_name}.csv")
df.to_csv(filepath, index=False)
print("All {} files from directory {} saved in: {}".format(
index.num_papers, index.source_name, filepath))
def concat_csv_files(source_dir: str,
file_name="covid-lg.csv",
out_dir="data",
drop_cols=["raw_authors", "raw_bibliography"],
return_df=False):
"""Concat all CSV files into one single file.
return_df: If True, saving to file is ignored and the pandas
DataFrame instance holding the data is returned.
Usage:
>>> concat_csv_files('path/to/csv-files-dir/', out_dir='data')
"""
dataframes = []
for csv_file in Path(source_dir).glob("*.csv"):
df = pd.read_csv(csv_file, index_col=None, header=0)
df.drop(columns=drop_cols, inplace=True)
dataframes.append(df)
master_df = pd.concat(dataframes, axis=0, ignore_index=True)
if not return_df:
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True)
file_path = out_dir.joinpath(file_name)
master_df.to_csv(file_path, index=False)
else:
return master_df
| null |
corona_nlp/utils.py
|
utils.py
|
py
| 8,049 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.NamedTuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "typing.IO",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "spacy.displacy.render",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "spacy.displacy",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "spacy.displacy.render",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "spacy.displacy",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "indexing.PaperIndexer",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "indexing.PaperIndexer",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "jsonformatter.generate_clean_df",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 230,
"usage_type": "call"
}
] |
353774507
|
import os
import numpy as np
import xlrd
import scipy
from scipy.stats import norm
from pathlib import Path
import pandas as pd
from models.regression.ada_boost_regressor import Ada_boost_regressor
from models.regression.decision_tree_regressor import Decision_tree_regressor
from models.regression.gaussian_process_regressor import Gaussian_process_regressor
from models.regression.linear_least_squares import Linear_least_squares
from models.regression.neural_network_regressor import Neural_network_regressor
from models.regression.random_forest_regressor import Random_forest_regressor
from models.regression.support_vector_regressor import Support_vector_regressor
from sklearn.model_selection import train_test_split
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn import preprocessing
class Student_performance:
data = []
targets = []
x_train = []
x_test = []
y_train = []
y_test = []
def __init__(self):
filepath = 'datasets/regression_datasets/7_Student_Performance/'
# filename1 = 'student-mat.csv'
filename2 = 'student-por.csv'
# read the 2 data files
# file1 = np.loadtxt(os.path.join(filepath, filename1),
# delimiter=';', dtype=np.object, skiprows=1)
file2 = np.loadtxt(os.path.join(filepath, filename2),
delimiter=';', dtype=np.object, skiprows=1)
self.data = np.asarray(file2.tolist())
self.data = file2[:, :-1]
self.targets = file2[:, -1]
numeric = self.data[:, np.r_[2, 6, 7, 12:14, 23:30]]
self.data = np.delete(self.data,[2, 6, 7, 12, 13, 14,
23, 24 ,25 ,26 ,27, 28, 29, 30],axis=1)
encode = preprocessing.OneHotEncoder().fit(self.data)
self.data = encode.transform(self.data).toarray()
self.data = np.column_stack((self.data, numeric))
self.data = np.asarray(self.data, dtype=np.float32)
# 2 6 7 12 13 14 23 ~ 32
# 0 1 3 4 5 8 9 10 11 15 16 17 18 19 20 21 22
# split into train and test sets
self.x_train, self.x_test, self.y_train, self.y_test = \
train_test_split(self.data, self.targets, test_size=0.33,
random_state=0)
scaler = preprocessing.StandardScaler().fit(self.x_train)
self.x_train = scaler.transform(self.x_train)
self.x_test = scaler.transform(self.x_test)
def support_vector_regression(self):
C = np.logspace(start=-1, stop=3, base=10, num=5,
dtype=np.float32)
gamma = np.logspace(start=-1, stop=1, base=10, num=3,
dtype=np.float32)
kernel = ['rbf', 'linear', 'sigmoid']
svr = Support_vector_regressor(
x_train=self.x_train,
y_train=self.y_train,
cv=3,
n_jobs=10,
C=C,
kernel=kernel,
gamma=gamma,
grid_search=True)
# svr.print_parameter_candidates()
# svr.print_best_estimator()
return (svr.evaluate(data=self.x_train, targets=self.y_train),
svr.evaluate(data=self.x_test, targets=self.y_test))
def decision_tree_regression(self):
max_depth = range(1, 20, 2)
min_samples_leaf = (1, 20, 2)
dtr = Decision_tree_regressor(
x_train=self.x_train,
y_train=self.y_train,
cv=3,
n_jobs=10,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
grid_search=True)
# dtr.print_parameter_candidates()
# dtr.print_best_estimator()
return (dtr.evaluate(data=self.x_train, targets=self.y_train),
dtr.evaluate(data=self.x_test, targets=self.y_test))
def random_forest_regression(self):
n_estimators = range(1, 200, 50)
max_depth = range(1, 20, 2)
rfr = Random_forest_regressor(
x_train=self.x_train,
y_train=self.y_train,
cv=3,
n_jobs=10,
n_estimators=n_estimators,
max_depth=max_depth,
grid_search=True)
# rfr.print_parameter_candidates()
# rfr.print_best_estimator()
return (rfr.evaluate(data=self.x_train, targets=self.y_train),
rfr.evaluate(data=self.x_test, targets=self.y_test))
def ada_boost_regression(self):
n_estimators = range(1, 100, 5)
learning_rate = np.logspace(start=-2, stop=0, base=10, num=3,
dtype=np.float32) # [0.01, 0.1, 1]
abr = Ada_boost_regressor(
x_train=self.x_train,
y_train=self.y_train,
cv=5,
n_jobs=10,
n_estimators=n_estimators,
learning_rate=learning_rate,
grid_search=True)
# abr.print_parameter_candidates()
# abr.print_best_estimator()
return (abr.evaluate(data=self.x_train, targets=self.y_train),
abr.evaluate(data=self.x_test, targets=self.y_test))
def gaussian_process_regression(self):
alpha = np.logspace(start=-10, stop=-7, base=10, num=4,
dtype=np.float32)
gpr = Gaussian_process_regressor(
x_train=self.x_train,
y_train=self.y_train,
cv=3,
n_jobs=-1,
alpha=alpha,
grid_search=True)
# gpr.print_parameter_candidates()
# gpr.print_best_estimator()
return (gpr.evaluate(data=self.x_train, targets=self.y_train),
gpr.evaluate(data=self.x_test, targets=self.y_test))
def linear_least_squares(self):
np.random.seed(0)
alpha = norm.rvs(loc=64, scale=2, size=3).astype(np.float32)
max_iter = norm.rvs(loc=100, scale=20, size=3).astype(np.int)
solver = ('auto', 'svd', 'cholesky', 'lsqr', 'saga')
lls = Linear_least_squares(
x_train=self.x_train,
y_train=self.y_train,
cv=5,
alpha=alpha,
max_iter=max_iter,
solver=solver,
grid_search=True
)
# print all possible parameter values and the best parameters
# lls.print_parameter_candidates()
# lls.print_best_estimator()
return (lls.evaluate(data=self.x_train, targets=self.y_train),
lls.evaluate(data=self.x_test, targets=self.y_test))
def neural_network_regression(self):
reciprocal_distribution_hls = scipy.stats.reciprocal(a=100, b=1000)
reciprocal_distribution_mi = scipy.stats.reciprocal(a=1000, b=10000)
np.random.seed(0)
hidden_layer_sizes = \
reciprocal_distribution_hls.rvs(size=5).astype(np.int)
activation = ['logistic', 'tanh', 'relu']
max_iter = reciprocal_distribution_mi.rvs(size=5).astype(np.int)
nnr = Neural_network_regressor(
x_train=self.x_train,
y_train=self.y_train,
cv=3,
n_jobs=-1,
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
max_iter=max_iter,
random_search=True)
# nnr.print_parameter_candidates()
# nnr.print_best_estimator()
return (nnr.evaluate(data=self.x_train, targets=self.y_train),
nnr.evaluate(data=self.x_test, targets=self.y_test))
if __name__ == '__main__':
sp = Student_performance()
svr_results = sp.support_vector_regression()
dtr_results = sp.decision_tree_regression()
rfr_results = sp.random_forest_regression()
abr_results = sp.ada_boost_regression()
gpr_results = sp.gaussian_process_regression()
lls_results = sp.linear_least_squares()
nnr_results = sp.neural_network_regression()
print("(mean_square_error, r2_score) on training set:")
print('SVR: (%.3f, %.3f)' % (svr_results[0]))
print('DTR: (%.3f, %.3f)' % (dtr_results[0]))
print('RFR: (%.3f, %.3f)' % (rfr_results[0]))
print('ABR: (%.3f, %.3f)' % (abr_results[0]))
print('GPR: (%.3f, %.3f)' % (gpr_results[0]))
print('LLS: (%.3f, %.3f)' % (lls_results[0]))
print('NNR: (%.3f, %.3f)' % (nnr_results[0]))
print("(mean_square_error, r2_score) on test set:")
print('SVR: (%.3f, %.3f)' % (svr_results[1]))
print('DTR: (%.3f, %.3f)' % (dtr_results[1]))
print('RFR: (%.3f, %.3f)' % (rfr_results[1]))
print('ABR: (%.3f, %.3f)' % (abr_results[1]))
print('GPR: (%.3f, %.3f)' % (gpr_results[1]))
print('LLS: (%.3f, %.3f)' % (lls_results[1]))
print('NNR: (%.3f, %.3f)' % (nnr_results[1]))
| null |
Default_Project/Master_Script_part_1/models/regression/Student_Performance.py
|
Student_Performance.py
|
py
| 8,543 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.loadtxt",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.object",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "numpy.column_stack",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "numpy.logspace",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.logspace",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.regression.support_vector_regressor.Support_vector_regressor",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "models.regression.decision_tree_regressor.Decision_tree_regressor",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "models.regression.random_forest_regressor.Random_forest_regressor",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "models.regression.ada_boost_regressor.Ada_boost_regressor",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "models.regression.gaussian_process_regressor.Gaussian_process_regressor",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.norm.rvs",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.norm.rvs",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "numpy.int",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "models.regression.linear_least_squares.Linear_least_squares",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "scipy.stats.reciprocal",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.reciprocal",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "numpy.int",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "numpy.int",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "models.regression.neural_network_regressor.Neural_network_regressor",
"line_number": 190,
"usage_type": "call"
}
] |
89627227
|
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.cuda.amp import autocast, GradScaler
from torchvision.utils import save_image
from torch.utils.data import random_split
import numpy as np
from ..general import XDoGAnimeFaceDataset, XDoGDanbooruPortraitDataset, to_loader
from ..general import Status, get_device, save_args
from ..gan_utils import DiffAugment
from ..gan_utils.losses import LSGANLoss, VGGLoss
from .tps import tps_transform
from .model import Generator, Discriminator, init_weight_kaiming, init_weight_xavier, init_weight_N002
l1_loss = nn.L1Loss()
def triplet_loss(anchor, negative, positive, margin):
scale = np.sqrt(anchor[0].numel())
B = anchor.size(0)
an = torch.bmm(anchor.reshape(B, 1, -1), negative.reshape(B, -1, 1)) / scale
ap = torch.bmm(anchor.reshape(B, 1, -1), positive.reshape(B, -1, 1)) / scale
loss = F.relu(-ap+an+margin).mean()
return loss
def train(
max_iters, dataset, test_batch,
G, D, optimizer_G, optimizer_D,
color_augment, spatial_augment,
recon_lambda, style_lambda, perc_lambda,
triplet_lambda, margin,
amp, device, save
):
status = Status(max_iters)
loss = LSGANLoss()
vgg = VGGLoss(device, p=1)
scaler = GradScaler() if amp else None
while status.batches_done < max_iters:
for real, sketch in dataset:
optimizer_D.zero_grad()
optimizer_G.zero_grad()
real = real.to(device)
sketch = sketch.to(device)
'''Discriminator'''
with autocast(amp):
# augment
real = color_augment(real)
real_s = spatial_augment(real)
# D(real)
real_prob, _ = D(torch.cat([sketch, real], dim=1))
# D(G(sketch, Is))
fake, qk_p = G(sketch, real_s, True)
fake_prob, _ = D(torch.cat([sketch, fake.detach()], dim=1))
# D(G(sketch, Ir))
_, qk_n = G(sketch, real, True)
# loss
# adv
D_loss = loss.d_loss(real_prob, fake_prob)
if scaler is not None:
scaler.scale(D_loss).backward()
scaler.step(optimizer_D)
else:
D_loss.backward()
optimizer_D.step()
'''Generator'''
with autocast(amp):
# D(G(sketch, Is))
fake_prob, _ = D(torch.cat([sketch, fake], dim=1))
# loss
# adv
G_loss = loss.g_loss(fake_prob)
# reconstruction
if recon_lambda > 0:
G_loss = G_loss \
+ l1_loss(fake, real) * recon_lambda
# style
if style_lambda > 0:
G_loss = G_loss \
+ vgg.style_loss(real, fake) * style_lambda
# perceptual
if perc_lambda > 0:
G_loss = G_loss \
+ vgg.vgg_loss(real, fake, [0, 1, 2, 3]) * perc_lambda
# triplet
if triplet_lambda > 0:
G_loss = G_loss \
+ triplet_loss(qk_p[0], qk_n[1], qk_p[1], margin) * triplet_lambda
if scaler is not None:
scaler.scale(G_loss).backward()
scaler.step(optimizer_G)
else:
G_loss.backward()
optimizer_G.step()
# save
if status.batches_done % save == 0:
with torch.no_grad():
G.eval()
image = G(test_batch[1], test_batch[0])
G.train()
image_grid = _image_grid(test_batch[1], test_batch[0], image)
save_image(
image_grid, f'implementations/SCFT/result/{status.batches_done}.jpg',
nrow=3*3, normalize=True, value_range=(-1, 1)
)
torch.save(G.state_dict(), f'implementations/SCFT/result/G_{status.batches_done}.pt')
save_image(fake, 'running.jpg', nrow=5, normalize=True, value_range=(-1, 1))
# updates
loss_dict = dict(
G=G_loss.item() if not torch.any(torch.isnan(G_loss)) else 0,
D=D_loss.item() if not torch.any(torch.isnan(D_loss)) else 0
)
status.update(loss_dict)
if scaler is not None:
scaler.update()
if status.batches_done == max_iters:
break
status.plot()
def _image_grid(src, ref, gen):
srcs = src.expand(src.size(0), 3, *src.size()[2:]).chunk(src.size(0), dim=0)
refs = ref.chunk(ref.size(0), dim=0)
gens = gen.chunk(gen.size(0), dim=0)
images = []
for src, ref, gen in zip(srcs, refs, gens):
images.extend([src, ref, gen])
return torch.cat(images, dim=0)
def add_arguments(parser):
parser.add_argument('--num-test', default=9, type=int, help='number of images for eval')
parser.add_argument('--sketch-channels', default=1, type=int, help='number of channels in sketch images')
parser.add_argument('--ref-channels', default=3, type=int, help='number of channels in reference images')
parser.add_argument('--bottom-width', default=8, type=int, help='bottom width in model')
parser.add_argument('--enc-channels', default=16, type=int, help='channel width multiplier for encoder/decoder')
parser.add_argument('--layer-per-resl', default=2, type=int, help='number of layers per resolution')
parser.add_argument('--num-res-blocks', default=7, type=int, help='number of residual blocks in G')
parser.add_argument('--disable-sn', default=False, action='store_true', help='disable spectral norm')
parser.add_argument('--disable-bias', default=False, action='store_true', help='disable bias')
parser.add_argument('--enable-scft-bias', default=False, action='store_true', help='enable bias in SCFT (attention)')
parser.add_argument('--norm-name', default='in', choices=['in', 'bn'], help='normalization layer name')
parser.add_argument('--act-name', default='lrelu', choices=['lrelu', 'relu'], help='activation function name')
parser.add_argument('--num-layers', default=3, type=int, help='number of layers in D')
parser.add_argument('--d-channels', default=32, type=int, help='channel width multiplier for D')
parser.add_argument('--d-lr', default=0.0002, type=float, help='learning rate for D')
parser.add_argument('--g-lr', default=0.0001, type=float, help='learning rate for G')
parser.add_argument('--betas', default=[0.5, 0.999], type=float, nargs=2, help='betas')
parser.add_argument('--recon-lambda', default=30., type=float, help='lambda for reconstruction loss')
parser.add_argument('--triplet-lambda', default=1., type=float, help='lambda for triplet loss')
parser.add_argument('--margin', default=12., type=float, help='margin for triplet loss')
parser.add_argument('--perc-lambda', default=0.01, type=float, help='lambda for perceptual loss')
parser.add_argument('--style-lambda', default=50., type=float, help='lambda for style loss')
return parser
def main(parser):
parser = add_arguments(parser)
args = parser.parse_args()
save_args(args)
amp = not args.disable_gpu and not args.disable_amp
device = get_device(not args.disable_gpu)
# data
if args.dataset == 'animeface':
dataset = XDoGAnimeFaceDataset(args.image_size, args.min_year)
elif args.dataset == 'danbooru':
dataset = XDoGDanbooruPortraitDataset(args.image_size, num_images=args.num_images+args.num_test)
dataset, test = random_split(dataset, [len(dataset)-args.num_test, args.num_test])
# train
dataset = to_loader(dataset, args.batch_size, use_gpu=not args.disable_gpu)
# test
test = to_loader(test, args.num_test, shuffle=False, use_gpu=False)
test_batch = next(iter(test))
test_batch = (test_batch[0].to(device), test_batch[1].to(device))
if args.max_iters < 0:
args.max_iters = len(dataset) * args.default_epochs
# model
G = Generator(
args.image_size, args.sketch_channels, args.ref_channels,
args.bottom_width, args.enc_channels, args.layer_per_resl, args.num_res_blocks,
not args.disable_sn, not args.disable_bias, args.enable_scft_bias, args.norm_name, args.act_name
)
D = Discriminator(
args.image_size, args.sketch_channels+args.ref_channels, args.num_layers, args.d_channels,
not args.disable_sn, not args.disable_bias, args.norm_name, args.act_name
)
G.apply(init_weight_N002)
D.apply(init_weight_N002)
G.to(device)
D.to(device)
# optimizers
optimizer_G = optim.Adam(G.parameters(), lr=args.g_lr, betas=args.betas)
optimizer_D = optim.Adam(D.parameters(), lr=args.d_lr, betas=args.betas)
# augmentations
color_augment = functools.partial(
DiffAugment, policy='color'
)
spatial_augment = tps_transform
train(
args.max_iters, dataset, test_batch,
G, D, optimizer_G, optimizer_D,
color_augment, spatial_augment,
args.recon_lambda, args.style_lambda, args.perc_lambda,
args.triplet_lambda, args.margin,
amp, device, args.save
)
| null |
implementations/SCFT/utils.py
|
utils.py
|
py
| 9,480 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.L1Loss",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "general.Status",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "gan_utils.losses.LSGANLoss",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "gan_utils.losses.VGGLoss",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.GradScaler",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.autocast",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.autocast",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.save_image",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.any",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.isnan",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.any",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.isnan",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "general.save_args",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "general.get_device",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "general.XDoGAnimeFaceDataset",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "general.XDoGDanbooruPortraitDataset",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.random_split",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "general.to_loader",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "general.to_loader",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "model.Generator",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "model.Discriminator",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "model.init_weight_N002",
"line_number": 210,
"usage_type": "argument"
},
{
"api_name": "model.init_weight_N002",
"line_number": 211,
"usage_type": "argument"
},
{
"api_name": "torch.optim.Adam",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "gan_utils.DiffAugment",
"line_number": 221,
"usage_type": "argument"
},
{
"api_name": "tps.tps_transform",
"line_number": 223,
"usage_type": "name"
}
] |
579848129
|
import unittest
import gradio.interpretation
import gradio.test_data
from gradio.processing_utils import decode_base64_to_image, encode_array_to_base64
from gradio import Interface
import numpy as np
import os
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
class TestDefault(unittest.TestCase):
def test_default_text(self):
max_word_len = lambda text: max([len(word) for word in text.split(" ")])
text_interface = Interface(max_word_len, "textbox", "label", interpretation="default")
interpretation = text_interface.interpret(["quickest brown fox"])[0][0]
self.assertGreater(interpretation[0][1], 0) # Checks to see if the first word has >0 score.
self.assertEqual(interpretation[-1][1], 0) # Checks to see if the last word has 0 score.
class TestShapley(unittest.TestCase):
def test_shapley_text(self):
max_word_len = lambda text: max([len(word) for word in text.split(" ")])
text_interface = Interface(max_word_len, "textbox", "label", interpretation="shapley")
interpretation = text_interface.interpret(["quickest brown fox"])[0][0]
self.assertGreater(interpretation[0][1], 0) # Checks to see if the first word has >0 score.
self.assertEqual(interpretation[-1][1], 0) # Checks to see if the last word has 0 score.
class TestCustom(unittest.TestCase):
def test_custom_text(self):
max_word_len = lambda text: max([len(word) for word in text.split(" ")])
custom = lambda text: [(char, 1) for char in text]
text_interface = Interface(max_word_len, "textbox", "label", interpretation=custom)
result = text_interface.interpret(["quickest brown fox"])[0][0]
self.assertEqual(result[0][1], 1) # Checks to see if the first letter has score of 1.
def test_custom_img(self):
max_pixel_value = lambda img: img.max()
custom = lambda img: img.tolist()
img_interface = Interface(max_pixel_value, "image", "label", interpretation=custom)
result = img_interface.interpret([gradio.test_data.BASE64_IMAGE])[0][0]
expected_result = np.asarray(decode_base64_to_image(gradio.test_data.BASE64_IMAGE).convert('RGB')).tolist()
self.assertEqual(result, expected_result)
class TestHelperMethods(unittest.TestCase):
def test_diff(self):
diff = gradio.interpretation.diff(13, "2")
self.assertEquals(diff, 11)
diff = gradio.interpretation.diff("cat", "dog")
self.assertEquals(diff, 1)
diff = gradio.interpretation.diff("cat", "cat")
self.assertEquals(diff, 0)
def test_quantify_difference_with_textbox(self):
iface = Interface(lambda text: text, ["textbox"], ["textbox"])
diff = gradio.interpretation.quantify_difference_in_label(iface, ["test"], ["test"])
self.assertEquals(diff, 0)
diff = gradio.interpretation.quantify_difference_in_label(iface, ["test"], ["test_diff"])
self.assertEquals(diff, 1)
def test_quantify_difference_with_label(self):
iface = Interface(lambda text: len(text), ["textbox"], ["label"])
diff = gradio.interpretation.quantify_difference_in_label(iface, ["3"], ["10"])
self.assertEquals(diff, -7)
diff = gradio.interpretation.quantify_difference_in_label(iface, ["0"], ["100"])
self.assertEquals(diff, -100)
def test_quantify_difference_with_confidences(self):
iface = Interface(lambda text: len(text), ["textbox"], ["label"])
output_1 = {
"cat": 0.9,
"dog": 0.1
}
output_2 = {
"cat": 0.6,
"dog": 0.4
}
output_3 = {
"cat": 0.1,
"dog": 0.6
}
diff = gradio.interpretation.quantify_difference_in_label(iface, [output_1], [output_2])
self.assertAlmostEquals(diff, 0.3)
diff = gradio.interpretation.quantify_difference_in_label(iface, [output_1], [output_3])
self.assertAlmostEquals(diff, 0.8)
def test_get_regression_value(self):
iface = Interface(lambda text: text, ["textbox"], ["label"])
output_1 = {
"cat": 0.9,
"dog": 0.1
}
output_2 = {
"cat": float("nan"),
"dog": 0.4
}
output_3 = {
"cat": 0.1,
"dog": 0.6
}
diff = gradio.interpretation.get_regression_or_classification_value(iface, [output_1], [output_2])
self.assertEquals(diff, 0)
diff = gradio.interpretation.get_regression_or_classification_value(iface, [output_1], [output_3])
self.assertAlmostEquals(diff, 0.1)
def test_get_classification_value(self):
iface = Interface(lambda text: text, ["textbox"], ["label"])
diff = gradio.interpretation.get_regression_or_classification_value(iface, ["cat"], ["test"])
self.assertEquals(diff, 1)
diff = gradio.interpretation.get_regression_or_classification_value(iface, ["test"], ["test"])
self.assertEquals(diff, 0)
if __name__ == '__main__':
unittest.main()
| null |
test/test_interpretation.py
|
test_interpretation.py
|
py
| 5,099 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "gradio.Interface",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "gradio.Interface",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "gradio.Interface",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "gradio.Interface",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.test_data",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "gradio.processing_utils.decode_base64_to_image",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.test_data",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation.interpretation.diff",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.diff",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.diff",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "gradio.Interface",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation.quantify_difference_in_label",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.quantify_difference_in_label",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "gradio.Interface",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation.quantify_difference_in_label",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.quantify_difference_in_label",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "gradio.Interface",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation.quantify_difference_in_label",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.quantify_difference_in_label",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "gradio.Interface",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation.get_regression_or_classification_value",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.get_regression_or_classification_value",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "gradio.Interface",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation.get_regression_or_classification_value",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "gradio.interpretation.interpretation.get_regression_or_classification_value",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "gradio.interpretation.interpretation",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "gradio.interpretation",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 113,
"usage_type": "call"
}
] |
146790840
|
"""
Created by nguyenvanhieu.vn at 9/18/2018
"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import json
from sklearn.neighbors import NearestNeighbors
import data_helper
class LDA:
def __init__(self, folders, query, mode):
self.X, self.y, self.path = data_helper.read_data(folders, mode=mode)
self.n_components = len(folders) - 1
self.lda = LinearDiscriminantAnalysis(n_components=self.n_components)
self.Xt = self.lda.fit(self.X, self.y).transform(self.X)
self.targets = list(set(map(str, self.y)))
self.data_folder = folders
self.n_classes = len(folders)
self.query = self.get_output(data_helper.read_image(query, mode))
# def export(self, output_file):
# data = {}
# cnt = 0
# for x, y, path in zip(self.Xt.tolist(), self.y.tolist(), self.paths):
# data[cnt] = {'x': x, 'y': y, 'path': path}
# cnt += 1
# with open(output_file, 'w') as fp:
# json.dump(data, fp, ensure_ascii=False)
def get_output(self, x):
if x.ndim > 1:
return self.lda.transform(x)
else:
return self.lda.transform([x])
def k_most_similar(self, n_neighbors):
nbrs = NearestNeighbors(n_neighbors=n_neighbors).fit(self.Xt)
if self.query.ndim > 1:
dist, inds = nbrs.kneighbors(self.query)
else:
dist, inds = nbrs.kneighbors([self.query])
result = []
for ind in inds[0]:
result.append(self.path[ind])
return result
if __name__ == '__main__':
lda = LDA(['data/train/0/', 'data/train/1'], 'data/test/0/0_1.jpg', 'RGB')
print(lda.k_most_similar(n_neighbors=20))
| null |
lda.py
|
lda.py
|
py
| 1,745 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "data_helper.read_data",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "data_helper.read_image",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.NearestNeighbors",
"line_number": 39,
"usage_type": "call"
}
] |
436504627
|
import os
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html")
def post(self):
body = self.get_argument('body')
len_body = len(body)
self.render("result.html",
len_body = len_body,
)
class HogeHandler(tornado.web.RequestHandler):
def get(self):
self.write("This is hoge page!")
application = tornado.web.Application([
(r"/", MainHandler),
(r"/hoge", HogeHandler),
],
template_path=os.path.join(os.getcwd(), "templates"),
static_path=os.path.join(os.getcwd(), "static"),
)
if __name__ == "__main__":
application.listen(8888)
print("Server is up ...")
tornado.ioloop.IOLoop.instance().start()
| null |
server.py
|
server.py
|
py
| 801 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tornado.ioloop.web",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "tornado.ioloop.web",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tornado.ioloop.web.Application",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.web",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.ioloop.IOLoop.instance",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.ioloop",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 32,
"usage_type": "name"
}
] |
356561853
|
import os
import shutil
import tarfile
from coco_assistant import COCO_Assistant
import data_getter
from pycocotools.coco import COCO
import pytest
@pytest.fixture
def get_data():
    """Ensure the test dataset exists locally; return [img_dir, ann_dir].

    On first use this downloads a tarball from Google Drive and extracts it
    into ./images and ./annotations; later calls reuse the extracted data.
    """
    if os.path.isdir('./annotations') is False and os.path.isdir('./images') is False:
        # Download and extract data
        print("Downloading...")
        file_id = '1WAFzdtIa56UL4wFVHg2TaBMhtzqRc0F-'
        destination = 'test.tar.gz'
        data_getter.download_file_from_google_drive(file_id, destination)
        # Unzip data
        print("Extracting")
        tar = tarfile.open(destination, "r:gz")
        tar.extractall()
        tar.close()
    # Set up paths
    img_dir = os.path.join(os.getcwd(), 'images')
    ann_dir = os.path.join(os.getcwd(), 'annotations')
    return [img_dir, ann_dir]
#@pytest.mark.skip
def test_combine(get_data):
    """Merging all annotation files must preserve the total annotation count."""
    cas = COCO_Assistant(get_data[0], get_data[1])
    cas.combine()
    combined = COCO(os.path.join(cas.resann_dir, 'combined.json'))
    # Count annotations in the merged file and in each source file.
    merged_count = len(combined.anns)
    per_file_counts = [len(single.anns) for single in cas.annfiles]
    print(merged_count)
    print(sum(per_file_counts))
    # Clean up
    shutil.rmtree(cas.res_dir)
    assert sum(per_file_counts) == merged_count, "Failure in merging datasets"
#@pytest.mark.skip
def test_cat_removal(get_data):
    """Removing categories from an annotation file should drop exactly those names."""
    cas = COCO_Assistant(get_data[0], get_data[1])
    test_ann = "tiny2.json"
    test_rcats = sorted(['plane', 'ship', 'Large_Vehicle'])
    cas.remove_cat(jc=test_ann, rcats=test_rcats)
    # Compare category names before (ann_dir) and after (resrm_dir) removal.
    orig = COCO(os.path.join(cas.ann_dir, cas.jc))
    rmj = COCO(os.path.join(cas.resrm_dir, cas.jc))
    orig_names = [list(orig.cats.values())[i]['name'] for i in range(len(orig.cats))]
    rmj_names = [list(rmj.cats.values())[i]['name'] for i in range(len(rmj.cats))]
    diff_names = sorted(list(set(orig_names) - set(rmj_names)))
    # Clean up
    shutil.rmtree(cas.resrm_dir)
    assert diff_names == test_rcats, "Failure in removing following categories: {}".format(test_rcats)
| null |
tests/tests.py
|
tests.py
|
py
| 1,944 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.isdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "data_getter.download_file_from_google_drive",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tarfile.open",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "coco_assistant.COCO_Assistant",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pycocotools.coco.COCO",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "coco_assistant.COCO_Assistant",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pycocotools.coco.COCO",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pycocotools.coco.COCO",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 67,
"usage_type": "call"
}
] |
49756977
|
import pytest
import csv
import re
import email as email_lib
import imaplib
from retry import retry
from notifications_python_client.notifications import NotificationsAPIClient
from config import Config
from tests.pages import VerifyPage
class RetryException(Exception):
    """Raised to tell the @retry-decorated helpers to try the operation again."""
    pass
def remove_all_emails(email=None, pwd=None, email_folder=None):
    """Delete every message in the given Gmail folder via IMAP.

    Any argument left as None falls back to the service-account values
    from Config.
    """
    if not email:
        email = Config.SERVICE_EMAIL
    if not pwd:
        pwd = Config.SERVICE_EMAIL_PASSWORD
    if not email_folder:
        email_folder = Config.EMAIL_FOLDER
    gimap = None
    try:
        gimap = imaplib.IMAP4_SSL('imap.gmail.com')
        rv, data = gimap.login(email, pwd)
        rv, data = gimap.select(email_folder)
        rv, data = gimap.search(None, "ALL")
        for num in data[0].split():
            # Flag each message deleted, then expunge once at the end.
            gimap.store(num, '+FLAGS', '\\Deleted')
        gimap.expunge()
    finally:
        # NOTE(review): close() raises if select() failed earlier; confirm
        # callers are happy with that propagating out of the finally block.
        if gimap:
            gimap.close()
            gimap.logout()
def create_temp_csv(number, field_name):
    """Write a one-row CSV with a single column *field_name* holding *number*.

    Returns a (directory, filename) pair pointing at the new file.
    """
    import os
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    file_name = 'sample.csv'
    with open(os.path.join(tmp_dir, file_name), 'w') as handle:
        writer = csv.DictWriter(handle, fieldnames=[field_name])
        writer.writeheader()
        writer.writerow({field_name: number})
    return tmp_dir, file_name
@retry(RetryException, tries=Config.EMAIL_TRIES, delay=Config.EMAIL_DELAY)
def get_email_body(profile, email_folder):
    """Fetch and delete the single email in *email_folder*; return its body text.

    Fails the test if login fails or more than one message is present;
    raises RetryException (triggering @retry) when the folder is empty.
    """
    gimap = None
    try:
        gimap = imaplib.IMAP4_SSL('imap.gmail.com')
        try:
            rv, data = gimap.login(profile.email, profile.email_password)
        except imaplib.IMAP4.error as e:
            pytest.fail("Login to email account has failed.")
        rv, data = gimap.select(email_folder)
        rv, data = gimap.search(None, "ALL")
        ids_count = len(data[0].split())
        if ids_count > 1:
            pytest.fail("There is more than one token email")
        elif ids_count == 1:
            num = data[0].split()[0]
            rv, data = gimap.fetch(num, '(UID BODY[TEXT])')
            msg = email_lib.message_from_bytes(data[0][1])
            # Delete the message so the next retrieval starts clean.
            gimap.store(num, '+FLAGS', '\\Deleted')
            gimap.expunge()
            # Undo quoted-printable soft line breaks in the payload.
            return msg.get_payload().strip().replace('=\r\n', '')  # yikes
        else:
            raise RetryException("Failed to retrieve the email from the email server.")
    finally:
        if gimap:
            gimap.close()
            gimap.logout()
@retry(RetryException, tries=Config.EMAIL_TRIES, delay=Config.EMAIL_DELAY)
def get_email_verify_code(profile, email_folder):
    """Fetch and delete the first email in *email_folder*; return the verify code.

    Strips the fixed explanatory sentence from the message body. Raises
    RetryException (triggering @retry) when no message is found; always
    empties the folder before returning control.
    """
    gimap = None
    try:
        gimap = imaplib.IMAP4_SSL('imap.gmail.com')
        try:
            rv, data = gimap.login(profile.email, profile.email_password)
        except imaplib.IMAP4.error as e:
            pytest.fail("Login to email account has failed.")
        rv, data = gimap.select(email_folder)
        rv, data = gimap.search(None, "ALL")
        if rv == 'OK' and data[0]:
            num = data[0].split()[0]
            rv, data = gimap.fetch(num, '(UID BODY[TEXT])')
            msg = email_lib.message_from_bytes(data[0][1])
            # Delete the consumed message so codes are never reused.
            gimap.store(num, '+FLAGS', '\\Deleted')
            gimap.expunge()
            return msg.get_payload().replace(' is your Notify authentication code', '').strip()  # yikes
        else:
            raise RetryException("Failed to retrieve the email from the email server.")
    finally:
        remove_all_emails(email_folder=email_folder)
        if gimap:
            gimap.close()
            gimap.logout()
def generate_unique_email(email, uuid):
parts = email.split('@')
return "{}+{}@{}".format(parts[0], uuid, parts[1])
def get_link(profile, email_label):
    """Return the first http(s) URL found in the latest email for *profile*.

    Fails the test when no link is present; always empties the mail folder
    before returning.
    """
    try:
        # BUG fix: get_email_body takes (profile, email_folder) — the old
        # call passed three arguments (email, password, label) and raised
        # TypeError on every invocation.
        email_body = get_email_body(profile, email_label)
        # Raw string: '\S' is an invalid escape in a plain string literal.
        match = re.search(r'http[s]?://\S+', email_body, re.MULTILINE)
        if match:
            return match.group(0)
        else:
            pytest.fail("Couldn't get the registration link from the email")
    finally:
        remove_all_emails(email_folder=email_label)
def get_email_message(profile, email_label):
    """Return the body of the notification email for *profile*.

    Fails the test on any retrieval error; always empties the mail folder.
    """
    try:
        return get_email_body(profile, email_label)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it to Exception.
        pytest.fail("Couldn't get notification email")
    finally:
        remove_all_emails(email_folder=email_label)
def send_to_deskpro(config, message):
    """Create a DeskPRO ticket titled 'Notify incident report' with *message*.

    A failed request is logged to stdout rather than raised, so reporting
    an incident can never mask the original failure.
    """
    import requests
    data = {'person_email': config.DESKPRO_PERSON_EMAIL,
            'department_id': config.DESKPRO_DEPT_ID,
            'agent_team_id': config.DESKPRO_ASSIGNED_AGENT_TEAM_ID,
            'subject': 'Notify incident report',
            'message': message
            }
    headers = {
        "X-DeskPRO-API-Key": config.DESKPRO_API_KEY,
        'Content-Type': "application/x-www-form-urlencoded"
    }
    resp = requests.post(
        config.DESKPRO_API_HOST + '/api/tickets',
        data=data,
        headers=headers)
    if resp.status_code != 201:
        print("Deskpro create ticket request failed with {} '{}'".format(resp.status_code, resp.json()))
def _get_latest_verify_code_message(resp, profile):
for notification in resp['notifications']:
if notification['to'] == profile.mobile and notification['template']['name'] == 'Notify SMS verify code':
return notification['body']
raise RetryException
def get_verify_code_from_api(profile):
    """Fetch the 5-digit SMS verify code for *profile* from the Notify API.

    Raises RetryException (via the helper) when no matching notification
    exists; fails the test when the body contains no 5-digit code.
    """
    client = NotificationsAPIClient(Config.NOTIFY_API_URL,
                                    Config.NOTIFY_SERVICE_ID,
                                    Config.NOTIFY_SERVICE_API_KEY)
    resp = client.get('notifications')
    verify_code_message = _get_latest_verify_code_message(resp, profile)
    # Raw string: '\d' is an invalid escape in a plain string literal
    # (DeprecationWarning today, SyntaxError in future Pythons).
    m = re.search(r'\d{5}', verify_code_message)
    if not m:
        pytest.fail("Could not find the verify code in notification body")
    return m.group(0)
@retry(RetryException, tries=15, delay=2)
def do_verify(driver, profile):
    """Type the latest SMS verify code into the verify page, retrying on rejection."""
    verify_code = get_verify_code_from_api(profile)
    verify_page = VerifyPage(driver)
    verify_page.verify(verify_code)
    if verify_page.has_code_error():
        # Rejected code — presumably a newer code superseded it; retry.
        raise RetryException
@retry(RetryException, tries=15, delay=2)
def get_sms_via_api(service_id, template_id, profile, api_key):
    """Return the body of a sent SMS matching *template_id* and profile.mobile.

    Skips notifications still in 'created' state; raises RetryException
    (triggering @retry) when no matching notification is found yet.
    """
    client = NotificationsAPIClient(Config.NOTIFY_API_URL,
                                    service_id,
                                    api_key)
    resp = client.get('notifications')
    for notification in resp['notifications']:
        t_id = notification['template']['id']
        to = notification['to']
        # BUG fix: this used to read notification['to'] again, so the
        # status != 'created' check below compared a phone number and
        # never filtered out undelivered notifications.
        status = notification['status']
        if t_id == template_id and to == profile.mobile and status != 'created':
            return notification['body']
    message = 'Could not find notification with template {} to number {}'.format(template_id, profile.mobile)
    raise RetryException(message)
| null |
tests/utils.py
|
utils.py
|
py
| 7,148 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "config.Config.SERVICE_EMAIL",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "config.Config.SERVICE_EMAIL_PASSWORD",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "config.Config.EMAIL_FOLDER",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "imaplib.IMAP4_SSL",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "csv.DictWriter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "imaplib.IMAP4_SSL",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "imaplib.IMAP4",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pytest.fail",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pytest.fail",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "email.message_from_bytes",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "retry.retry",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "config.Config.EMAIL_TRIES",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "config.Config.EMAIL_DELAY",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "imaplib.IMAP4_SSL",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "imaplib.IMAP4",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "pytest.fail",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "email.message_from_bytes",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "retry.retry",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "config.Config.EMAIL_TRIES",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "config.Config.EMAIL_DELAY",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "email.split",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "pytest.fail",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pytest.fail",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "config.DESKPRO_PERSON_EMAIL",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "config.DESKPRO_DEPT_ID",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "config.DESKPRO_ASSIGNED_AGENT_TEAM_ID",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "config.DESKPRO_API_KEY",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "config.DESKPRO_API_HOST",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "notifications_python_client.notifications.NotificationsAPIClient",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "config.Config.NOTIFY_API_URL",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "config.Config.NOTIFY_SERVICE_ID",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "config.Config.NOTIFY_SERVICE_API_KEY",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pytest.fail",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "tests.pages.VerifyPage",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "retry.retry",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "notifications_python_client.notifications.NotificationsAPIClient",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "config.Config.NOTIFY_API_URL",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "retry.retry",
"line_number": 193,
"usage_type": "call"
}
] |
589353255
|
# Some pygame helper functions for simple image display
# and sound effect playback
# Rob Miles July 2017
# Version 1.0
import time
import random
import pygame
surface = None
sound_available = False
def setup(width=800, height=600, title=''):
    '''
    Sets up the pygame environment: initialises pygame, creates the
    display surface and module-level state. Safe to call repeatedly;
    only the first call has any effect.
    '''
    global window_size
    global back_color
    global text_color
    global image
    global surface
    # Don't initialise if we already have
    if surface is not None:
        return
    window_size = (width, height)
    back_color = (255, 255, 255)
    text_color = (255, 0, 0)
    image = None
    pygame.init()
    # Create the game surface
    surface = pygame.display.set_mode(window_size)
    clear_display()
    pygame.display.set_caption(title)
def handle_events():
    '''
    Consume events that are generated by the pygame window
    (keeps the window responsive between drawing calls).
    '''
    # NOTE(review): despite the `global key_pressed` declaration, nothing
    # here assigns key_pressed — events are simply drained and discarded.
    global key_pressed
    setup()
    for event in pygame.event.get():
        pass
def play_sound(filepath):
    '''
    Plays the specified sound file. Prints a warning and returns quietly
    when the machine has no audio support.
    '''
    try:
        # pre initialise pyGame's audio engine to avoid sound latency issues
        pygame.mixer.pre_init(frequency=44100)
        pygame.mixer.init()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; mixer failures raise pygame.error (an Exception).
        print("There is no sound provision on this computer.")
        print("Sound commands will not produce any output")
        return
    sound = pygame.mixer.Sound(filepath)
    sound.play()
def display_image(filepath):
    '''
    Displays the image from the given filepath
    Starts pygame if required
    May throw exceptions (e.g. pygame.error for an unreadable file)
    '''
    global surface
    global window_size
    global image
    handle_events()
    image = pygame.image.load(filepath)
    # Scale to fill the window; the scaled image is cached in the module
    # global so clear_display() can re-blit it.
    image = pygame.transform.smoothscale(image, window_size)
    surface.blit(image, (0, 0))
    pygame.display.flip()
def clear_display():
    '''
    Clears the display to the background colour
    and re-draws the cached image (if any) on top of it
    '''
    global surface
    global image
    global back_color
    handle_events()
    surface.fill(back_color)
    if image is not None:
        surface.blit(image, (0, 0))
    pygame.display.flip()
def split_lines_on_spaces(text):
    '''
    Splits the text into a list of words, with any run of trailing
    spaces kept attached to the word it follows (so joining the list
    reproduces the original text exactly).
    '''
    words = []
    current = ''
    pending_space = False
    for ch in text:
        if ch == ' ':
            pending_space = True
            current += ch
        elif pending_space:
            # First character after a run of spaces starts a new word.
            words.append(current)
            current = ch
            pending_space = False
        else:
            current += ch
    words.append(current)
    return words
def get_display_lines(text, font, width):
    '''
    Word-wraps the text to the given pixel width, measuring words with
    the supplied font, and returns the resulting list of line strings.
    Existing newlines in the text are respected.
    '''
    wrapped = []
    for raw_line in text.splitlines():
        current = ''
        used = 0
        for word in split_lines_on_spaces(raw_line):
            word_width = font.size(word)[0]
            if used + word_width > width:
                # Word would overflow: finish the current line and
                # start a fresh one with this word.
                wrapped.append(current)
                current = word
                used = word_width
            else:
                current += word
                used += word_width
        wrapped.append(current)
    return wrapped
def render_message(text, size=200, margin=20, horiz='center', vert='center',
                   color=(255, 0, 0), cursor=''):
    '''
    Renders the text onto the current surface, word-wrapped to the
    window width minus margins, aligned per horiz/vert, optionally
    followed by a cursor glyph. Raises Exception when the wrapped
    text is taller than the window.
    '''
    # Get the text version of the input
    text = str(text)
    font = pygame.font.Font(None, size)
    available_width = window_size[0] - (margin * 2)
    lines = get_display_lines(text, font, available_width)
    rendered_lines = []
    height = 0
    for line in lines:
        rendered_line = font.render(line, 1, color)
        height += rendered_line.get_height()
        rendered_lines.append(rendered_line)
    if height > window_size[1]:
        raise Exception('Text too large for window')
    if vert == 'center':
        y = (window_size[1] - height) / 2.0
    elif vert == 'bottom':
        y=(window_size[1]-margin) - height
    else:
        # default vertical cursor position is top
        y = margin
    for rendered_line in rendered_lines:
        width = rendered_line.get_width()
        height = rendered_line.get_height()
        if horiz == 'center':
            x = (available_width - width) / 2.0 + margin
        elif horiz == 'right':
            x = window_size[0] - width - margin
        else:
            # default position is left margin
            x = margin
        surface.blit(rendered_line, (x, y))
        y += height
    if cursor:
        cursor_size = font.size(cursor)
        cursor_width = cursor_size[0]
        cursor_height = cursor_size[1]
        if len(rendered_lines):
            # put the cursor on the end of an existing line
            y -= height
            x += width
        else:
            # put the cursor in the start position for this
            # orientation
            # default x position is the margin
            x = margin
            if horiz == 'center':
                x = (available_width - cursor_width) / 2.0 + margin
            elif horiz == 'right':
                x = window_size[0] - cursor_width - margin
            else:
                # default position is left margin
                x = margin
            if vert == 'center':
                y = (window_size[1] - cursor_height) / 2.0
            elif vert == 'bottom':
                y=(window_size[1]-margin) - cursor_height
            else:
                # default vertical cursor position is top
                y = margin
        cursor_image = font.render(cursor, 1, color)
        surface.blit(cursor_image, (x, y))
    pygame.display.flip()
def display_message(text, size=200, margin=20, horiz='center', vert='center',
                    color=(255, 0, 0)):
    '''
    Clears the window and displays the text as a message.
    Size can be used to select the size of the text.
    '''
    global window_size
    global surface
    handle_events()
    clear_display()
    render_message(text, size=size, margin=margin, horiz=horiz, vert=vert, color=color)
def get_dot():
    '''
    Waits for a mouse movement and then returns it
    as a tuple of x and y coordinates
    '''
    setup()
    while True:
        event = pygame.event.wait()
        if event.type == 4:
            # Event 4 is mouse motion
            pos = event.dict['pos']
            return pos
# Common RGB colour constants (0-255 per channel).
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
MAGENTA = (255, 0, 255)
CYAN = (0, 255, 255)
# Current colour used by draw_dot(); changed by set_random_color().
dot_color = WHITE
def set_color(r, g, b):
    '''
    Sets the colour used by draw_dot to the given RGB components
    and returns the new (r, g, b) tuple.
    '''
    # BUG fix: without the global declaration the assignment only bound a
    # local variable, so the module-level dot_color never changed.
    global dot_color
    dot_color = (r, g, b)
    return dot_color
def set_random_color():
    '''
    Sets the colour used by draw_dot to a random RGB value.
    '''
    global dot_color
    dot_color = (random.randint(0,255),
                 random.randint(0,255),
                 random.randint(0,255))
def get_mouse_pressed():
    '''
    Returns a truthy value while the first (left) mouse button is held down.
    '''
    return pygame.mouse.get_pressed()[0]
def draw_dot(pos, radius):
    '''
    Draws a filled circle of the current dot colour at pos (x, y)
    with the given radius, and updates the display.
    '''
    setup()
    pygame.draw.circle(surface, dot_color, pos, radius)
    pygame.display.flip()
def get_key():
    '''
    Waits for a keypress and then returns it as a string
    Only characters are returned, not control keys
    '''
    setup()
    while True:
        event = pygame.event.wait()
        if event.type == 2:
            # Event 2 is keydown
            key_code = event.dict['unicode']
            # Non-character keys (shift, arrows, ...) give an empty
            # unicode value and are skipped.
            if key_code:
                return key_code
def get_string(prompt, size=50, margin=20,
               color=(255, 0, 0), horiz='left', vert='center',
               max_line_length=20):
    '''
    Reads a string from the user, echoing it after the prompt with a
    flashing '*' cursor. Enter finishes input; backspace deletes.
    '''
    setup()
    result = ''
    cursor_char = '*'
    cursor = None
    def redraw():
        # Repaint prompt + text typed so far + current cursor state.
        clear_display()
        render_message(prompt+result, margin=margin, size=size,
                       horiz=horiz, vert=vert, color=color, cursor=cursor)
    def cursor_flip():
        # NOTE(review): this nested function only declares the nonlocal
        # and is never called — the flip logic lives in the loop below.
        nonlocal cursor
    # create a timer for the cursor
    cursor_event = pygame.USEREVENT+1
    pygame.time.set_timer(cursor_event,500)
    while True:
        event = pygame.event.wait()
        if event.type == cursor_event:
            # Toggle the cursor between visible and hidden each tick.
            if cursor:
                cursor = None
            else:
                cursor = cursor_char
            redraw()
        elif event.type == 2:
            # Event 2 is keydown
            key_code = event.dict['unicode']
            if key_code is None:
                continue
            if key_code == '\r':
                break
            elif key_code == '\x08':
                # Backspace: drop the last character.
                if len(result) > 0:
                    result=result[:-1]
                redraw()
            else:
                if len(result) < max_line_length:
                    result += key_code
                    redraw()
    # disable the timer for the cursor
    pygame.time.set_timer(cursor_event,0)
    return result
import urllib.request
import xml.etree.ElementTree
def get_weather_temp(latitude,longitude):
    '''
    Uses forecast.weather.gov to get the apparent temperature
    for the specified latitude and longitude.
    Returns the value as an int, or None when the element is absent.
    '''
    url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
    req=urllib.request.urlopen(url)
    page=req.read()
    doc=xml.etree.ElementTree.fromstring(page)
    # I'm not proud of this, but by gum it works...
    # Walk data[@type='current observations']/parameters/
    # temperature[@type='apparent']/value by hand.
    for child in doc:
        if child.tag == 'data':
            if child.attrib['type'] == 'current observations':
                for item in child:
                    if item.tag == 'parameters':
                        for i in item:
                            if i.tag == 'temperature':
                                if i.attrib['type'] == 'apparent':
                                    for t in i:
                                        if t.tag =='value':
                                            return int(t.text)
def get_weather_desciption(latitude,longitude):
    '''
    Uses forecast.weather.gov to get the current weather summary text
    for the specified latitude and longitude.
    Returns None when no weather-summary attribute is found.
    (Name is misspelled but kept for caller compatibility.)
    '''
    url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
    req=urllib.request.urlopen(url)
    page=req.read()
    doc=xml.etree.ElementTree.fromstring(page)
    # I'm not proud of this, but by gum it works...
    # Walk data[@type='current observations']/parameters/weather/
    # weather-conditions[@weather-summary] by hand.
    for child in doc:
        if child.tag == 'data':
            if child.attrib['type'] == 'current observations':
                for item in child:
                    if item.tag == 'parameters':
                        for i in item:
                            if i.tag == 'weather':
                                for t in i:
                                    if t.tag == 'weather-conditions':
                                        if t.get('weather-summary') is not None:
                                            return t.get('weather-summary')
| null |
5. Making Decisions in Programs/snaps.py
|
snaps.py
|
py
| 11,255 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.init",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.pre_init",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.init",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.smoothscale",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.wait",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.wait",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "pygame.USEREVENT",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.set_timer",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.wait",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.set_timer",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.etree.ElementTree.fromstring",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.etree",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.etree.ElementTree.fromstring",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.etree",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 406,
"usage_type": "name"
}
] |
566027923
|
from typing import List
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import TextFieldEmbedder
from allennlp.nn import util
from .seq2vec_encoder import Seq2VecEncoder
def masked_mean_pooling(embedding_sequence: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """Mean-pool token embeddings over the sequence dimension, ignoring masked positions.

    embedding_sequence: (batch, seq_len, dim); mask: (batch, seq_len) with
    1/True for real tokens. Rows whose mask is entirely zero yield a zero
    vector instead of NaN.
    """
    embedding_sequence = embedding_sequence * mask.unsqueeze(-1).float()
    summed_embeddings = embedding_sequence.sum(dim=1)  # shape: (batch_size, embedding_dim)
    lengths = mask.sum(dim=1)  # shape: (batch_size, )
    length_mask = lengths > 0
    # Set any length 0 to 1, to avoid dividing by zero.
    lengths = torch.max(lengths, lengths.new_ones(1))
    mean_pooled_embeddings = summed_embeddings / lengths.unsqueeze(-1).float()
    # Zero out rows that had no unmasked positions. (length_mask is already
    # boolean; the old `(length_mask > 0)` comparison was a no-op.)
    mean_pooled_embeddings = mean_pooled_embeddings * length_mask.float().unsqueeze(-1)
    return mean_pooled_embeddings
def get_last_indices_from_mask(mask: torch.Tensor) -> List[int]:
    """For each row of *mask*, return the index just before the first zero,
    or -1 when the row contains no zeros (i.e. the last column)."""
    last_index = []
    for row in mask:
        zero_positions = (row == 0).nonzero(as_tuple=True)[0]
        if len(zero_positions) == 0:
            last_index.append(-1)
        else:
            last_index.append((zero_positions[0] - 1).item())
    return last_index
@Seq2VecEncoder.register("boe")
class BoeEncoder(Seq2VecEncoder):
    """Bag-of-embeddings encoder: sums (or averages) token embeddings."""
    def __init__(
        self, vocab: Vocabulary, embedder: TextFieldEmbedder, averaged: bool = False, mask_first_and_last: bool = False
    ) -> None:
        super().__init__(vocab=vocab)
        self.embedder = embedder
        # When True, return the masked mean instead of the raw sum.
        self.averaged = averaged
        # When True, exclude the first token and each sequence's last
        # unmasked token from pooling (presumably [CLS]/[SEP]-style
        # specials — confirm against the tokenizer used upstream).
        self.mask_first_and_last = mask_first_and_last
    def forward(self, tokens: TextFieldTensors) -> torch.Tensor:
        embedding_sequence = self.embedder(tokens)
        mask = util.get_text_field_mask(tokens)
        if self.mask_first_and_last:
            # Zero the mask at column 0 and at each row's last unmasked
            # position (index -1 from the helper wraps to the final column
            # for rows with no padding).
            last_indices = get_last_indices_from_mask(mask)
            batch_size = mask.size(0)
            mask[range(batch_size), last_indices] = 0
            mask[:, 0] = 0
        if self.averaged:
            return masked_mean_pooling(embedding_sequence, mask)
        else:
            embedding_sequence = embedding_sequence * mask.unsqueeze(-1).float()
            summed = embedding_sequence.sum(dim=1)  # shape: (batch_size, embedding_dim)
            return summed
| null |
examples_allennlp/utils/retrieval/models/bag_of_embeddings.py
|
bag_of_embeddings.py
|
py
| 2,307 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.Tensor",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.max",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "seq2vec_encoder.Seq2VecEncoder",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "allennlp.data.Vocabulary",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "allennlp.modules.TextFieldEmbedder",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "allennlp.data.TextFieldTensors",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "allennlp.nn.util.get_text_field_mask",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "allennlp.nn.util",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "seq2vec_encoder.Seq2VecEncoder.register",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "seq2vec_encoder.Seq2VecEncoder",
"line_number": 41,
"usage_type": "name"
}
] |
234756079
|
from typing import Dict
from concurrent.futures import (
ProcessPoolExecutor,
ThreadPoolExecutor,
)
from asyncpg.connection import Connection as PgConnection
from helpers import ElasticContext
from .base import BaseUpdater
from .terms import TermUpdater
def get_update_handlers(pg_conn: PgConnection,
                        elastic_context: ElasticContext,
                        thread_pool: ThreadPoolExecutor,
                        process_pool: ProcessPoolExecutor) -> Dict[str, BaseUpdater]:
    """
    Build the resolver mapping handler names to updater instances.

    :param pg_conn: database connection
    :param elastic_context: Elasticsearch context
    :param thread_pool: thread pool for asynchronous operations
    :param process_pool: process pool for asynchronous operations
    :return: resolver of update handlers
    """
    params = {
        'pg_conn': pg_conn,
        'elastic_context': elastic_context,
        'thread_pool': thread_pool,
        'process_pool': process_pool,
    }
    return {
        'terms': TermUpdater(**params)
    }
| null |
eupdater/helpers.py
|
helpers.py
|
py
| 1,276 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "asyncpg.connection.Connection",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "helpers.ElasticContext",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.ProcessPoolExecutor",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "terms.TermUpdater",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "base.BaseUpdater",
"line_number": 17,
"usage_type": "name"
}
] |
424102036
|
from django.urls import path
from applications.forms import (
FinancialAidApplicationForm,
ScholarshipApplicationForm,
)
from applications.views import apply, view
# URL configuration for the "applications" app.
# NOTE: Django matches routes top to bottom, so pattern order matters.
app_name = "applications"
urlpatterns = [
    # pyre-ignore[16]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
    # Edit an existing application identified by primary key.
    path("edit/<int:pk>", apply, name="edit"),
    # pyre-ignore[16]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
    # Financial-aid application; the form class is passed to the view as a kwarg.
    path(
        "financial-aid",
        apply,
        {"form_type": FinancialAidApplicationForm},
        name="financial_aid",
    ),
    # pyre-ignore[16]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
    # Scholarship ("ticket") application, same view with a different form class.
    path(
        "ticket", apply, {"form_type": ScholarshipApplicationForm}, name="scholarship"
    ),
    # pyre-ignore[16]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
    # Read-only view of a submitted application.
    path("view/<int:pk>", view, name="view"),
]
| null |
src/applications/urls.py
|
urls.py
|
py
| 911 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "applications.views.apply",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "applications.views.apply",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "applications.forms.FinancialAidApplicationForm",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "applications.views.apply",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "applications.forms.ScholarshipApplicationForm",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "applications.views.view",
"line_number": 26,
"usage_type": "argument"
}
] |
426995395
|
import scipy.integrate as spi
import numpy as np
import matplotlib.pyplot as plt
# Use a font that can render the CJK axis labels below.
plt.rcParams['font.sans-serif'] = ['SimHei']

# --- SIS epidemic model parameters ---
N = 10000      # total population size
B = 0.01       # transmission probability per contact
y = 0.02       # daily recovery probability
I = 1          # initially infected individuals
S = N - I      # initially susceptible individuals
r = 10         # daily contacts per infected individual
T = 200        # simulated time span (days)
INI = (S, I)   # initial state vector (S, I)


def funcSIS(inivalue, _):
    """Right-hand side of the SIS ODE system: returns d[S, I]/dt."""
    susceptible, infected = inivalue[0], inivalue[1]
    new_infections = (r * B * susceptible * infected) / N
    recoveries = y * infected
    derivative = np.zeros(2)
    derivative[0] = recoveries - new_infections   # change in susceptibles
    derivative[1] = new_infections - recoveries   # change in infected
    return derivative


T_range = np.arange(0, T + 1)
# odeint(func, y0, t): integrate the system from INI over T_range.
RES = spi.odeint(funcSIS, INI, T_range)

plt.plot(RES[:, 0], color='g', label='易感染者——Susceptible')
plt.plot(RES[:, 1], color='r', label='传染者——Infection')
plt.title('SIS Model')
plt.legend()
plt.xlabel('天数')
plt.ylabel('人数')
plt.show()
| null |
SIS_Model.py
|
SIS_Model.py
|
py
| 1,083 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.odeint",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "scipy.integrate",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
}
] |
593448263
|
# coding: utf-8
# In[1]:
import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
from contextlib import closing
# In[2]:
url = 'http://us-city.census.okfn.org/place/miami'

# Fetch and parse the census page.
response = requests.get(url)
response.raise_for_status()
soup = BeautifulSoup(response.content.decode(), 'html.parser')

# Table column headers; an empty <th> is the "Propose Revisions" column.
headers = [th.text or 'Propose Revisions' for th in soup.select('th')]
(DATASETS, SCORES, BREAKDOWNS, LAST_UPDATED, URL_LOCATIONS,
 INFORMATION, PROPOSE_REVISIONS) = headers

FIRST = 0
DATA_DATASETTITLE = 'data-datasettitle'  # attr of some elements yielding title
LOCATION_URL = 4
ROWS = 'tr'
CELLS = 'td'
HREF = 'href'
ALINKS = 'a'

table_rows = soup.select(ROWS)[1:]  # skip the header row

resources = OrderedDict()

# First column of the table: dataset names. Prefer the data-datasettitle
# attribute; dead rows without it fall back to the row's first text token.
data_resource_names = []
for data_row in table_rows:
    titles = [str(cell.attrs.get(DATA_DATASETTITLE))
              for cell in data_row.select(CELLS)
              if cell.attrs.get(DATA_DATASETTITLE)]
    name = ''.join(titles) or data_row.text.split()[FIRST].strip()
    data_resource_names.append(name)

resources[DATASETS] = data_resource_names


def get_url(element):
    """Return the href of the first link, or None when the row has none."""
    try:
        return element[FIRST].attrs[HREF]
    except (IndexError, KeyError):
        return None


resource_urls = []
for data_row in table_rows:
    links = data_row.select(CELLS)[LOCATION_URL].select(ALINKS)
    resource_urls.append(get_url(links))

resources[URL_LOCATIONS] = resource_urls

# Keep only http(s) URLs that are not zip archives for the probe below.
test_urls = []
for dataset, candidate in zip(data_resource_names, resource_urls):
    usable = (candidate is not None
              and not str(candidate).endswith('.zip')
              and str(candidate).startswith('http'))
    if usable:
        test_urls.append((dataset, candidate))

attrs = ('status_code', 'reason', 'ok')
for dataset, resource_url in test_urls:
    print(dataset, resource_url)
    with closing(requests.get(resource_url, stream=True)) as response:
        for attr in attrs[0:1]:
            print(getattr(response, attr))
| null |
notebooks/MiamiOpenDataCensus.py
|
MiamiOpenDataCensus.py
|
py
| 2,135 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "contextlib.closing",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 100,
"usage_type": "call"
}
] |
384674994
|
import os
from uuid import uuid4
from matplotlib import pyplot
from dds_simulation.conf.default import PROJECT_ROOT
def draw_extrapolation(x_vector, y_vector, partitions_number):
    """Plot inconsistency I(U) over x_vector and save the figure.

    The figure is written to results/ under PROJECT_ROOT, named after
    the number of consistent partitions.
    """
    pyplot.figure()
    pyplot.ylim(0, 1)
    pyplot.ylabel('I(U)')
    pyplot.plot(x_vector, y_vector, color='b', linewidth=3.0)
    filename = '{}-consistent-partitions-inconsistency'.format(partitions_number)
    pyplot.savefig(os.path.join(PROJECT_ROOT, 'results', filename))
def draw_probability_extrapolation(x_vector, y_vector, partitions,
                                   nodes_number, average):
    """Plot probability extrapolation and save it under results/.

    A random uuid4 suffix makes each saved file name unique.
    """
    pyplot.figure()
    pyplot.ylim(0, 1)
    pyplot.ylabel('I({}) = {}'.format(partitions, average))
    pyplot.plot(x_vector, y_vector, color='b')
    filename = '{}-consistent-partitions-probability-{}-nodes-{}'.format(
        partitions, nodes_number, uuid4().hex)
    pyplot.savefig(os.path.join(PROJECT_ROOT, 'results', filename))
| null |
dds_simulation/graphs/extrapolation.py
|
extrapolation.py
|
py
| 1,013 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dds_simulation.conf.default.PROJECT_ROOT",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "dds_simulation.conf.default.PROJECT_ROOT",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid4",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
}
] |
635161876
|
from opentelemetry import trace
from opentelemetry.exporter import jaeger
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from flask import Flask
from flask import request
import requests
import time
# Install the SDK tracer provider FIRST: tracers obtained before this call
# would be no-ops, so the order of these module-level statements matters.
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)
# create a JaegerSpanExporter: ships finished spans to a local Jaeger agent.
jaeger_exporter = jaeger.JaegerSpanExporter(
    service_name='orch-service',
    agent_host_name='localhost',
    agent_port=6831,
)
# Create a BatchExportSpanProcessor and add the exporter to it
span_processor = BatchExportSpanProcessor(jaeger_exporter)
# add to the tracer provider so spans are actually exported
trace.get_tracer_provider().add_span_processor(span_processor)
app = Flask(__name__)
# Auto-instrument Flask so every incoming request gets a server span.
FlaskInstrumentor().instrument_app(app)
def get_information_from_k8s():
    # Child span wrapping an outbound HTTP call (stands in for a k8s lookup).
    with tracer.start_as_current_span('k8s-information'):
        requests.get('http://www.google.com')
@app.route("/deploy")
def deploy_to_kubernetes():
    # Simulated deployment endpoint: one span for the whole operation plus a
    # nested span created inside get_information_from_k8s().
    print("Starting operation.")
    with tracer.start_as_current_span('deploy-to-kubernetes') as span:
        print(span)
        print(request.headers)
        # Simulate deployment work with a short sleep (1.5 s).
        get_information_from_k8s()
        time.sleep(1.5)
        return "deployed_to_k8s", 200
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=8081)
| null |
orchestrator.py
|
orchestrator.py
|
py
| 1,415 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "opentelemetry.trace.set_tracer_provider",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "opentelemetry.trace",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "opentelemetry.sdk.trace.TracerProvider",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "opentelemetry.trace.get_tracer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "opentelemetry.trace",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "opentelemetry.exporter.jaeger.JaegerSpanExporter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "opentelemetry.exporter.jaeger",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "opentelemetry.sdk.trace.export.BatchExportSpanProcessor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "opentelemetry.trace.get_tracer_provider",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "opentelemetry.trace",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "opentelemetry.instrumentation.flask.FlaskInstrumentor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 41,
"usage_type": "call"
}
] |
242701061
|
import pygame
from constants import *
class Interface(object):
    """Pygame front-end that renders drones, obstacles and network links."""

    def __init__(self):
        pygame.init()
        self.resolution = RESOLUTION
        self.size = SCREEN_WIDTH, SCREEN_HEIGHT
        self.screen = pygame.display.set_mode(self.size)
        self.clock = pygame.time.Clock()
        self.timestep = 0

    def draw(self, positions, obstacles, state, leader, alive):
        """Render one frame: background, obstacles, links, then drones."""
        self.timestep += 1
        self.screen.fill(LIGHT_GRAY)                 # background
        self.draw_obstacles(obstacles)               # obstacles
        self.draw_connections(positions, state)      # network links
        self.draw_drones(positions, leader, alive)   # drones
        pygame.display.flip()
        # Frame recording (disabled):
        # pygame.image.save(self.screen, f"record/screenshot_{self.timestep}.jpeg")

    def draw_obstacles(self, obstacles):
        """Draw each obstacle plus a thin circle marking its avoid radius."""
        for center in obstacles:
            pygame.draw.circle(self.screen, RED, RATIO * center, radius=SIZE_OBSTACLES, width=20)
            pygame.draw.circle(self.screen, BLACK, RATIO * center, radius=RATIO * AVOID_DISTANCE, width=1)

    def draw_connections(self, positions, state):
        """Draw a line between every connected pair of living drones."""
        count = len(positions)
        for i in range(count):
            if i not in state.alive:
                continue
            for j in range(i + 1, count):
                if j not in state.alive:
                    continue
                if state.adjacencyMatrix[i][j]:
                    pygame.draw.line(self.screen, BLACK, RATIO * positions[i], RATIO * positions[j], 1)

    def draw_drones(self, positions, leader, alive):
        """Draw living follower drones in blue and the leader in green."""
        for idx, position in enumerate(positions):
            if idx != leader and idx in alive:
                pygame.draw.circle(self.screen, BLUE, RATIO * position, radius=SIZE_DRONE, width=20)
                pygame.draw.circle(self.screen, BLACK, RATIO * position, radius=RATIO * AVOID_DISTANCE, width=1)
        # The leader (if any, and still alive) is drawn larger and green.
        if leader > -1 and leader in alive:
            pygame.draw.circle(self.screen, GREEN, RATIO * positions[leader], radius=1.2 * SIZE_DRONE, width=22)
            pygame.draw.circle(self.screen, BLACK, RATIO * positions[leader], radius=RATIO * AVOID_DISTANCE, width=1)
| null |
CE-288/LeaderElectionNetwork/interface.py
|
interface.py
|
py
| 2,546 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 61,
"usage_type": "attribute"
}
] |
256774015
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mylinearregression import MyLinearRegression as MyLR
data = pd.read_csv("../resources/spacecraft_data.csv")

# Feature matrices: all three features together, then each one on its own.
X = np.array(data[['Age', 'Thrust_power', 'Terameters']])
Xage = np.array(data["Age"]).reshape(-1, 1)
Xthrush = np.array(data["Thrust_power"]).reshape(-1, 1)
Xmeter = np.array(data["Terameters"]).reshape(-1, 1)
Yprice = np.array(data[["Sell_price"]])

# Multivariate model (3 features + bias) and one model per single feature.
myLR = MyLR([[1.0], [1.0], [1.0], [1.0]])
myLR_age = MyLR([[1000.0], [-1.0]])
myLR_thrush = MyLR([[1.0], [-1.0]])
myLR_meter = MyLR([[1.0], [-1.0]])

# Only the multivariate model is trained here; the single-feature models
# are exercised through their fig*() helpers when enabled.
myLR.fit_(X, Yprice, alpha=2e-6, n_cycle=600000)


def fig_age():
    """Scatter true sell prices and multivariate predictions against age."""
    predicted = myLR.predict_(X)
    plt.plot(X[:, 0].reshape(-1, 1), Yprice, 'o', color='#141C95', label='Sell Price')
    plt.plot(X[:, 0].reshape(-1, 1), predicted, 'o', color='cyan', label='Sell Price')
    plt.grid(True)
    plt.show()


def figage():
    """Scatter true prices and age-only model predictions."""
    predicted = myLR_age.predict_(Xage)
    plt.plot(Xage, Yprice, 'o', color='#141C95', label='Sell Price')
    plt.plot(Xage, predicted, 'o', color='cyan', label='Sell Price')
    plt.grid(True)
    plt.show()


def figthrush():
    """Scatter true prices and thrust-only model predictions."""
    predicted = myLR_thrush.predict_(Xthrush)
    plt.plot(Xthrush, Yprice, 'o', color='green', label='Sell Price')
    plt.plot(Xthrush, predicted, 'o', color='#00FF00', label='Sell Price')
    plt.grid(True)
    plt.show()


def figmeter():
    """Scatter true prices and distance-only model predictions."""
    predicted = myLR_meter.predict_(Xmeter)
    plt.plot(Xmeter, Yprice, 'o', color='#A33BEF', label='Sell Price')
    plt.plot(Xmeter, predicted, 'o', color='#EF3BD6', label='Sell Price')
    plt.grid(True)
    plt.show()


fig_age()
| null |
day01/ex01/multi_linear_model.py
|
multi_linear_model.py
|
py
| 2,201 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mylinearregression.MyLinearRegression",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mylinearregression.MyLinearRegression",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mylinearregression.MyLinearRegression",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "mylinearregression.MyLinearRegression",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
}
] |
13509569
|
import numpy as np
import keras as keras #Import de keras
import h5py
from keras.preprocessing.image import load_img, img_to_array
from keras.models import load_model
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from keras.models import load_model
from tensorflow.keras.models import load_model
longitud, altura = 200, 200  # image width/height the network was trained on
modelo = './Entrenamiento Clasificador/modelo.h5'        # model architecture file
pesos_modelo = './Entrenamiento Clasificador/pesos.h5'   # trained weights file

# Load the architecture and then the trained weights into the CNN.
cnn = load_model(modelo)
cnn.load_weights(pesos_modelo)


def predict(file):
    """Classify an image file and print whether it is a sunflower.

    Returns the argmax class index (0 = not sunflower, 1 = sunflower).
    """
    image = load_img(file, target_size=(longitud, altura))
    image = img_to_array(image)
    # Add a leading batch dimension so the network accepts a single image.
    image = np.expand_dims(image, axis=0)
    scores = cnn.predict(image)[0]   # per-class scores for the single image
    answer = np.argmax(scores)
    if answer == 0:
        print("Prediccion: No es Girasol")
        print(scores[0])
    elif answer == 1:
        # Only report a sunflower when the score is confidently high.
        if (scores[1] > 0.8):
            print("Prediccion: Es Girasol")
        else:
            print(answer)
            print("Prediccion:NO Es Girasol")
    return answer


predict('./Pruebas/E8.jpg')
| null |
Archivos de entrenamiento/Clasificador.py
|
Clasificador.py
|
py
| 1,612 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.load_img",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 29,
"usage_type": "call"
}
] |
43954339
|
import numpy as np
import scipy as sc
import time
import imageio
import copy
import torch as K
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import gym_wmgds as gym
from sldr.algorithms.ddpg import DDPG_BD
from sldr.experience import Normalizer
from sldr.exploration import Noise
from sldr.utils import Saver, Summarizer, get_params, running_mean
from sldr.agents.basic import Actor
from sldr.agents.basic import Critic
import pdb
import matplotlib
import matplotlib.pyplot as plt
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
# Run on GPU when available; tensors created below should follow this device.
device = K.device("cuda" if K.cuda.is_available() else "cpu")
dtype = K.float32  # default floating-point dtype used throughout this module
def init(config, agent='robot', her=False, object_Qfunc=None, backward_dyn=None, object_policy=None, reward_fun=None):
    """Build the model and experiment arguments from *config*.

    Creates the vectorised gym environments, exploration noise, the
    actor/critic model, observation normalizers and the (optionally
    hindsight) replay buffer.

    Args:
        config: dict of hyperparameters (env id, seed, learning rates,
            buffer size, ...). NOTE: mutated in place — 'episode_length'
            and 'observation_space' are added below.
        agent: 'robot' (agent_id 0) or 'object' (agent_id 1).
        her: when True, replay sampling uses hindsight ('future') goals.
        object_Qfunc, backward_dyn, object_policy, reward_fun: optional
            pre-trained components forwarded to the model constructor.

    Returns:
        (model, experiment_args) where experiment_args is
        ((envs, envs_render), memory, noise, config, normalizer, agent_id).
    """
    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']
    N_ENVS = config['n_envs']
    def make_env(env_id, i_env, env_type='Fetch', ai_object=False):
        # Returns a thunk so SubprocVecEnv can construct each env in its
        # own subprocess; each env gets a distinct seed offset.
        def _f():
            if env_type == 'Fetch':
                env = gym.make(env_id, n_objects=config['max_nb_objects'],
                                    obj_action_type=config['obj_action_type'],
                                    observe_obj_grp=config['observe_obj_grp'],
                                    obj_range=config['obj_range'])
            elif env_type == 'Hand':
                env = gym.make(env_id, obj_action_type=config['obj_action_type'])
            elif env_type == 'Others':
                env = gym.make(env_id)
            # Flatten the dict observation into a single vector.
            keys = env.observation_space.spaces.keys()
            env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
            env.seed(SEED+10*i_env)
            env.unwrapped.ai_object = ai_object
            return env
        return _f
    # Choose the env family and action sizes from the env-id string.
    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME and 'Flex' not in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, n_objects=config['max_nb_objects'],
                            obj_action_type=config['obj_action_type'],
                            observe_obj_grp=config['observe_obj_grp'],
                            obj_range=config['obj_range'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch', agent == 'object') for i_env in range(N_ENVS)])
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch', agent == 'object') for i_env in range(1)])
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME and 'Flex' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, n_objects=config['max_nb_objects'],
                            obj_action_type=config['obj_action_type'],
                            observe_obj_grp=config['observe_obj_grp'],
                            obj_range=config['obj_range'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch', agent == 'object') for i_env in range(N_ENVS)])
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch', agent == 'object') for i_env in range(1)])
        n_rob_actions = 4
        n_actions = 2 * len(config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, obj_action_type=config['obj_action_type'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Hand', agent == 'object') for i_env in range(N_ENVS)])
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Hand', agent == 'object') for i_env in range(N_ENVS)])
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    else:
        dummy_env = gym.make(ENV_NAME)
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Others', agent == 'object') for i_env in range(N_ENVS)])
        envs_render = None
    def her_reward_fun(ag_2, g, info):  # vectorized
        # Delegates to the env's goal-based reward for hindsight relabelling.
        return dummy_env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
    K.manual_seed(SEED)
    np.random.seed(SEED)
    # Flat observation size = per-agent observation + desired goal.
    observation_space = dummy_env.observation_space.spaces['observation'].shape[1] + dummy_env.observation_space.spaces['desired_goal'].shape[0]
    # (robot actions, object actions, combined actions) boxes.
    action_space = (gym.spaces.Box(-1., 1., shape=(n_rob_actions,), dtype='float32'),
                    gym.spaces.Box(-1., 1., shape=(n_actions-n_rob_actions,), dtype='float32'),
                    gym.spaces.Box(-1., 1., shape=(n_actions,), dtype='float32'))
    GAMMA = config['gamma']
    clip_Q_neg = config['clip_Q_neg'] if config['clip_Q_neg'] < 0 else None
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']
    MEM_SIZE = config['buffer_length']
    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']
    OUT_FUNC = K.tanh
    # Algorithm selection also picks the matching buffer / HER sampler.
    if config['agent_alg'] == 'DDPG_BD':
        MODEL = DDPG_BD
        from sldr.replay_buffer import ReplayBuffer
        from sldr.her_sampler import make_sample_her_transitions
    elif config['agent_alg'] == 'MADDPG_BD':
        MODEL = MADDPG_BD
        from sldr.replay_buffer import ReplayBuffer_v2 as ReplayBuffer
        from sldr.her_sampler import make_sample_her_transitions_v2 as make_sample_her_transitions
    #exploration initialization
    if agent == 'robot':
        agent_id = 0
        noise = Noise(action_space[0].shape[0], sigma=0.2, eps=0.3)
    elif agent == 'object':
        agent_id = 1
        #noise = Noise(action_space[1].shape[0], sigma=0.2, eps=0.3)
        noise = Noise(action_space[1].shape[0], sigma=0.05, eps=0.2)
    # Side effect: store env-derived values back into the shared config.
    config['episode_length'] = dummy_env._max_episode_steps
    config['observation_space'] = dummy_env.observation_space
    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)) # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space, action_space, optimizer,
                  Actor, Critic, loss_func, GAMMA, TAU, out_func=OUT_FUNC, discrete=False,
                  regularization=REGULARIZATION, normalized_rewards=NORMALIZED_REWARDS,
                  agent_id=agent_id, object_Qfunc=object_Qfunc, backward_dyn=backward_dyn,
                  object_policy=object_policy, reward_fun=reward_fun, clip_Q_neg=clip_Q_neg
                  )
    normalizer = [Normalizer(), Normalizer()]
    # Warm up the observation normalizer with one rollout in the dummy env.
    for _ in range(1):
        state_all = dummy_env.reset()
        for _ in range(config['episode_length']):
            model.to_cpu()
            obs = [K.tensor(obs, dtype=K.float32).unsqueeze(0) for obs in state_all['observation']]
            goal = K.tensor(state_all['desired_goal'], dtype=K.float32).unsqueeze(0)
            # Observation normalization
            obs_goal = []
            obs_goal.append(K.cat([obs[agent_id], goal], dim=-1))
            if normalizer[agent_id] is not None:
                obs_goal[0] = normalizer[agent_id].preprocess_with_update(obs_goal[0])
            action = model.select_action(obs_goal[0], noise).cpu().numpy().squeeze(0)
            # Robot actions fill the front of the env action vector,
            # object actions fill the tail.
            action_to_env = np.zeros_like(dummy_env.action_space.sample())
            if agent_id == 0:
                action_to_env[0:action.shape[0]] = action
            else:
                action_to_env[-action.shape[0]::] = action
            next_state_all, _, _, _ = dummy_env.step(action_to_env)
            # Move to the next state
            state_all = next_state_all
    #memory initilization
    if her:
        sample_her_transitions = make_sample_her_transitions('future', 4, her_reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions('none', 4, her_reward_fun)
    # Per-episode storage shapes for observations, goals and actions.
    buffer_shapes = {
        'o' : (config['episode_length'], dummy_env.observation_space.spaces['observation'].shape[1]*2),
        'ag' : (config['episode_length'], dummy_env.observation_space.spaces['achieved_goal'].shape[0]),
        'g' : (config['episode_length'], dummy_env.observation_space.spaces['desired_goal'].shape[0]),
        'u' : (config['episode_length']-1, action_space[2].shape[0])
    }
    memory = ReplayBuffer(buffer_shapes, MEM_SIZE, config['episode_length'], sample_her_transitions)
    experiment_args = ((envs, envs_render), memory, noise, config, normalizer, agent_id)
    return model, experiment_args
def back_to_dict(state, config):
    """Split a flat batched state array back into a goal-style dict.

    The flattened layout per row is: achieved goal, desired goal, then the
    per-agent observations concatenated. The observation slice is reshaped
    to (n_agents, batch, obs_len).
    """
    obs_space = config['observation_space']
    goal_len = obs_space.spaces['desired_goal'].shape[0]
    n_agents = obs_space.spaces['observation'].shape[0]
    obs_len = obs_space.spaces['observation'].shape[1]
    return {
        'achieved_goal': state[:, :goal_len],
        'desired_goal': state[:, goal_len:2 * goal_len],
        # swapaxes puts the agent axis first: (n_agents, batch, obs_len)
        'observation': state[:, 2 * goal_len:].reshape(-1, n_agents, obs_len).swapaxes(0, 1),
    }
def rollout(env, model, noise, config, normalizer=None, render=False, agent_id=0, ai_object=False, rob_policy=[0., 0.]):
    """Run one vectorized episode and collect per-agent trajectories.

    Args:
        env: vectorized environment (exposes num_envs, reset, step, render).
        model: agent exposing select_action / get_obj_action / get_obj_reward.
        noise: exploration noise passed through to model.select_action.
        config: experiment config dict ('episode_length', 'env_id', spaces).
        normalizer: per-agent observation normalizers (index 0 and 1); entries
            may be None to disable normalization for that agent.
        render: if True, collect rgb_array frames (stored in `frames`; note the
            frames list is not returned — presumably only used interactively).
        agent_id: which of the two agents this model controls (0 = robot side).
        ai_object: when agent_id == 0, also fill the object half of the action
            from model.get_obj_action.
        rob_policy: [scale, offset] for the scripted robot policy used when
            agent_id == 1. NOTE(review): mutable default list — never mutated
            here, but a tuple default would be safer.

    Returns:
        (trajectories dict with keys 'o','ag','g','u', per-env episode_reward,
         per-env is_success array, per-env final goal distance)
    """
    trajectories = []
    for i_agent in range(2):
        trajectories.append([])
    # monitoring variables
    episode_reward = np.zeros(env.num_envs)
    frames = []
    state_all = env.reset()
    state_all = back_to_dict(state_all, config)
    for i_step in range(config['episode_length']):
        # Action selection happens on CPU.
        model.to_cpu()
        obs = [K.tensor(obs, dtype=K.float32) for obs in state_all['observation']]
        goal = K.tensor(state_all['desired_goal'], dtype=K.float32)
        # Observation normalization: each agent sees (own obs ++ shared goal).
        obs_goal = []
        for i_agent in range(2):
            obs_goal.append(K.cat([obs[i_agent], goal], dim=-1))
            if normalizer[i_agent] is not None:
                obs_goal[i_agent] = normalizer[i_agent].preprocess_with_update(obs_goal[i_agent])
        action = model.select_action(obs_goal[agent_id], noise).cpu().numpy()
        if agent_id == 0:
            # Robot agent controls the leading slice of the env action vector.
            action_to_env = np.zeros((len(action), len(env.action_space.sample())))
            action_to_env[:,0:action.shape[1]] = action
            if ai_object:
                # Fill the object's slice from the learned object policy.
                action_to_env[:, action.shape[1]::] = model.get_obj_action(obs_goal[1]).cpu().numpy()
            action_to_mem = action_to_env
        else:
            # Object agent: robot part is a scripted policy (scaled random + offset),
            # the trailing slice is this agent's action.
            action_to_env = np.zeros((len(action), len(env.action_space.sample())))
            action_to_env[:,] = env.action_space.sample() * rob_policy[0] + np.ones_like(env.action_space.sample()) * rob_policy[1]
            action_to_env[:,-action.shape[1]::] = action
            action_to_mem = action_to_env
        next_state_all, reward, done, info = env.step(action_to_env)
        next_state_all = back_to_dict(next_state_all, config)
        reward = K.tensor(reward, dtype=dtype).view(-1,1)  # `dtype` is a module-level constant
        next_obs = [K.tensor(next_obs, dtype=K.float32) for next_obs in next_state_all['observation']]
        # Observation normalization (no stats update on the next-state pass).
        next_obs_goal = []
        for i_agent in range(2):
            next_obs_goal.append(K.cat([next_obs[i_agent], goal], dim=-1))
            if normalizer[i_agent] is not None:
                next_obs_goal[i_agent] = normalizer[i_agent].preprocess(next_obs_goal[i_agent])
        # For monitoring: add intrinsic object reward when an object critic exists.
        if model.object_Qfunc is None:
            episode_reward += reward.squeeze(1).cpu().numpy()
        else:
            r_intr = model.get_obj_reward(obs_goal[1], next_obs_goal[1])
            if model.masked_with_r:
                episode_reward += (r_intr * K.abs(reward) + reward).squeeze(1).cpu().numpy()
            else:
                episode_reward += (r_intr + reward).squeeze(1).cpu().numpy()
        # Record the transition from each agent's point of view.
        for i_agent in range(2):
            state = {
                'observation'   : state_all['observation'][i_agent],
                'achieved_goal' : state_all['achieved_goal'],
                'desired_goal'  : state_all['desired_goal']
            }
            next_state = {
                'observation'   : next_state_all['observation'][i_agent],
                'achieved_goal' : next_state_all['achieved_goal'],
                'desired_goal'  : next_state_all['desired_goal']
            }
            trajectories[i_agent].append((state.copy(), action_to_mem, reward, next_state.copy(), done))
        goal_a = state_all['achieved_goal']
        goal_b = state_all['desired_goal']
        ENV_NAME = config['env_id']
        if 'Rotate' in ENV_NAME:
            # Rotation tasks: first 3 goal dims are position — compare only the rest.
            goal_a = goal_a[:,3:]
            goal_b = goal_b[:,3:]
        # Move to the next state
        state_all = next_state_all
        # Record frames
        if render:
            frames.append(env.render(mode='rgb_array')[0])
    # Distance at the last step (goal_a/goal_b come from the final loop iteration).
    distance = np.linalg.norm(goal_a - goal_b, axis=-1)
    # Re-pack the python trajectory list into the (time-major) arrays the
    # replay buffer expects: 'o' concatenates both agents' observations.
    obs, ags, goals, acts = [], [], [], []
    for trajectory in trajectories:
        obs.append([])
        ags.append([])
        goals.append([])
        acts.append([])
        for i_step in range(config['episode_length']):
            obs[-1].append(trajectory[i_step][0]['observation'])
            ags[-1].append(trajectory[i_step][0]['achieved_goal'])
            goals[-1].append(trajectory[i_step][0]['desired_goal'])
            if (i_step < config['episode_length'] - 1):
                acts[-1].append(trajectory[i_step][1])
    trajectories = {
        'o' : np.concatenate(obs,axis=-1).swapaxes(0,1),
        'ag' : np.asarray(ags)[0,].swapaxes(0,1),
        'g' : np.asarray(goals)[0,].swapaxes(0,1),
        'u' : np.asarray(acts)[0,].swapaxes(0,1),
    }
    info = np.asarray([i_info['is_success'] for i_info in info])
    return trajectories, episode_reward, info, distance
def run(model, experiment_args, train=True):
    """Main train/evaluation loop.

    Args:
        model: agent with update_parameters / update_target / update_backward*.
        experiment_args: tuple ((envs_train, envs_render), memory, noise,
            config, normalizer, agent_id) as produced by the setup function.
        train: when True run optimization cycles; when False only evaluate.

    Returns:
        ((all rewards, all successes, all distances),
         (best critic state_dict, best actor state_dict, best normalizer copy))

    NOTE(review): the best-model variables are only assigned inside the
    success-improvement branch; best_succeess starts at -1 so the first
    episode always assigns them (success rate >= 0).
    """
    total_time_start = time.time()
    envs, memory, noise, config, normalizer, agent_id = experiment_args
    envs_train, envs_render = envs
    N_EPISODES = config['n_episodes'] if train else config['n_episodes_test']
    N_CYCLES = config['n_cycles']
    N_BATCHES = config['n_batches']
    N_TEST_ROLLOUTS = config['n_test_rollouts']
    BATCH_SIZE = config['batch_size']
    # Per-episode monitoring accumulators.
    episode_reward_all = []
    episode_success_all = []
    episode_distance_all = []
    episode_reward_mean = []
    episode_success_mean = []
    episode_distance_mean = []
    critic_losses = []
    actor_losses = []
    backward_losses = []
    backward_otw_losses = []
    best_succeess = -1
    for i_episode in range(N_EPISODES):
        episode_time_start = time.time()
        if train:
            for i_cycle in range(N_CYCLES):
                # With probability ai_object_rate let the learned object policy act.
                ai_object = 1 if np.random.rand() < config['ai_object_rate'] else 0
                trajectories, _, _, _ = rollout(envs_train, model, noise, config, normalizer, render=False, agent_id=agent_id, ai_object=ai_object, rob_policy=config['rob_policy'])
                memory.store_episode(trajectories.copy())
                for i_batch in range(N_BATCHES):
                    model.to_cuda()
                    batch = memory.sample(BATCH_SIZE)
                    critic_loss, actor_loss = model.update_parameters(batch, normalizer)
                    # Only keep the last batch's losses per cycle for plotting.
                    if i_batch == N_BATCHES - 1:
                        critic_losses.append(critic_loss)
                        actor_losses.append(actor_loss)
                model.update_target()
                if agent_id == 1:
                    # Object agent additionally trains the on-the-way backward model.
                    for i_batch in range(N_BATCHES):
                        batch = memory.sample(BATCH_SIZE)
                        backward_otw_loss = model.update_backward_otw(batch, normalizer)
                        if i_batch == N_BATCHES - 1:
                            backward_otw_losses.append(backward_otw_loss)
            # <-- end loop: i_cycle
            plot_durations(np.asarray(critic_losses), np.asarray(actor_losses))
        # Evaluation rollouts (run in both train and test mode).
        episode_reward_cycle = []
        episode_succeess_cycle = []
        episode_distance_cycle = []
        rollout_per_env = N_TEST_ROLLOUTS // config['n_envs']
        for i_rollout in range(rollout_per_env):
            # NOTE(review): when config['render'] == 2 this renders every
            # second episode (i_episode % 2 == 0) — confirm that is intended.
            render = config['render'] == 2 and i_episode % config['render'] == 0
            _, episode_reward, success, distance = rollout(envs_train, model, False, config, normalizer=normalizer, render=render, agent_id=agent_id, ai_object=False, rob_policy=config['rob_policy'])
            episode_reward_cycle.extend(episode_reward)
            episode_succeess_cycle.extend(success)
            episode_distance_cycle.extend(distance)
        render = (config['render'] == 1) and (i_episode % config['render'] == 0) and (envs_render is not None)
        if render:
            for i_rollout in range(10):
                _, _, _, _ = rollout(envs_render, model, False, config, normalizer=normalizer, render=render, agent_id=agent_id, ai_object=False, rob_policy=config['rob_policy'])
        # <-- end loop: i_rollout
        ### MONITORING ###
        episode_reward_all.append(episode_reward_cycle)
        episode_success_all.append(episode_succeess_cycle)
        episode_distance_all.append(episode_distance_cycle)
        episode_reward_mean.append(np.mean(episode_reward_cycle))
        episode_success_mean.append(np.mean(episode_succeess_cycle))
        episode_distance_mean.append(np.mean(episode_distance_cycle))
        plot_durations(np.asarray(episode_reward_mean), np.asarray(episode_success_mean))
        # Snapshot the best model (by evaluation success rate) seen so far.
        if best_succeess < np.mean(episode_succeess_cycle):
            bestmodel_critic = model.critics[0].state_dict()
            bestmodel_actor = model.actors[0].state_dict()
            bestmodel_normalizer = copy.deepcopy(normalizer)
            best_succeess = np.mean(episode_succeess_cycle)
        if config['verbose'] > 0:
            # Printing out
            if (i_episode+1)%1 == 0:
                print("==> Episode {} of {}".format(i_episode + 1, N_EPISODES))
                print('  | Id exp: {}'.format(config['exp_id']))
                print('  | Exp description: {}'.format(config['exp_descr']))
                print('  | Env: {}'.format(config['env_id']))
                print('  | Process pid: {}'.format(config['process_pid']))
                print('  | Running mean of total reward: {}'.format(episode_reward_mean[-1]))
                print('  | Success rate: {}'.format(episode_success_mean[-1]))
                print('  | Distance to target {}'.format(episode_distance_mean[-1]))
                print('  | Time episode: {}'.format(time.time()-episode_time_start))
                print('  | Time total: {}'.format(time.time()-total_time_start))
    # <-- end loop: i_episode
    if train and agent_id==1:
        # Final pass: train the backward model on everything in the buffer.
        print('Training Backward Model')
        model.to_cuda()
        for _ in range(N_EPISODES*N_CYCLES):
            for i_batch in range(N_BATCHES):
                batch = memory.sample(BATCH_SIZE)
                backward_loss = model.update_backward(batch, normalizer)
                if i_batch == N_BATCHES - 1:
                    backward_losses.append(backward_loss)
        plot_durations(np.asarray(backward_otw_losses), np.asarray(backward_losses))
    if train:
        print('Training completed')
    else:
        print('Test completed')
    return (episode_reward_all, episode_success_all, episode_distance_all), (bestmodel_critic, bestmodel_actor, bestmodel_normalizer)
# set up matplotlib: detect whether we are running inside a notebook
# (inline backend) so plot_durations can refresh the cell output in place.
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display
# Interactive mode so plt.pause in plot_durations redraws without blocking.
plt.ion()
def plot_durations(p, r):
    """Redraw the live training figure with the two series `p` and `r`.

    Reuses figure 2 so repeated calls update a single window; inside a
    notebook the cell output is cleared and redrawn instead.
    """
    plt.figure(2)
    plt.clf()
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    for series in (p, r):
        plt.plot(series)
    # A brief pause lets the GUI event loop actually repaint.
    plt.pause(0.001)
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())
| null |
sldr/main.py
|
main.py
|
py
| 19,750 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.device",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "gym_wmgds.make",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.make",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.make",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.wrappers.FlattenDictWrapper",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.wrappers",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "gym_wmgds.make",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.make",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.make",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.make",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "baselines.common.vec_env.subproc_vec_env.SubprocVecEnv",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "gym_wmgds.spaces.Box",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.spaces",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "gym_wmgds.spaces.Box",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.spaces",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "gym_wmgds.spaces.Box",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "gym_wmgds.spaces",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "torch.tanh",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "sldr.algorithms.ddpg.DDPG_BD",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "sldr.exploration.Noise",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "sldr.exploration.Noise",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "sldr.agents.basic.Actor",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "sldr.agents.basic.Critic",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "sldr.experience.Normalizer",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "sldr.her_sampler.make_sample_her_transitions_v2",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "sldr.her_sampler.make_sample_her_transitions_v2",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "sldr.replay_buffer.ReplayBuffer_v2",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "matplotlib.get_backend",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 457,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "IPython.display.clear_output",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "IPython.display",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "IPython.display.display",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "IPython.display",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 462,
"usage_type": "name"
}
] |
509299678
|
import numpy as np
import matplotlib.pyplot as plt

# Lay a grid over the input space
X = np.arange(0, 10, 0.1)
# Evaluate function
Y = np.sin(X)
# Plotting: save the figure BEFORE plt.show() — show() blocks until the
# window is closed, after which the current figure may be torn down and a
# trailing savefig can write an empty image.
fig = plt.figure()
plt.plot(X, Y, ".")
plt.savefig("sin.png")
plt.show()
| null |
EX1/plot_sin.py
|
plot_sin.py
|
py
| 233 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.arange",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
}
] |
141050393
|
import os
from collections import defaultdict
import sys
import subprocess
import json
import math
import collections
def test(pathInput):
    """Run a trained shift-reduce dependency parser over `pathInput`.

    Loads weights/bias from "model.json", parses each sentence in the
    CoNLL-style input file, writes the predicted trees to "out.dep", and
    finally invokes the external grading script on the result.
    """
    weight = {}
    bias = {}
    # The three parser operations scored against each other at every step.
    sOperation = ["shift", "reduce left", "reduce right"]
    i_phi = {}  # feature string -> feature index
    pathModel = "model.json"
    pathOutput = "out.dep"

    class cElement:
        # One token of the sentence (index 0 is the artificial ROOT).
        def __init__(self, index, word, POS, head, label):
            self.index = index
            self.word = word
            self.POS = POS
            self.head = head
            self.children = []
            self.label = label

    # at first, import model.
    with open(pathModel, "r") as fm:
        model = json.load(fm)
        i_phi = model["i_phi"]
        for i in sOperation:
            weight[i] = [0]*len(i_phi)
            bias[i] = 0
        # Weights are stored sparsely as {op: {feature_index_str: weight}}.
        for i, val in model["weight"].items():
            for sj, w in val.items():
                j = int(sj)
                weight[i][j] = w
        for i, b in model["bias"].items():
            bias[i] = b
    with open(pathOutput, "w") as fo:
        with open(pathInput, "r") as fi:
            queue = collections.deque()
            t_list = [cElement(0, "ROOT", "ROOT", 0, "ROOT")]
            for line in fi:
                line = line.strip()
                # Blank line = end of sentence: parse the queued tokens.
                if line == "":
                    if len(queue) == 0:
                        break
                    # start parsing this sentence
                    stack = collections.deque()
                    stack.append(cElement(0, "ROOT", "ROOT", 0, ""))
                    stack.append(queue.popleft())
                    feats = {}
                    while len(queue) > 0 or len(stack) > 1:
                        if len(stack) <= 1:
                            if len(queue) == 0:
                                break
                            else:
                                stack.append(queue.popleft())
                                continue
                        em1 = stack.pop()   # top of stack
                        em2 = stack.pop()   # second element
                        feats = []
                        if len(queue) > 0:
                            # Features pairing stack top with the queue front.
                            e1 = queue[0]
                            feats.append(em1.word + "->" + e1.word)
                            feats.append(em1.POS + "->" + e1.POS)
                            feats.append(em1.POS + "->" + e1.word)
                            feats.append(em1.word + "->" + e1.POS)
                        # Features pairing the two stack elements.
                        feats.append(em2.word + "->" + em1.word)
                        feats.append(em2.POS + "->" + em1.POS)
                        feats.append(em2.POS + "->" + em1.word)
                        feats.append(em2.word + "->" + em1.POS)
                        j_feats = defaultdict(lambda: 0)
                        for feat in feats:
                            if feat in i_phi:
                                j = i_phi[feat]
                                j_feats[j] += 1
                        # Softmax over operation scores.
                        z = defaultdict(lambda: 0)
                        zc = defaultdict(lambda: 0)
                        Zp = 0
                        for i in sOperation:
                            y_i = 0
                            for j, phi_j in j_feats.items():
                                # NOTE(review): bias[i] is added once per active
                                # feature, not once per score. If the training
                                # code used y = w.phi + b this is a bug — but it
                                # must stay consistent with training; verify there
                                # before changing.
                                y_i += weight[i][j]*phi_j + bias[i]
                            zc[i] = math.exp(y_i)
                            Zp += zc[i]
                        for i in sOperation:
                            z[i] = zc[i]/Zp
                        # Greedy transition choice; shift only if the queue is non-empty.
                        if z["shift"] >= z["reduce left"] and z["shift"] >= z["reduce right"] and len(queue) > 0:
                            stack.append(em2)
                            stack.append(em1)
                            stack.append(queue.popleft())
                        elif z["reduce left"] >= z["reduce right"]:
                            t_list[em2.index].head = em1.index
                            stack.append(em1)
                        else:
                            t_list[em1.index].head = em2.index
                            stack.append(em2)
                    # Emit the predicted tree in CoNLL format.
                    for i in range(1, len(t_list)):
                        e_i = t_list[i]
                        print(str(e_i.index) + "\t" + e_i.word + "\t" + e_i.word + "\t" + e_i.POS + "\t" + e_i.POS + "\t_\t" + str(e_i.head) + "\t" + e_i.label, file=fo)
                    print("", file=fo)
                    t_list = [cElement(0, "ROOT", "ROOT", 0, "ROOT")]
                    continue
                # Non-blank line: one token row (tab-separated CoNLL columns).
                index, word, no, POS, no, no, no, label = line.split("\t")
                queue.append(cElement(int(index), word, POS, -1, label))
                t_list.append(cElement(int(index), word, POS, -1, label))
    # Score the produced parses with the external grading script.
    subprocess.call(["python3", "../../script/grade-dep2.py",pathInput,"out.dep"])
# Evaluate on the training set (fit quality) and the held-out test set
# (generalization). Side effect: each call overwrites out.dep.
print("training accuracy")
test("../../data/mstparser-en-train.dep")
print("generalized accuracy")
test("../../data/mstparser-en-test.dep")
| null |
hShibata/tutorial11/test-sr.py
|
test-sr.py
|
py
| 4,847 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 120,
"usage_type": "call"
}
] |
448331769
|
import numpy as np
import csv
import nibabel as nib
import skimage.transform
import matplotlib as mptlib
mptlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
import math
import scipy as sp
import scipy.stats as stats
from scipy.signal import convolve2d
from preprocess import window
# Target in-plane image size (pixels per side) for the network input.
npx = 256
# Hounsfield-unit window applied to CT intensities.
hu_lb = -100
hu_ub = 400
# Range of the local-noise (standard deviation) histogram axis.
std_lb = 0
std_ub = 100
def resize_to_nn(img, transpose=True):
    """Nearest-neighbour resize of a 3-D volume to (npx, npx, nslices).

    Volumes already at the target in-plane size are passed through
    untouched. With transpose=True the result is returned slice-major,
    i.e. axes reordered to (slice, y, x).
    """
    if img.shape[1] == npx:
        resized = img
    else:
        # order=0 keeps label values intact (no interpolation blending).
        resized = skimage.transform.resize(
            img,
            (npx, npx, img.shape[2]),
            order=0,
            mode='constant',
            preserve_range=True)
    return resized.transpose(2, 1, 0) if transpose else resized
def reorient(imgloc, segloc=None):
    """Load a NIfTI image (and optional segmentation) in canonical orientation.

    Returns (image voxels as int16, the ORIGINAL image header, segmentation
    voxels as uint8 or None). If the segmentation's affine disagrees with the
    image's after canonicalization, the segmentation is re-wrapped with the
    image's original affine/header and canonicalized again so both align.
    """
    imagedata = nib.load(imgloc)
    orig_affine = imagedata.affine
    orig_header = imagedata.header
    imagedata = nib.as_closest_canonical(imagedata)
    img_affine = imagedata.affine
    numpyimage = imagedata.get_data().astype(np.int16)
    numpyseg = None
    if segloc is not None:
        segdata = nib.load(segloc)
        old_affine = segdata.affine
        segdata = nib.as_closest_canonical(segdata)
        seg_affine = segdata.affine
        if not np.allclose(seg_affine, img_affine):
            # Segmentation header disagrees with the image: assume it shares
            # the image's geometry and rebuild it with the image's affine.
            segcopy = nib.load(segloc).get_data()
            copy_header = orig_header.copy()
            segdata = nib.nifti1.Nifti1Image(segcopy, orig_affine, header=copy_header)
            segdata = nib.as_closest_canonical(segdata)
            seg_affine = segdata.affine
        numpyseg = segdata.get_data().astype(np.uint8)
    return numpyimage, orig_header, numpyseg
# MDA cohort: tab-separated index of image/label paths, and the root they
# are relative to. (Previous small-subset index kept for reference below.)
#dbfile_mda = '/rsrch1/ip/jacctor/livermask/trainingdata-mda-small.csv'
dbfile_mda = '/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse/datalocation/trainingdata.csv'
rootloc_mda = '/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse'
def get_imgs(dbfile = '/rsrch1/ip/jacctor/livermask/trainingdata_small.csv', rootloc = '/rsrch1/ip/jacctor/LiTS/LiTS'):
    """Load every scan listed in `dbfile` into two dense slice arrays.

    Two passes over the CSV: the first only counts total slices (so the
    output arrays can be preallocated), the second actually loads, resizes
    and copies the data. Unreadable files are skipped in both passes.

    Returns (imgs, segs), each of shape (total_slices, npx, npx).
    """
    nscans = 0
    nvalid = 0
    nslices = 0
    # Pass 1: count slices across all readable scans.
    with open(dbfile, 'r') as csvfile:
        myreader=csv.DictReader(csvfile, delimiter='\t')
        for row in myreader:
            imageloc = '%s/%s' % (rootloc, row['image'])
            truthloc = '%s/%s' % (rootloc, row['label'])
            print(imageloc, truthloc)
            nscans += 1
            try:
                npimg, header, npseg = reorient(imageloc, segloc=truthloc)
                nslices += header['dim'][3]
                nvalid += 1
            except nib.filebasedimages.ImageFileError:
                print("could not read file")
    print('\ndone precomputing size: ', nslices, ' slices, from ', nvalid, ' scans out of ', nscans, ' scans.\n')
    imgs = np.empty((nslices,npx,npx))
    segs = np.empty((nslices,npx,npx))
    sidx = 0
    # Pass 2: load each scan and copy its slices into the preallocated arrays.
    with open(dbfile, 'r') as csvfile:
        myreader = csv.DictReader(csvfile, delimiter='\t')
        for row in myreader:
            imageloc = '%s/%s' % (rootloc, row['image'])
            truthloc = '%s/%s' % (rootloc, row['label'])
            print(imageloc, truthloc)
            try:
                npimg, header, npseg = reorient(imageloc, segloc=truthloc)
                npimg = resize_to_nn(npimg, transpose=True).astype(np.int16)
                npseg = resize_to_nn(npseg, transpose=True).astype(np.uint8)
                sss = header['dim'][3]
                imgs[sidx:sidx+sss,...] = npimg
                segs[sidx:sidx+sss,...] = npseg
                sidx += sss
            except nib.filebasedimages.ImageFileError:
                print("ignoring the file I could't read earlier")
    return imgs, segs
def get_noise_2d(data, k=5):
    """Per-pixel noise estimate: local standard deviation over a k x k window.

    Uses a box filter (zero-padded at the borders) to get the local mean,
    then the same filter over the squared residuals to get the local
    variance, and returns its square root.
    """
    box = np.full((k, k), 1.0 / (k ** 2))
    local_mean = convolve2d(data, box, mode='same', boundary='fill', fillvalue=0)
    residual_sq = np.square(data - local_mean)
    local_var = convolve2d(residual_sq, box, mode='same', boundary='fill', fillvalue=0)
    return np.sqrt(local_var)
# performs slicewise
# checking noise over 3d needs to deal with anisotropic voxels
def get_noise_3d(data3d, k=5):
    """Per-voxel noise map for a slice-major volume.

    Applies the 2-D local-stdev estimate independently to each slice
    (axis 0), avoiding cross-slice smoothing over anisotropic voxels.
    """
    stdev = np.zeros_like(data3d)
    for slice_idx in range(data3d.shape[0]):
        stdev[slice_idx] = get_noise_2d(data3d[slice_idx, ...], k=k)
    return stdev
def show_histogram(data, b=100, r=(-990,990)):
    """Display a histogram of every value in `data` (flattened).

    `b` is the bin count and `r` the (lo, hi) value range. Blocks until
    the plot window is dismissed, then closes the figure.
    """
    plt.hist(data.flatten(), bins=b, range=r)
    plt.show()
    plt.close()
def show_histogram_2D(datax, datay, b=(100,10), r=[[-990,990],[0,60]]):
    """Show a 2-D histogram of pixel values vs. local noise, and return the
    mean noise level per pixel-value bin.

    Returns a dict mapping HU value (bin offset + hu_lb) -> mean stdev of
    the pixels in that HU bin, seeded with 0.0 at hu_lb and hu_ub.
    NOTE(review): `r` is a mutable default argument — never mutated here,
    but fragile; also the bin loop starts at 1, skipping the first HU bin.
    """
    print(datax.shape, datay.shape) #datax = pixel values, datay = stdevs
    h = plt.hist2d(datax.flatten(), datay.flatten(), bins=b, range=r)
    plt.show()
    plt.close()
    t3 = time.time()
    # Recompute the same histogram numerically (independent of the plot).
    h = np.histogram2d(datax.flatten(), datay.flatten(), bins=b, range=r)
    distlist = {hu_lb: 0.0, hu_ub: 0.0}
    for hu in range(1,h[0].shape[0]):
        hist_HU = h[0][hu,:]
        if sum(hist_HU):
            # Expand the counts in this HU row back into per-pixel samples
            # at the bin centers (i+0.5) so np.mean gives the weighted mean.
            data_HU = [None]*int(sum(hist_HU))
            idx=0
            for i in range(h[0].shape[1]):
                hval = int(hist_HU[i])
                if hval > 0:
                    for jjj in range(hval):
                        data_HU[idx+jjj] = i+0.5
                    idx += hval
            distlist[hu+hu_lb] = np.mean(data_HU)
    t4 = time.time()
    print('time:', t4-t3)
    return distlist
def process_std(data, b=100, r=(-990,990), show=False):
    """Compute the per-voxel noise (local stdev) map for a volume.

    When `show` is set, a histogram of the noise values over the module's
    std range is displayed first. `b` and `r` are accepted for interface
    compatibility but not used by the computation itself.
    """
    noise_map = get_noise_3d(data, k=5)
    if show:
        show_histogram(noise_map, b=std_ub, r=(std_lb, std_ub))
    return noise_map
def process_std_roi(data):
    """Summarise an ROI's noise values as (mean, standard deviation)."""
    roi = np.asarray(data)
    return roi.mean(), roi.std()
def plot_histogram(data, b=10, r=(-990,990), do_stdev=True, show=False):
    """Compute the noise map for `data` and the per-HU mean-noise table.

    Optionally shows the raw intensity histogram first. Returns
    (stdev volume, dict of HU value -> mean local stdev).
    NOTE(review): the `do_stdev` flag is accepted but never consulted.
    """
    print(data.shape)
    if show:
        show_histogram(data, b=b, r=r)
    stdev = process_std(data, b=b, r=r)
    # Joint histogram over the module-level HU and stdev windows.
    dlist = show_histogram_2D(data, stdev, b=(hu_ub-hu_lb,std_ub-std_lb), r=[[hu_lb, hu_ub],[std_lb, std_ub]])
    return stdev, dlist
def generate_noise_at_val(imgval, dlist):
    """Draw one Gaussian sample centred on `imgval`.

    The spread is looked up in `dlist` (value -> stdev); values with no
    entry are returned unchanged.
    """
    try:
        scale = dlist[imgval]
    except KeyError:
        return imgval
    return np.random.normal(loc=imgval, scale=scale)
def generate_noise_vec(dlist):
    """Return an elementwise-vectorised noise sampler bound to `dlist`."""
    def _sample(val):
        return generate_noise_at_val(val, dlist)
    return np.vectorize(_sample)
def generate_noise_2(img, dlist):
    """Apply value-dependent Gaussian noise to every pixel of `img`."""
    sampler = generate_noise_vec(dlist)
    return sampler(img)
# Script entry: load the MDA cohort, window to the HU range, and compute
# the per-HU noise table.
imgs, segs = get_imgs(dbfile=dbfile_mda,rootloc=rootloc_mda)
imgs = window(imgs, hu_lb, hu_ub)
# Candidate masks kept for reference (unused here):
#liver_idx = (segs > 0) * (segs < 5)
#tumor_idx = (segs >= 2) * (segs <= 3)
#only_liver_idx = liver_idx * (1.0 - tumor_idx)
#all_idx = np.ones_like(segs, dtype=bool)
s,dlist = plot_histogram(imgs, b=hu_ub-hu_lb,r=(hu_lb,hu_ub))
# Interactive noise-injection demo kept for reference (unused here):
#this_img = imgs[10,...]
#plt.imshow(this_img)
#plt.show()
#noisy_img = window(generate_noise_2(this_img, dlist), hu_lb, hu_ub)
#plt.imshow(noisy_img)
#plt.show()
#plt.imshow(this_img - noisy_img)
#plt.show()
| null |
analysis/histogram.py
|
histogram.py
|
py
| 6,801 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "skimage.transform.transform.resize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "skimage.transform.transform",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "skimage.transform",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "nibabel.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nibabel.as_closest_canonical",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "nibabel.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "nibabel.as_closest_canonical",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "nibabel.nifti1.Nifti1Image",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "nibabel.nifti1",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "nibabel.as_closest_canonical",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "csv.DictReader",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "nibabel.filebasedimages",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "nibabel.filebasedimages",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "scipy.signal.convolve2d",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "scipy.signal.convolve2d",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist2d",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.histogram2d",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "numpy.vectorize",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "preprocess.window",
"line_number": 196,
"usage_type": "call"
}
] |
297748593
|
# -*- coding: utf-8 -*-
###############################################################
# PyNLPl - WordAlignment Library for reading GIZA++ A3 files
# by Maarten van Gompel (proycon)
# http://ilk.uvt.nl/~mvgompel
# Induction for Linguistic Knowledge Research Group
# Universiteit van Tilburg
#
# In part using code by Sander Canisius
#
# Licensed under GPLv3
#
#
# This library reads GIZA++ A3 files. It contains three classes over which
# you can iterate to obtain (sourcewords,targetwords,alignment) pairs.
#
# - WordAlignment - Reads target-source.A3.final files, in which each source word is aligned to one target word
# - MultiWordAlignment - Reads source-target.A3.final files, in which each source word may be aligned to multiple target target words
# - IntersectionAlignment - Computes the intersection between the above two alignments
#
#
###############################################################
import bz2
from itertools import izip
def parseAlignment(tokens):  # by Sander Canisius
    """Parse one GIZA++ A3 alignment line, given as a token list.

    The token list is consumed destructively.  The leading "NULL ({ ... })"
    group is skipped; afterwards each "word ({ p1 p2 ... })" group is yielded
    as a ``(word, positions)`` pair with the positions as ints.
    """
    pop = tokens.pop
    assert pop(0) == "NULL"
    # Skip the NULL word's "({ ... })" group entirely.
    while pop(0) != "})":
        pass
    while tokens:
        word = pop(0)
        assert pop(0) == "({"
        positions = []
        while True:
            token = pop(0)
            if token == "})":
                break
            positions.append(int(token))
        yield word, positions
class WordAlignment:
    """Target to Source alignment: reads target-source.A3.final files, in which each source word is aligned to one target word"""

    def __init__(self, filename, encoding=False):
        """Open a target-source GIZA++ A3 file. The file may be bzip2 compressed. If an encoding is specified, proper unicode strings will be returned"""
        if filename.split(".")[-1] == "bz2":
            self.stream = bz2.BZ2File(filename, 'r')
        else:
            self.stream = open(filename)
        self.encoding = encoding

    def __del__(self):
        # Guard: __init__ may have raised before self.stream was assigned,
        # in which case there is nothing to close.
        stream = getattr(self, 'stream', None)
        if stream is not None:
            stream.close()

    def __iter__(self):  # by Sander Canisius
        # An A3 record is three lines: a "#" comment line, the source
        # sentence, and the alignment line.
        line = self.stream.readline()
        while line:
            assert line.startswith("#")
            src = self.stream.readline().split()
            trg = []
            # alignment[i] is the target-word index aligned to source word i,
            # or None when the source word is unaligned.
            alignment = [None for i in xrange(len(src))]
            for i, (targetWord, positions) in enumerate(parseAlignment(self.stream.readline().split())):
                trg.append(targetWord)
                for pos in positions:
                    assert alignment[pos - 1] is None
                    alignment[pos - 1] = i
            if self.encoding:
                yield [unicode(w, self.encoding) for w in src], [unicode(w, self.encoding) for w in trg], alignment
            else:
                yield src, trg, alignment
            line = self.stream.readline()

    def targetword(self, index, targetwords, alignment):
        """Return the aligned targetword for a specified index in the source words, or None if unaligned."""
        # BUGFIX: compare against None explicitly.  Target index 0 is falsy,
        # so the original `if alignment[index]:` wrongly reported source
        # words aligned to the first target word as unaligned.
        if alignment[index] is not None:
            return targetwords[alignment[index]]
        else:
            return None

    def reset(self):
        """Rewind the stream so the file can be iterated again."""
        self.stream.seek(0)
class MultiWordAlignment:
    """Source to Target alignment: reads source-target.A3.final files, in which each source word may be aligned to multiple target words (adapted from code by Sander Canisius)"""

    def __init__(self, filename, encoding=False):
        """Load a target-source GIZA++ A3 file. The file may be bzip2 compressed. If an encoding is specified, proper unicode strings will be returned"""
        if filename.split(".")[-1] == "bz2":
            self.stream = bz2.BZ2File(filename, 'r')
        else:
            self.stream = open(filename)
        self.encoding = encoding

    def __del__(self):
        # Guard: __init__ may have raised before self.stream was assigned.
        stream = getattr(self, 'stream', None)
        if stream is not None:
            stream.close()

    def __iter__(self):
        line = self.stream.readline()
        while line:
            assert line.startswith("#")
            trg = self.stream.readline().split()
            src = []
            # alignment[i] is the list of 0-based target indices aligned to
            # source word i (empty list when unaligned).
            alignment = []
            for i, (word, positions) in enumerate(parseAlignment(self.stream.readline().split())):
                src.append(word)
                alignment.append([p - 1 for p in positions])
            if self.encoding:
                yield [unicode(w, self.encoding) for w in src], [unicode(w, self.encoding) for w in trg], alignment
            else:
                yield src, trg, alignment
            line = self.stream.readline()

    def targetword(self, index, targetwords, alignment):
        """Return the aligned targetword for a specified index in the source words. Multiple words are concatenated together with a space in between"""
        # BUGFIX: alignment[index] is a *list* of target indices; the
        # original `targetwords[alignment[index]]` indexed a list with a
        # list, which raises TypeError.  Join the individual words instead.
        return " ".join(targetwords[x] for x in alignment[index])

    def targetwords(self, index, targetwords, alignment):
        """Return the aligned targetwords for a specified index in the source words"""
        return [targetwords[x] for x in alignment[index]]

    def reset(self):
        """Rewind the stream so the file can be iterated again."""
        self.stream.seek(0)
class IntersectionAlignment:
    """Computes the intersection between a source-target (MultiWordAlignment) and a target-source (WordAlignment) alignment."""

    def __init__(self, source2target, target2source, encoding=False):
        self.s2t = MultiWordAlignment(source2target, encoding)
        self.t2s = WordAlignment(target2source, encoding)
        self.encoding = encoding

    def __iter__(self):
        # Both files must describe the same sentence pairs, in the same order.
        for (src, trg, alignment), (revsrc, revtrg, revalignment) in izip(self.s2t, self.t2s):
            if src != revsrc or trg != revtrg:
                raise Exception("Files are not identical!")
            else:
                # keep only those alignments that are present in both
                intersection = []
                for i, x in enumerate(alignment):
                    # BUGFIX: compare against None explicitly.  Target index 0
                    # is falsy, so the original `if revalignment[i]` silently
                    # dropped alignments to the first target word.
                    if revalignment[i] is not None and revalignment[i] in x:
                        intersection.append(revalignment[i])
                    else:
                        intersection.append(None)
                yield src, trg, intersection

    def reset(self):
        self.s2t.reset()
        self.t2s.reset()
| null |
formats/giza.py
|
giza.py
|
py
| 5,995 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bz2.BZ2File",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "bz2.BZ2File",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "itertools.izip",
"line_number": 152,
"usage_type": "call"
}
] |
213805330
|
from gdsfactory.components.bend_s import bend_s
from gdsfactory.port import Port
from gdsfactory.types import Route
def get_route_sbend(port1: Port, port2: Port, **kwargs) -> Route:
    """Route two ports together with a single S-bend.

    Args:
        port1: start port.
        port2: end port.

    Keyword Args:
        nb_points: number of points.
        with_cladding_box: square bounding box to avoid DRC errors.
        cross_section: function.
        kwargs: cross_section settings.

    .. plot::
        :include-source:

        import gdsfactory as gf

        c = gf.Component("demo_route_sbend")
        mmi1 = c << gf.components.mmi1x2()
        mmi2 = c << gf.components.mmi1x2()
        mmi2.movex(50)
        mmi2.movey(5)
        route = gf.routing.get_route_sbend(mmi1.ports['o2'], mmi2.ports['o1'])
        c.add(route.references)
        c.show()
        c.plot()
    """
    # The S-bend size is simply the displacement between the two ports.
    dx = port2.center[0] - port1.center[0]
    dy = port2.center[1] - port1.center[1]
    sbend = bend_s(size=(dx, dy), **kwargs)
    sbend_ref = sbend.ref()
    sbend_ref.connect("o1", port1)
    return Route(
        references=[sbend_ref],
        length=sbend.info["length"],
        ports=(port1, port2),
    )
if __name__ == "__main__":
    # Manual demo: connect two MMI splitters with an S-bend route and show
    # the layout.  The commented block below is an older multi-port demo
    # kept for reference.
    # import gdsfactory as gf
    # from gdsfactory.routing.sort_ports import sort_ports
    # c = gf.Component("test_get_route_sbend")
    # pitch = 2.0
    # ys_left = [0, 10, 20]
    # N = len(ys_left)
    # ys_right = [(i - N / 2) * pitch for i in range(N)]
    # right_ports = [
    #     gf.Port(f"R_{i}", (0, ys_right[i]), width=0.5, orientation=180, layer=(1, 0))
    #     for i in range(N)
    # ]
    # left_ports = [
    #     gf.Port(f"L_{i}", (-50, ys_left[i]), width=0.5, orientation=0, layer=(1, 0))
    #     for i in range(N)
    # ]
    # left_ports.reverse()
    # right_ports, left_ports = sort_ports(right_ports, left_ports)
    # for p1, p2 in zip(right_ports, left_ports):
    #     route = get_route_sbend(p1, p2, layer=(2, 0))
    #     c.add(route.references)
    # c.show(show_ports=True)
    import gdsfactory as gf

    c = gf.Component("demo_route_sbend")
    mmi1 = c << gf.components.mmi1x2()
    mmi2 = c << gf.components.mmi1x2()
    mmi2.movex(50)
    mmi2.movey(5)
    route = gf.routing.get_route_sbend(mmi1.ports["o2"], mmi2.ports["o1"])
    c.add(route.references)
    c.show()
| null |
gdsfactory/routing/get_route_sbend.py
|
get_route_sbend.py
|
py
| 2,391 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "gdsfactory.port.Port",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "gdsfactory.components.bend_s.bend_s",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "gdsfactory.types.Route",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "gdsfactory.types.Route",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "gdsfactory.Component",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "gdsfactory.components.mmi1x2",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "gdsfactory.components",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "gdsfactory.components.mmi1x2",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "gdsfactory.components",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "gdsfactory.routing.get_route_sbend",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "gdsfactory.routing",
"line_number": 83,
"usage_type": "attribute"
}
] |
461017342
|
# -*- coding: utf-8 -*-
"""Module to render most used components of Beauty Latte."""
from model import HairSalonEs, HairSalon, HairStyleEs, HairStylistEs, CutModelEs
from flask import session
from flaskext.babel import lazy_gettext as _lg
def pc_welcome_hair_stylist(nb_hair_stylist=3, nb_hair_style=2):
    """Get the n hair stylist and n hair style """
    # Only stylists whose weight reaches the sum of the PTS_* scores are
    # picked up (presumably: non-assistant + has profile picture + has at
    # least one hair style) -- verify against HairStylistEs.
    hair_stylists = HairStylistEs.get_pickup(nb_hair_style=nb_hair_style, size=nb_hair_stylist, filter={'bool':
        {'must': {'range': {'weight': {'gte': HairStylistEs.PTS_NOT_ASSISTANT +
        HairStylistEs.PTS_PROFILE_PICTURE + HairStylistEs.PTS_HAIR_STYLE}}}}}, hydrate_hair_style=True)
    return 'beauty/hair_salon/partials/part_welcome_hair_stylist', {'hair_stylists': hair_stylists}
def sm_welcome_cut_model(cut_model):
    """Render the detail partial for a single cut model (smartphone view).

    cut_model: dict
    return: template, values
    """
    template = 'beauty/shared/part_detail_cut_model'
    values = {'cut_model': cut_model}
    return template, values
def sm_welcome_hair_stylist(nb_hairstylist=3):
    """Get n hair stylist to display on top page."""
    # Sort by relevance weight then recency.  The "salon" aggregation
    # buckets stylists per salon, ordered by a script score
    # (weight * 25 + id) -- presumably to break weight ties by newest id;
    # verify against the ES mapping.
    hair_stylists, total = HairStylistEs.get_hair_stylists(size=nb_hairstylist,
        without_profile=False, render='map', sort=[{'weight': 'desc'}, {'created_at': 'desc'}],
        aggs_custom={'salon': {
            'terms': {
                'field': 'hair_salon_id',
                'size': nb_hairstylist,
                'order': {
                    'weight_order': 'desc'
                }
            },
            'aggs': {
                'weight_order': {
                    'sum': {
                        'script': "doc['weight'].value * 25 + doc['id'].value"
                    }
                }
            }
        }
        }
    )
    return 'beauty/hair_salon/partials/part_welcome_hair_stylist', {'hair_stylists': hair_stylists}
def welcome_hair_style(nb_hairstyle=3):
    """Get random hair style from director's choice to display on top page."""
    result = HairStyleEs.pickup_front(director_choice=True, nb_result=nb_hairstyle)
    # hair_salon_id -> salon, to avoid fetching the same salon twice
    salon_cache = {}
    hair_styles_list = []
    # make a list of n first hair style
    hair_styles = result['hair_styles']
    if not hair_styles:
        # There's no director choice so choose from all pictures
        result = HairStyleEs.pickup_front()
        hair_styles = result['hair_styles']
    for idx, hair_style_id in enumerate(hair_styles):
        if idx == nb_hairstyle:
            break
        hair_style = hair_styles[hair_style_id]
        salon_id = hair_style['hair_salon_id']
        if salon_cache.get(salon_id):
            salon = salon_cache.get(salon_id)
        else:
            salon = HairSalonEs.get_by_id(salon_id)
        # Attach the salon document so the template can show salon info.
        hair_style['salon'] = salon
        hair_styles_list.append(hair_style)
        salon_cache.update({salon_id: salon})
    return 'beauty/hair_salon/partials/part_welcome_hair_style', {'hair_styles': hair_styles_list}
def menu_sm_favorite(current_user, hair_salon, salon_favorite=None):
    """Get the hair salon favorite.

    current_user: User
    hair_salon: dict
    salon_favorite: bool
    """
    from model import FavoriteHairSalonEs
    if not salon_favorite and current_user:
        # Favorite documents are keyed "<salon_id>-<user_id>" with the salon
        # as the ES parent document.
        salon_favorite = FavoriteHairSalonEs.get_by_id('%d-%d' % (hair_salon['id'], current_user.id), parent_id=hair_salon['id'])
    return 'beauty/hair_salon/partials/part_sm_salon_favorite', {'favorite': salon_favorite,
        'current_user': current_user, 'hair_salon': hair_salon}
def menu_top_message(current_user, link_message):
    """Get the number of message for the user.

    current_user: int
    link_message: string
    return: template, values
    """
    from model import ChatConversation
    # Unread message count shown as a badge in the top menu.
    nb_message = ChatConversation.get_unread(current_user)
    return 'beauty/shared/menu_top_message', {'nb_message': nb_message,
        'link_message': link_message}
def menu_top_stylist(current_user):
    """Get the data to display the stylist top menu.

    current_user: int
    return: template, values
    """
    # get the stylist profile for the logged-in user (may be falsy)
    stylist = HairStylistEs.get_by_user_id(current_user.id)
    if stylist:
        # get his salon
        stylist["salon"] = HairSalonEs.get_by_id(stylist["hair_salon_id"])
    return 'beauty/shared/menu_top_stylist', {'stylist': stylist}
def have_cut_model(stylist):  # pragma: no cover
    """Tell whether the stylist has active cut models.

    stylist: dict
    return: template, values
    """
    nb_active = CutModelEs.count_active(stylist)
    return 'beauty/shared/have_cut_model', {'have_cut_model': nb_active}
def right_side_salon_information(hair_salon_id):  # pragma: no cover
    """Render the right-side hair salon information partial.

    hair_salon_id: int
    return: template, values
    """
    hair_salon = HairSalonEs.get_by_id(hair_salon_id)
    return 'beauty/shared/part_information', {'hair_salon': hair_salon}
def get_columns(size=3, display="top"):
    """Centralize the column calls to display on the differents pages.

    size: int
    display: str
    return: template, dict
    """
    from model import Entry, Category, ColumnEs
    category_alias = u'beauty'
    category = Category.get_category_by_alias(category_alias)
    columns = []
    total = 0
    data = ColumnEs.search_column(category_ids=[category.id], status=Entry.STATUS_PUBLIC, page=1, limit=size, field='id')
    if data:
        from apps.pc.column.column import columns_from_es_result
        columns, total = columns_from_es_result(data)  # Fetch columns
    if total:
        Entry.prefetch_users(columns)
    if display == "top":
        # Pickup has special css style.
        pickup_column = None
        if columns:
            # set first column as pickup.
            pickup_column = columns[0]
            # remove first column from columns list.
            columns = columns[1:]
        return '/beauty/shared/part_column_top', {'pickup_column': pickup_column, 'columns': columns}
    elif display == "my_page":
        return '/beauty/shared/part_column_user', {'columns': columns}
    # NOTE(review): any other `display` value falls through and returns
    # None -- confirm callers only pass "top" or "my_page".
def menu_left_stylist_page(active, sum_message=0):
    """Manage the menu of the stylist.

    active: str
    sum_message: int
    return: template, dict
    """
    # 'name' holds the page aliases that mark an entry as active; entries
    # with 'status': False are presumably rendered disabled -- confirm in
    # the menu_stylist template.
    list_menu = [
        {'link': 'top', 'name': 'top', 'display': _lg(u'TOP'), 'status': True},
        {'link': 'profile', 'name': ('profile',), 'display': _lg(u'プロフィール編集'), 'icon': 'profile', 'status': True},
        {'link': 'hairstyle', 'name': ('hairstyle',), 'display': _lg(u'ヘアスタイル'), 'icon': 'hairstyle', 'status': True},
        {'link': 'cut-model', 'name': ('cut-model',), 'display': _lg(u'カットモデル'), 'icon': 'cutmodel', 'status': True},
        {'link': 'inbox', 'name': ('message',), 'display': _lg(u'メッセージ'), 'icon': 'message', 'status': True, 'sum_message': sum_message},
        {'link': 'salon', 'name': ('salon',), 'display': _lg(u'サロン登録'), 'icon': 'salon g', 'status': False},
        {'url': 'pc.beauty.hair_salon.feedback', 'name': ('feedback',), 'display': _lg(u'機能要望・ご意見'), 'icon': 'feedback', 'status': True},
        {'link': 'settings', 'name': ('settings',), 'display': _lg(u'設定'), 'icon': 'settings', 'status': True},
    ]
    return '/beauty/shared/menu_stylist', {
        'list_menu': list_menu,
        'active': active,
    }
def menu_left_user_page(active, user, me, is_stylist, sum_message):
    """Manage the menu of user page set selected for the current menu.

    active: str
    user: User
    me: bool
    is_stylist: bool
    sum_message: int
    return: template, dict
    """
    from apps import url_for as url
    list_menu = [
        {'link': 'top', 'name': 'top', 'display': _lg(u'TOP'), 'status': True},
        {'link': 'profile', 'name': ('profile',), 'display': _lg(u'プロフィール編集'), 'icon': 'profile', 'status': True},
        {'link': 'favorite', 'name': ('favorite', 'favorite-hair-style', 'favorite-hair-salon', 'favorite-hair-stylist',
            'favorite-cut-model'), 'display': _lg(u'お気に入り'), 'icon': 'favo', 'status': True},
        {'link': 'review', 'name': ('review',), 'display': _lg(u'クチコミ'), 'icon': 'review g', 'status': False},
    ]
    # add message menu if it's my page
    if me:
        # The review entry (index 3) is moved to the end and its slot is
        # taken by the message entry.
        list_menu.append(list_menu[3])
        list_menu[3] = {'link': 'inbox', 'name': ('message', 'beauty_inbox', 'inbox', 'outbox', 'cutmodel-apply', 'cutmodel-confirm', 'cutmodel-complete', 'compose'), 'display': _lg(u'メッセージ'), 'icon': 'message', 'status': True, 'sum_message': sum_message}
    # add the feedback link
    list_menu.append({'url': url('pc.beauty.hair_salon.feedback'), 'name': ('feedback',), 'display': _lg(u'機能要望・ご意見'), 'icon': 'feedback', 'status': True})
    if not is_stylist:
        list_menu.append({'url': url('pc.beauty.user.stylist_register'), 'name': ('stylist',), 'display': _lg(u'スタイリスト登録'), 'icon': 'stylist', 'status': True, 'li_class': 'border-top'})
    return '/beauty/shared/menu_user', {
        'list_menu': list_menu,
        'active': active,
        'user': user,
        'me': me
    }
def sidebar_near_salon(remote_addr, hair_salon=None, page=1, limit=5):
    """Return the sidebar with list of closest hair salon."""
    coord = []  # [lat, lon]
    id = None  # id of the currently shown salon, excluded from the results
    # get the hair salons near the one we show
    if hair_salon:
        id = hair_salon["id"]
        if (hair_salon.get("pin")):
            coord.append(hair_salon["pin"]["location"]["lat"])
            coord.append(hair_salon["pin"]["location"]["lon"])
    # get the hair salons near the user coord
    if not coord:
        # Fall back to geolocating the visitor's IP address.
        coord = HairSalon.get_location_by_ip(remote_addr)
    near_salon = HairSalonEs.get_salon_near_here(coord, page=page, size=limit, except_id=id)
    return '/beauty/hair_salon/partials/part_salon_near', {
        'near_salon': near_salon
    }
def menu_hair_salon(active):
    """Manage the menu bar set selected for the current menu.

    active: str
    return: template, dict
    """
    # 'status': False entries are features not yet released (news, items,
    # jobs); 'name' is matched against `active` in the template.
    list_menu = [
        {'endpoint': 'pc.beauty.welcome.index_hair', 'name': 'hair', 'display': _lg(u'トップ'), 'display_en': _lg(u'TOP'), 'status': True},
        {'endpoint': 'pc.beauty.hair_salon.index', 'name': 'salon', 'display': _lg(u'ヘアサロン'), 'display_en': _lg(u'SALON'), 'status': True},
        {'endpoint': 'pc.beauty.hair_salon.stylist', 'name': 'stylist', 'display': _lg(u'ヘアスタイリスト'), 'display_en': _lg(u'STYLIST'), 'status': True},
        {'endpoint': 'pc.beauty.hair_salon.hair_catalog', 'name': 'catalog', 'display': _lg(u'ヘアカタログ'), 'display_en': _lg(u'CATALOG'), 'status': True},
        {'endpoint': 'pc.beauty.hair_salon.news', 'name': '', 'display': _lg(u'ニュース'), 'display_en': _lg(u'NEWS'), 'status': False},
        {'endpoint': 'pc.beauty.column.index', 'name': 'column', 'display': _lg(u'コラム'), 'display_en': _lg(u'COLUMN'), 'status': True},
        {'endpoint': 'pc.beauty.hair_salon.items', 'name': '', 'display': _lg(u'ヘアケアアイテム'), 'display_en': _lg(u'ITEM'), 'status': False},
        {'endpoint': 'pc.beauty.hair_salon.jobs', 'name': '', 'display': _lg(u'求人'), 'display_en': _lg(u'RECRUIT'), 'status': False},
        {'endpoint': 'pc.beauty.hair_salon.cut_model', 'name': 'cut-model', 'display': _lg(u'カットモデル'), 'display_en': _lg(u'CUT MODEL'), 'status': True}
    ]
    return '/beauty/shared/main_menu', {
        'list_menu': list_menu,
        'active': active
    }
def menu_detail_hair_salon(hair_salon, current_page):
    """Manage the menu bar in the hair salon detail page.

    hair_salon: dict
    current_page: str
    return: template, dict
    """
    # get the menu content count to display or not the menu
    total_menu, aggs_menu = HairSalonEs.count_for_salon_content(hair_salon['id'])
    status_stylist = False
    status_hair_styles = False
    status_cut_models = False
    if total_menu > 0:
        # Each aggregation bucket key names a content type the salon has.
        for menu in aggs_menu['menu_count']['buckets']:
            if menu['key'] == u'hair_styles':
                status_hair_styles = True
            elif menu['key'] == u'hair_stylists':
                status_stylist = True
            elif menu['key'] == u'cut_models':
                status_cut_models = True
    navigation_bar = [
        {'endpoint': 'pc.beauty.hair_salon.show', 'name': ('information', ), 'display': _lg(u'サロン情報'), 'status': True, 'position': 'main'},
        {'endpoint': 'pc.beauty.hair_salon.hair_style', 'name': ('style', 'style-show'), 'display': _lg(u'ヘアスタイル'), 'display_sm': _lg(u'スタイル'), 'status': status_hair_styles, 'position': 'main'},
        {'endpoint': 'pc.beauty.hair_salon.hair_stylist', 'name': ('stylist', ), 'display': _lg(u'スタイリスト'), 'status': status_stylist, 'position': 'main'},
        {'endpoint': '', 'name': ('menu', ), 'display': _lg(u'メニュー'), 'status': False, 'position': 'main'},
        {'endpoint': 'pc.beauty.hair_salon.hair_salon_map', 'name': ('map', ), 'display': _lg(u'地図'), 'display_sm': _lg(u'地図・アクセス'), 'status': True, 'position': 'sub'},
        {'endpoint': '', 'name': ('coupon', ), 'display': _lg(u'クーポン'), 'status': False, 'position': 'sub'},
        {'endpoint': 'pc.beauty.hair_salon.hair_salon_cut_model', 'name': ('cut-model', 'cut-model-show'), 'display': _lg(u'カットモデル'), 'status': status_cut_models, 'position': 'sub'},
        {'endpoint': '', 'name': ('recruit', ), 'display': _lg(u'求人'), 'status': False, 'position': 'sub'},
    ]
    # Enable the "menu" (index 3) and "coupon" (index 5) entries only when
    # the salon document carries that data.
    if hair_salon.get("menu"):
        navigation_bar[3].update({'endpoint': 'pc.beauty.hair_salon.hair_salon_menu',
            'status': True})
    if hair_salon.get("coupon"):
        navigation_bar[5].update({'endpoint': 'pc.beauty.hair_salon.hair_salon_coupon',
            'status': True})
    template = '/beauty/hair_salon/partials/part_menu_bar'
    values = {
        'navigation_bar': navigation_bar,
        'hair_salon': hair_salon,
        'current_page': current_page
    }
    return template, values
def viewed_salons(current_id=0, page=1, limit=4):
    """Return the list of viewed salons by current user.

    current_id: int
    page: int
    limit: int
    return: template, dict
    """
    ids = []
    # Recently-viewed salon ids are kept as a comma-separated string in the
    # session; the currently displayed salon is filtered out.
    # NOTE(review): `long` is Python 2 only; also, when `current_id` is
    # falsy the comprehension yields no ids at all -- confirm intended.
    if session.get(HairSalon.LAST_SALON_SESSION_VAR):
        ids = [id for id in session.get(HairSalon.LAST_SALON_SESSION_VAR).split(',') if current_id and long(id) != long(current_id)]
    viewed_salons = HairSalonEs.get_by_ids(ids=ids, page=page, size=limit, render="map")
    return '/beauty/shared/part_viewed_salons', {
        'viewed_salons': viewed_salons
    }
def sidebar_link_catalog():
    """Return the facets links for the length option of catalog page.

    return: template, dict
    """
    hair_lengths = {'ladies': {}, 'mens': {}}
    result = HairStyleEs.pickup_front()
    # Map each Japanese hair-length label back to its English key and attach
    # a representative hair style.  (dict.iteritems is Python 2 only.)
    for ladies_hair_length in result['hair_length']['ladies']:
        for eng, jap in HairSalon.HAIR_LENGTH_TRANS.iteritems():
            if ladies_hair_length['name'] == jap:
                ladies_hair_length['eng'] = eng
                ladies_hair_length['hair_style'] = result['hair_styles'][ladies_hair_length['hair_style_id']]
                hair_lengths['ladies'][eng] = ladies_hair_length
                break
    for mens_hair_length in result['hair_length']['mens']:
        for eng, jap in HairSalon.MENS_HAIR_LENGTH_TRANS.iteritems():
            if mens_hair_length['name'] == jap:
                mens_hair_length['eng'] = eng
                mens_hair_length['hair_style'] = result['hair_styles'][mens_hair_length['hair_style_id']]
                hair_lengths['mens'][eng] = mens_hair_length
                break
    return '/beauty/hair_salon/partials/part_link_catalog', {
        'hair_lengths': hair_lengths
    }
def sidebar_pickup_salon():
    """Return random hair salon with a picture and intro and body (i.e lots of data).

    return: template, dict
    """
    pickup_salon = {}
    result, total = HairSalonEs.get_pickup_hair_salon(size=1)
    # reformat: don't want the id, only the data to display
    # (dict.iteritems is Python 2 only; with size=1 there is at most one entry)
    for id, salon in result.iteritems():
        pickup_salon = salon
    return '/beauty/hair_salon/partials/part_pickup_salon', {
        'pickup_salon': pickup_salon
    }
def stylist_hair_styles(stylist, hair_salon, except_id, stylist_ctx=False):
    """Get the list of the hair style for a stylist.

    stylist: dict
    hair_salon: dict
    except_id: int  # hair style to exclude (the one being displayed)
    stylist_ctx: bool
    return: template, dict
    """
    stylist_hair_styles = HairStyleEs.get_by_stylist(stylist, except_id, total=False)
    return '/beauty/hair_salon/partials/part_stylist_hairstyles', {
        'stylist_hair_styles': stylist_hair_styles, 'stylist': stylist,
        'hair_salon': hair_salon, 'stylist_ctx': stylist_ctx
    }
def salon_length_hair_style(hair_salon, hair_length, excepted_id):
    """Get the hair styles of a hair salon by their hair length.

    hair_salon: dict
    hair_length: str
    excepted_id: int
    return: template, dict
    """
    # Match the requested hair length while excluding the style being viewed.
    filters = {"bool": {"must": [{"term": {"hair_length": hair_length}}, {"bool": {"must_not": {"term": {"id": excepted_id}}}}]}}
    # `total` is unused; only up to four styles are rendered.
    salon_type_hair_styles, total = HairStyleEs.get_hair_styles(hair_salon_id=hair_salon["id"], filters=filters, size=4, render="map")
    return '/beauty/hair_salon/partials/part_salon_type_hair_styles', {
        'salon_type_hair_styles': salon_type_hair_styles,
        'hair_salon': hair_salon,
        'hair_length': hair_length
    }
def hair_salon_footer_area(action='salon', version='minify', title=None, available_prefecture=None):
    """Build the area footer for the hair salon.

    action: str
    version: str
    title: str
    available_prefecture: list
    return: template, dict
    """
    prefecture_list = HairSalonEs.get_prefecture_list()
    if available_prefecture is None:
        available_prefecture = []
    # Collect the prefectures that actually contain salons from the facet.
    elastic_prefecture = HairSalonEs.facet_select(['prefecture'])
    for idx, facet_prefecture in enumerate(elastic_prefecture['aggregations']['prefecture']['buckets']):
        available_prefecture.append(facet_prefecture['key'])
    # Resolve which endpoint the area links should target.
    if action == 'cut-model':
        action = 'pc.beauty.hair_salon.cut_model'
    elif action == 'stylist':
        action = 'pc.beauty.hair_salon.stylist'
    else:
        action = 'pc.beauty.hair_salon.index'
    if version == 'TOP':
        template = '/beauty/hair_salon/partials/part_top_area_list'
        # Flag each prefecture group as available when at least one of its
        # places has salons.
        for prefectures in prefecture_list['hits']['hits']:
            prefectures['_source']['available'] = False
            for prefecture in prefectures['_source']['places']:
                if prefecture['name'] in available_prefecture:
                    prefectures['_source']['available'] = True
                    break
    elif version == 'maxify':
        template = '/beauty/hair_salon/partials/part_prefecture_list'
    else:
        template = '/beauty/hair_salon/partials/part_footer_area_list'
    return template, {
        'prefecture_list': prefecture_list, 'available_prefecture': available_prefecture,
        'action': action, 'title': title
    }
| null |
apps/component/beauty.py
|
beauty.py
|
py
| 17,914 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "model.HairStylistEs.get_pickup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "model.HairStylistEs",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "model.HairStylistEs.PTS_NOT_ASSISTANT",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "model.HairStylistEs",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "model.HairStylistEs.PTS_PROFILE_PICTURE",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "model.HairStylistEs",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "model.HairStylistEs.PTS_HAIR_STYLE",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "model.HairStylistEs.get_hair_stylists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "model.HairStylistEs",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "model.HairStyleEs.pickup_front",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "model.HairStyleEs",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "model.HairStyleEs.pickup_front",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "model.HairStyleEs",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_by_id",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "model.FavoriteHairSalonEs.get_by_id",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "model.FavoriteHairSalonEs",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "model.ChatConversation.get_unread",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "model.ChatConversation",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "model.HairStylistEs.get_by_user_id",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "model.HairStylistEs",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_by_id",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "model.CutModelEs.count_active",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "model.CutModelEs",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_by_id",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "model.Category.get_category_by_alias",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "model.Category",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "model.ColumnEs.search_column",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "model.ColumnEs",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "model.Entry.STATUS_PUBLIC",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "model.Entry",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "apps.pc.column.column.columns_from_es_result",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "model.Entry.prefetch_users",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "model.Entry",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "apps.url_for",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "apps.url_for",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "model.HairSalon.get_location_by_ip",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "model.HairSalon",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_salon_near_here",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs.count_for_salon_content",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "flaskext.babel.lazy_gettext",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "model.HairSalon.LAST_SALON_SESSION_VAR",
"line_number": 357,
"usage_type": "attribute"
},
{
"api_name": "model.HairSalon",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "model.HairSalon.LAST_SALON_SESSION_VAR",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "model.HairSalon",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_by_ids",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "model.HairStyleEs.pickup_front",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "model.HairStyleEs",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "model.HairSalon.HAIR_LENGTH_TRANS.iteritems",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "model.HairSalon.HAIR_LENGTH_TRANS",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "model.HairSalon",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "model.HairSalon.MENS_HAIR_LENGTH_TRANS.iteritems",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "model.HairSalon.MENS_HAIR_LENGTH_TRANS",
"line_number": 384,
"usage_type": "attribute"
},
{
"api_name": "model.HairSalon",
"line_number": 384,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_pickup_hair_salon",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "model.HairStyleEs.get_by_stylist",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "model.HairStyleEs",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "model.HairStyleEs.get_hair_styles",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "model.HairStyleEs",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.get_prefecture_list",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "model.HairSalonEs.facet_select",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "model.HairSalonEs",
"line_number": 463,
"usage_type": "name"
}
] |
361417917
|
import time
from warnings import warn
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
import json
from requests import get
from requests_xml import XMLSession
class ImdbSpider(object):
def scraping(self):
municipios = []
codigo_iteraveis = []
session = XMLSession()
r = session.get('http://www.der.sp.gov.br/Upload/XML/codigo_rodoviario_cadastro_rodoviario.xml')
headers = {"Accept-Language": "en-US,en;q=0.8", "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3"}
print(r.text)
requests = 0
cod_id_value = []
for item in r:
cod_id = r.xml.find('cod_id')
cod_id[requests].text
cod_tipo = r.xml.find('cod_tipo')
cod_tipo[requests].text
cod_codigo = r.xml.find('cod_codigo')
cod_codigo[requests].text
municipios.append([cod_id, cod_tipo, cod_codigo])
codigo_iteraveis.append(cod_id[requests].text)
print('Qtd iteracoes: %s' % (requests))
requests += 1
if requests == 10:
break
requests = 0
for item in municipios:
print(item)
for codigo in codigo_iteraveis:
print('Codigo sendo utilizado: %s'%(codigo))
response = get('http://www.der.sp.gov.br/WebSite/Acessos/MalhaRodoviaria/Services/rodovia_pesquisa.aspx??'
'pg=4500'
'&codigo=' + codigo +
'&superfice='
'&jurisdicao='
'&municipio='
'&kminicial='
'&kmfinal='
'&administracao='
'&operadora=', headers)
time.sleep(10)
if response.status_code != 200:
warn('Request: {}; Status code: {}'.format(requests, response.status_code))
page_html = BeautifulSoup(response.text, 'html.parser')
tag = page_html.findAll('td')
km_inicial = tag[0]
km_final = tag[1]
extensao = tag[2]
tag_municipio = tag[3]
tag_regional = tag[4]
tag_jurisdicao = tag[6]
tag_superficie = tag[9]
requests += 1
if requests == 3:
break
print('=======================================================================================================')
print(km_inicial.text)
print(km_final.text)
print(extensao.text)
print(tag_municipio.text)
print(tag_regional.text)
print(tag_jurisdicao.text)
print(tag_superficie.text)
# print(page_html)
def separa_genero(self):
filmes_por_genero = {}
format=[]
for filme in self:
generos = filme[2]
for genero in generos:
if genero in filmes_por_genero:
format = format_to_use_json(filme)
filmes_por_genero[genero].append(format)
else:
format = format_to_use_json(filme)
filmes_por_genero[genero] = [format]
return filmes_por_genero
def salva_json(filmes_por_genero):
dict_key = filmes_por_genero.keys()
for filmes in filmes_por_genero:
if dict_key.__contains__(filmes):
with open('JSONs/%s.json'% (filmes), 'w') as fp:
json.dump(filmes_por_genero[filmes], fp)
print('Arquivos JSONs gerados')
| null |
spider/imdb.py
|
imdb.py
|
py
| 3,648 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests_xml.XMLSession",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 106,
"usage_type": "call"
}
] |
427236732
|
# !/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:yhq
import xlrd
import re
excel_path = r'C:\WD\WeiDuReport\report\test_en.xlsx'
# excel_path = 'test_en.xlsx'
data = xlrd.open_workbook(excel_path)
table = data.sheet_by_index(3)
row_count = table.nrows
substandard_name = ""
substandard_comments_map = {}
def hm_template_en():
for i in range(row_count):
if i == 0:
continue
if i > 48:
continue
info = table.row_values(i)
name = info[1][:4]
if name:
substandard_name = name
if substandard_name not in substandard_comments_map:
substandard_comments_map[substandard_name] = []
score_range = info[2]
comment = info[4]
score_range = score_range.replace("[", "").replace("]", "")
if score_range.find(",") > -1:
score_ranges = score_range.split(",")
elif score_range.find(",") > -1:
score_ranges = score_range.split(",")
else:
raise Exception("error")
min_value = int(score_ranges[0].strip())
max_value = int(score_ranges[1].strip())
data = {
"min_value": min_value,
"max_value": max_value,
"comment": comment,
"word_template": ""
}
black_template = '''
<w:r w:rsidRPr="00040D97">
<w:rPr>
<w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑" w:hint="eastAsia"/>
<w:b/>
<w:szCs w:val="21"/>
</w:rPr>
<w:t>%s</w:t>
</w:r>
'''
not_black_template = '''
<w:r w:rsidRPr="00040D97">
<w:rPr>
<w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑" w:hint="eastAsia"/>
<w:szCs w:val="21"/>
</w:rPr>
<w:t>%s</w:t>
</w:r>
'''
re_comment = re.findall(r'(.*?)<b>(.*?)</b>', comment, re.S | re.M)
left_comment = comment.split('</b>')[-1]
word_template = ''
for re_c in re_comment:
not_black_template_comment = re_c[0]
blank_template_comment = re_c[1]
word_template += not_black_template % not_black_template_comment
word_template += black_template % blank_template_comment
if len(left_comment) > 0:
word_template += not_black_template % left_comment
data["word_template"] = word_template
substandard_comments_map[substandard_name].append(data)
return substandard_comments_map
| null |
report/hm_temp_en.py
|
hm_temp_en.py
|
py
| 2,582 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "xlrd.open_workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "re.M",
"line_number": 66,
"usage_type": "attribute"
}
] |
401854579
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import glob
import skimage.io as skio
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.ndimage.morphology import binary_erosion
##########################################
def MinimizePatternByTemplMatchV1(pattern):
height = pattern.shape[0]
width = pattern.shape[1]
left = pattern[:, :width / 2]
right = pattern[:, width / 2:]
top = pattern[:height / 2]
bottom = pattern[height / 2:]
# search left on right
result = cv2.matchTemplate(right, left[:, :left.shape[1] / 3], cv2.TM_CCORR_NORMED)
maxLoc = cv2.minMaxLoc(result)[3]
max_x = maxLoc[0] + width / 2 - left.shape[1] / 3 / 2
# plt.imshow(result)
# search top on bottom
result = cv2.matchTemplate(bottom, top[:top.shape[0] / 3, :], cv2.TM_CCORR_NORMED)
maxLoc = cv2.minMaxLoc(result)[3]
max_y = maxLoc[1] + height / 2 - top.shape[0] / 3 / 2
return pattern[:max_y, :max_x]
def MinimizePatternByTemplMatchV2(pattern, brdSize=5, isDebug=False):
ptrnSize = brdSize+2
ptrnLef = pattern[:, :ptrnSize]
ptrnTop = pattern[:ptrnSize, :]
# ccMapH = cv2.matchTemplate(pattern, ptrnLef, method=cv2.TM_CCOEFF_NORMED).reshape(-1)
# ccMapV = cv2.matchTemplate(pattern, ptrnTop, method=cv2.TM_CCOEFF_NORMED).reshape(-1)
ccMapH = 1. - cv2.matchTemplate(pattern, ptrnLef, method=cv2.TM_SQDIFF_NORMED).reshape(-1)
ccMapV = 1. - cv2.matchTemplate(pattern, ptrnTop, method=cv2.TM_SQDIFF_NORMED).reshape(-1)
ccMapHflt = ccMapH.copy()
ccMapVflt = ccMapV.copy()
ccMapHflt[:-2 * brdSize] = 0
ccMapVflt[:-2 * brdSize] = 0
pMaxH = np.argmax(ccMapHflt)
pMaxV = np.argmax(ccMapVflt)
if isDebug:
plt.figure()
plt.subplot(1, 2, 1)
plt.hold(True)
plt.plot(ccMapH)
plt.plot(ccMapHflt)
plt.hold(False)
plt.title('CC-Map-H')
#
plt.subplot(1, 2, 2)
plt.hold(True)
plt.plot(ccMapV)
plt.plot(ccMapVflt)
plt.hold(False)
plt.title('CC-Map-V')
plt.show()
tret = pattern[:pMaxV, :pMaxH]
return tret
def MinimizePatternByTemplMatchV3(pattern, brdSize=5, isDebug=False, parMethod = cv2.TM_SQDIFF):
ptrnSize = brdSize+2
brdSizeExt = int(1.7*brdSize)
ptrnLef = pattern[ptrnSize:-ptrnSize, :ptrnSize]
ptrnTop = pattern[:ptrnSize, ptrnSize:-ptrnSize]
ccMapH = cv2.matchTemplate(pattern, ptrnLef, method=parMethod)
ccMapV = cv2.matchTemplate(pattern, ptrnTop, method=parMethod)
if parMethod==cv2.TM_SQDIFF or parMethod==cv2.TM_SQDIFF_NORMED:
ccMapH = 1. - ccMapH
ccMapV = 1. - ccMapV
minH = ccMapH.min()
minV = ccMapV.min()
ccMapHflt = ccMapH.copy()
ccMapVflt = ccMapV.copy()
ccMapHflt[:, :-brdSizeExt] = minH
ccMapVflt[:-brdSizeExt, :] = minV
# pMaxH = np.argmax(ccMapHflt)
# pMaxV = np.argmax(ccMapVflt)
_, _, _, posMaxH = cv2.minMaxLoc(ccMapHflt)
_, _, _, posMaxV = cv2.minMaxLoc(ccMapVflt)
#
shiftH_X, shiftH_Y = posMaxH
shiftV_X, shiftV_Y = posMaxV
tretCrop = pattern[:shiftV_Y, :shiftH_X]
#
plt.subplot(2, 2, 1), plt.imshow(ccMapH)
plt.subplot(2, 2, 2), plt.imshow(ccMapHflt)
plt.subplot(2, 2, 3), plt.imshow(ccMapV)
plt.subplot(2, 2, 4), plt.imshow(ccMapVflt)
plt.show()
if isDebug:
plt.figure()
plt.subplot(1, 2, 1)
plt.hold(True)
plt.plot(ccMapH)
plt.plot(ccMapHflt)
plt.hold(False)
plt.title('CC-Map-H')
#
plt.subplot(1, 2, 2)
plt.hold(True)
plt.plot(ccMapV)
plt.plot(ccMapVflt)
plt.hold(False)
plt.title('CC-Map-V')
plt.show()
dRightY = shiftH_Y - 0*ptrnSize
dBottomX = shiftV_X - 0*ptrnSize
return (tretCrop, dRightY, dBottomX)
def generateTiledTexton(texton, dRightY, dBottomX, nr=5, nc=5):
tsiz = texton.shape[:2]
sizR, sizC = tsiz
dRR = np.abs(dRightY * nr)
dCC = np.abs(dBottomX * nc)
sizRT = tsiz[0] * (nr + 2) + dRR
sizCT = tsiz[1] * (nc + 2) + dCC
if texton.ndim<3:
retTexture = np.zeros((sizRT, sizCT), dtype=texton.dtype)
else:
nch = texton.shape[-1]
retTexture = np.zeros((sizRT, sizCT, nch), dtype=texton.dtype)
r0 = dRR + tsiz[0] / 2
c0 = dCC + tsiz[1] / 2
for rri in range(nr):
rr = r0 + rri*sizR + 1 * dRightY
for cci in range(nc):
cc = c0 + cci * sizC + 1 * dBottomX
if texton.ndim>2:
retTexture[rr:rr+sizR, cc:cc+sizC,:] = texton.copy()
else:
retTexture[rr:rr + sizR, cc:cc + sizC, :] = texton.copy()
return retTexture
def ReadGraph(pdir, shirt_num):
wdir = '%s/%s_result/' % (pdir, shirt_num )
v_x = np.loadtxt(wdir+'pts_x.csv', delimiter=',') - 100 - 1 #FIXME: shift in 100px is automaticaly added by Texture Extraction Algorithm
v_y = np.loadtxt(wdir+'pts_y.csv', delimiter=',') - 100 - 1
is_good = np.loadtxt(wdir+'is_good.csv', dtype='bool', delimiter=',')
return [v_x, v_y, is_good]
##########################################
def getRandomTexton(vx, vy, isGood, sizN=1, numErosionMax=2):
tmpIsGood = isGood.copy()
cntErosion = 0
for ii in range(numErosionMax):
tmp = binary_erosion(tmpIsGood)
if np.sum(tmp) > 0:
tmpIsGood = tmp
cntErosion += 1
else:
break
rndR, rndC = np.where(tmpIsGood)
idxRnd = np.random.randint(len(rndR))
rndRC = (rndR[idxRnd], rndC[idxRnd])
rndRC = (16, 14)
print(rndRC)
# plt.subplot(1, 2, 1), plt.imshow(is_good)
# plt.subplot(1, 2, 2), plt.imshow(tmpIsGood)
# plt.title('pos = %s, #Erosion=%d' % (list(rndRC), cntErosion))
print ('pos = %s, #Erosion=%d' % (list(rndRC), cntErosion))
X = []
Y = []
X += [vx[(rndRC[0] + 0 , rndRC[1] + 0)]]
X += [vx[(rndRC[0] + sizN, rndRC[1] + 0)]]
X += [vx[(rndRC[0] + 0 , rndRC[1] + sizN)]]
X += [vx[(rndRC[0] + sizN, rndRC[1] + sizN)]]
print(X)
Y += [vy[(rndRC[0] + 0 , rndRC[1] + 0)]]
Y += [vy[(rndRC[0] + sizN, rndRC[1] + 0)]]
Y += [vy[(rndRC[0] + 0 , rndRC[1] + sizN)]]
Y += [vy[(rndRC[0] + sizN, rndRC[1] + sizN)]]
print(Y)
min_x = min(X)
max_x = max(X)
min_y = min(Y)
max_y = max(Y)
# bbox = np.array([[min_y, min_x], [max_y, max_x]])
bbox = np.array([[min_x, max_x], [min_y, max_y]])
bbox = np.round(bbox)
return (bbox, tmpIsGood)
##########################################
def getGoodGridPoints(vx,vy, isGood):
nr,nc = isGood.shape
lstXX = []
lstYY = []
for rr in range(nr):
for cc in range(nc):
if isGood[rr,cc]:
x0 = vx[rr, cc]
y0 = vy[rr, cc]
lstXX.append(x0)
lstYY.append(y0)
return np.array([lstXX,lstYY]).transpose()
def cropTexton(timg, texBBox, brdPrcnt=0.1, brdPx=None):
tsiz = np.array(timg.shape[:2])
xmin = texBBox[0][0]
xmax = texBBox[0][1]
ymin = texBBox[1][0]
ymax = texBBox[1][1]
if brdPrcnt is None:
if brdPx is not None:
dr = brdPx
dc = brdPx
else:
dr = 0
dc = 0
else:
dr = int(brdPrcnt * np.abs(ymax - ymin))
dc = int(brdPrcnt * np.abs(xmax - xmin))
if timg.ndim<3:
tret = timg[ymin - dr:ymax + dr, xmin - dc:xmax + dc].copy()
else:
tret = timg[ymin - dr:ymax + dr, xmin - dc:xmax + dc, :].copy()
return tret
##########################################
if __name__ == '__main__':
# fidx = '/home/ar/github.com/Texture_Detection_and_Synthesis_Experiments.git/data/data04_for_test1_results_v1/txt01_pxy_S/cropped_and_results/idx.txt'
fidx = '/home/ar/github.com/Texture_Detection_and_Synthesis_Experiments.git/data/data04_for_test1_results_v1/txt02_pxy_M/cropped_and_results/idx.txt'
wdir = os.path.dirname(fidx)
with open(fidx, 'r') as f:
lstIdx = f.read().splitlines()
numImg = len(lstIdx)
if numImg<1:
raise Exception('Cant find image Idxs in file [%s]' % fidx)
lstPathImg = [os.path.join(wdir, '%s.jpg' % ii) for ii in lstIdx]
for ii,pathImg in enumerate(lstPathImg):
if ii!=6:
continue
print ('[%d/%d] : %s' % (ii, numImg, pathImg))
tidx = lstIdx[ii]
timg = skio.imread(pathImg)
[v_x, v_y, is_good] = ReadGraph(wdir, tidx)
# arrXY = getGoodGridPoints(v_x, v_y, is_good)
#FIxME: remove buttons
# is_good[:, is_good.shape[1] / 2] = False
retBBox, isGoodMsk = getRandomTexton(v_x, v_y, is_good, sizN=1, numErosionMax=0)
print (retBBox)
bbW = np.abs(retBBox[0][0] - retBBox[0][1])
bbH = np.abs(retBBox[1][0] - retBBox[1][1])
# parBorder = int(min(bbH,bbW) * 0.2)
parBorder = 5
print ('Border parameter: %s' % parBorder)
#
arrXY = getGoodGridPoints(v_x, v_y, isGoodMsk)
imgTexton = cropTexton(timg, retBBox, brdPx=parBorder, brdPrcnt=None)
# imgTextonCorr = MinimizePatternByTemplMatchV1(imgTexton)
# imgTextonCorr = MinimizePatternByTemplMatchV2(imgTexton, brdSize=parBorder)
imgTextonCorr, pdRightY, pdBottomX = MinimizePatternByTemplMatchV3(imgTexton, brdSize=parBorder)
genTexture = generateTiledTexton(imgTextonCorr, pdRightY, pdBottomX)
imgTextonCorrTiled = np.tile(imgTextonCorr, (9, 9, 1))
# imgTextonCorrTiled = genTexture
imgTextonStupidTiled = np.tile(imgTexton, (9, 9, 1))
#
plt.figure()
tmpH = plt.subplot(2, 3, 1)
plt.hold(True)
plt.imshow(timg)
plt.plot(arrXY[:,0], arrXY[:,1], 'or')
tp1 = patches.Rectangle((retBBox[0][0], retBBox[1][0]), bbW, bbH, fill=False, linewidth=3, edgecolor='g')
tp2 = patches.Rectangle((retBBox[0][0], retBBox[1][0]), 2*bbW, 2*bbH, fill=False, linewidth=2, edgecolor='g')
tmpH.add_patch(tp1)
tmpH.add_patch(tp2)
plt.hold(False)
plt.title('Corr-Grid-of-Points')
plt.subplot(2, 3, 2)
plt.imshow(np.dstack((is_good, isGoodMsk, is_good)))
plt.title('Good-Mask')
tmpH = plt.subplot(2, 3, 3)
plt.hold(True)
plt.imshow(imgTexton)
plt.title('Random sampled Texton')
plt.hold(False)
plt.subplot(2, 3, 4)
plt.imshow(imgTextonCorrTiled)
plt.title('Texture Synth: Simple Correlation V1')
plt.subplot(2, 3, 5)
plt.imshow(genTexture)
plt.title('Texture Synth: Simple Correlation V2')
plt.subplot(2, 3, 6)
plt.imshow(imgTextonStupidTiled)
plt.title('Texture Synth: periodic tiling')
plt.show()
| null |
src_experimental_1/step06_simpleCorrSynth/run01_Simple_CorrTextSynth_v2.py
|
run01_Simple_CorrTextSynth_v2.py
|
py
| 10,818 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.matchTemplate",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.TM_CCORR_NORMED",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.minMaxLoc",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.matchTemplate",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.TM_CCORR_NORMED",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.minMaxLoc",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.matchTemplate",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.TM_SQDIFF_NORMED",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.matchTemplate",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.TM_SQDIFF_NORMED",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "cv2.TM_SQDIFF",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "cv2.matchTemplate",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.matchTemplate",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cv2.TM_SQDIFF",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "cv2.TM_SQDIFF_NORMED",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "cv2.minMaxLoc",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cv2.minMaxLoc",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.morphology.binary_erosion",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "skimage.io.imread",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Rectangle",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Rectangle",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "numpy.dstack",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hold",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 295,
"usage_type": "name"
}
] |
236446895
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 00:21:43 2019
@author: molguitianzn
"""
import numpy as np
import matplotlib.pyplot as plt
'''
b = 2
for i in range(10):
a += b
print("a = ", a)
a = np.multiply(a, b)
print("new a = ", a)
'''
c = ([1,2,3,4])
d = ([5,6,7,8])
e = np.multiply(c,d)
f = np.transpose(c)
g = np.transpose(d)
h = np.multiply(c,d)
i = ([[1],[2],[3],[4]])
print(i)
#f = np.multiarray(c,d)
print(c,d,e,f,h)
plt.plot(range(10),'o')
print("hello world")
a_1 = 1
print(a_1)
a_1 = a_1 + 1
print(a_1)
| null |
HelloWorld.py
|
HelloWorld.py
|
py
| 535 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.multiply",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
}
] |
646051753
|
# -*- coding: utf-8 -*-
from django.core.cache import caches
cache = caches['default']
def get_cached_or_qs(cache_key, qs, time_to_live):
value = cache.get(cache_key)
if None is value:
value = qs()
cache.set(cache_key, value, time_to_live)
return value
| null |
core/instruments.py
|
instruments.py
|
py
| 284 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.cache.caches",
"line_number": 4,
"usage_type": "name"
}
] |
477495737
|
from omxplayer.player import OMXPlayer
from PIL import Image, ImageTk
from collections import OrderedDict
from time import time
import tkinter as tk
import threading
import logging
import dbus
import json
import praw
import re
import os
from tkinter import messagebox
class Reddit:
def __init__(self, data):
self.praw = praw.Reddit(client_id=data['client_id'],
client_secret=data['client_secret'],
username=data['username'],
password=data['password'],
user_agent=data['user_agent'])
def get_video_link(self, subreddit_name, max=100):
pool = self.praw.subreddit(subreddit_name)
vids = []
for i, submission in enumerate(pool.hot(limit=max)):
if submission.is_reddit_media_domain and submission.media:
vids.append(self.get_480_url(submission.media['reddit_video']['fallback_url']))
elif submission.is_reddit_media_domain and 'v.redd.it' in submission.url:
if 'DASH' in submission.url:
vids.append(submission.url)
elif hasattr(submission, 'preview'):
if 'reddit_video_preview' in submission.preview.keys():
if submission.preview['reddit_video_preview']['fallback_url']:
vids.append(self.get_480_url(submission.preview['reddit_video_preview']['fallback_url']))
elif submission.url.endswith('gif'):
pass
else:
pass
return vids
def get_480_url(self, url):
pattern = r'https?:\/\/v\.redd\.it\/[a-zA-Z0-9]*\/[a-zA-Z]+\_(?P<res>[0-9]{0,4})[.]*'
match = re.match(pattern, url)
res = match.group('res')
if int(res) > 480:
url = url.replace(res, '480')
return url
class PlayerControl:
def __init__(self, logger, master, data, subreddit_dic, subreddit_list):
self.data = data
self.logging = logger
self.subreddits = OrderedDict(subreddit_dic)
self.subreddits_list = subreddit_list
self.curr_subreddit = self.subreddits_list[0]
self.playing = False
self.paused = False
self.spawn_new = False
self.alive = False
self.last_click = time() - 1
self.delay = 2
self.position = 0
self.root = master
self.init_root()
self.make_overlay()
self.reddit = Reddit(self.data)
self.toggle_loading_text()
self.subreddits[self.curr_subreddit][2] = self.reddit.get_video_link(self.subreddits[self.curr_subreddit][0], 100)
self.toggle_loading_text()
self.logging.debug(self.subreddits[self.curr_subreddit][2])
self.play_vid()
def init_root(self):
self.root.bind("<Escape>", exit)
self.root.overrideredirect(True)
self.root.wait_visibility(self.root)
self.root.wm_attributes("-alpha", 0.0)
self.root.geometry("%dx%d+%d+%d" % (800, 480, 0, 0))
def make_overlay(self):
self.create_bg()
self.create_playback_buttons()
self.create_subreddit_buttons()
self.create_center_icon()
self.create_loading_text()
def create_bg(self):
img = Image.open('images/background.jpg').resize((800, 480))
self.image = ImageTk.PhotoImage(img)
self.panel = tk.Canvas(master=self.root)
self.panel.create_image(0, 0, anchor=tk.NW, image=self.image)
def create_center_icon(self):
button_img = Image.open("images/subreddits/%s.png" % self.curr_subreddit).resize((200,200))
self.center_icon = ImageTk.PhotoImage(button_img)
self.panel.create_image(400, 170, image = self.center_icon)
self.root.update()
def create_loading_text(self):
self.loading_hidden = True
self.loading_text = self.panel.create_text((400, 363), anchor = tk.S, font = ('DinosaursAreAlive', 65, 'bold'), text="LOADING..", fill='RoyalBlue1', state = tk.HIDDEN)
def toggle_loading_text(self):
if self.loading_hidden:
self.panel.itemconfig(self.loading_text, state = tk.NORMAL)
self.loading_hidden = False
else:
self.panel.itemconfig(self.loading_text, state = tk.HIDDEN)
self.loading_hidden = True
self.root.update()
def create_playback_buttons(self):
self.playback_buttons = {}
self.playback_button_photos = {}
playback_types = ['next', 'play', 'prev', 'shutdown']
btn_positions = {"next": (800, 240), "play": (400, 480), "prev": (0, 240), "shutdown": (0,480)}
btn_anchors = {"next": tk.E, "play": tk.S, "prev": tk.W, "shutdown":tk.SW}
btn_sizes = {"next":(150,150), "play":(150,150), "prev":(150,150), "shutdown":(60,60)}
for playback_type in playback_types:
button_img = Image.open("images/playback/%s.png" % playback_type).resize(btn_sizes[playback_type])
self.playback_button_photos[playback_type] = ImageTk.PhotoImage(button_img)
self.playback_buttons[playback_type] = self.panel.create_image(btn_positions[playback_type], anchor = btn_anchors[playback_type], image = self.playback_button_photos[playback_type])
self.panel.tag_bind(self.playback_buttons[playback_type], '<Button-1>', getattr(self, '%s_button_func' % playback_type))
self.panel.pack(fill='both', expand='yes')
def subreddit_button_event(self, event):
x = event.x
relative_x = x - 120
index = relative_x // 70
return self.change_subreddit(event, self.current_subreddit_keys[index])
def create_subreddit_buttons(self):
self.subreddit_buttons = {}
self.subreddit_button_photos = {}
x_pos = 120
y_pos = 0
self.current_subreddit_keys = list(filter(lambda x: x != self.curr_subreddit, self.subreddits.keys()))
for subreddit_key, _ in self.subreddits.items():
if subreddit_key == self.curr_subreddit:
continue
x_pos += 5
button_img = Image.open("images/subreddits/%s.png" % subreddit_key).resize((60,60))
self.subreddit_button_photos[subreddit_key] = ImageTk.PhotoImage(button_img)
self.subreddit_buttons[subreddit_key] = self.panel.create_image(x_pos, y_pos, anchor = tk.NW, image = self.subreddit_button_photos[subreddit_key])
self.panel.tag_bind(self.subreddit_buttons[subreddit_key], '<Button-1>', lambda event: self.subreddit_button_event(event))
x_pos += 65
self.root.update()
def change_subreddit(self, event, subreddit):
self.logging.debug("Change subreddit called to %s" % subreddit)
curr_time = time()
if curr_time-self.last_click > self.delay:
self.last_click = time()
if not self.alive:
if self.subreddits[subreddit][2] == []:
self.curr_subreddit = subreddit
self.create_center_icon()
self.create_subreddit_buttons()
self.paused = False
self.position = 0
self.toggle_loading_text()
self.subreddits[self.curr_subreddit][2] = self.reddit.get_video_link(self.subreddits[self.curr_subreddit][0], 100)
self.toggle_loading_text()
self.play_vid()
else:
self.curr_subreddit = subreddit
self.create_center_icon()
self.create_subreddit_buttons()
self.paused = False
self.position = 0
self.play_vid()
def next_button_func(self, event):
self.logging.debug("Next Pressed")
curr_time = time()
if curr_time-self.last_click > self.delay:
self.last_click = time()
if self.subreddits[self.curr_subreddit][1] < len(self.subreddits[self.curr_subreddit][2]):
self.subreddits[self.curr_subreddit][1] += 1
else:
self.subreddits[self.curr_subreddit][1] = 0
if self.alive:
self.spawn_new = True
self.player.stop()
else:
self.play_vid()
def prev_button_func(self, event):
self.logging.debug("Prev button func")
curr_time = time()
if curr_time-self.last_click > self.delay:
self.last_click = time()
if self.subreddits[self.curr_subreddit][1] > 0:
self.subreddits[self.curr_subreddit][1] -= 1
else:
self.subreddits[self.curr_subreddit][1] = len(self.subreddits[self.curr_subreddit][2]) - 1
if self.alive:
self.spawn_new = True
self.player.stop()
else:
self.play_vid()
def play_button_func(self, event):
curr_time = time()
if curr_time-self.last_click > self.delay:
self.last_click = time()
self.play_vid()
def shutdown_button_func(self, event):
self.logging.debug("Shutdown")
if not self.playing:
self.root.withdraw()
if tk.messagebox.askyesno("Shutdown", "Shutdown Moo-ltimedia Player?"):
os.system('sudo shutdown -h now')
else:
self.root.deiconify()
def play_vid(self):
self.logging.debug("Play button func")
self.logging.debug("Playing: {0} Paused: {1}".format(self.playing, self.paused))
self.logging.debug("Current subreddit index: %d" % self.subreddits[self.curr_subreddit][1])
self.logging.debug("Current video URL: %s" % self.subreddits[self.curr_subreddit][2][self.subreddits[self.curr_subreddit][1]])
if not self.playing:
if self.paused:
self.player = OMXPlayer(self.subreddits[self.curr_subreddit][2][self.subreddits[self.curr_subreddit][1]], args=['--aspect-mode', 'Letterbox', '--pos', self.position])
self.paused = False
else:
self.player = OMXPlayer(self.subreddits[self.curr_subreddit][2][self.subreddits[self.curr_subreddit][1]], args=['--aspect-mode', 'Letterbox'])
self.alive = True
self.player.exitEvent += lambda x, y: self.exit_event_func()
self.root.update()
self.playing = True
else:
self.paused = True
self.playing = False
self.position = self.player.position()
self.player.stop()
def play_vid_after(self, durr):
self.logging.debug("Play vid func")
self.position = 0
self.root.after(durr, self.play_vid())
def exit_event_func(self):
self.logging.debug("Exit event func")
self.playing = False
self.root.update()
if not self.paused:
self.position = 0.0
if self.spawn_new:
self.spawn_new = False
self.play_vid_after(2000)
else:
self.alive = False
def exit_button_func(self):
self.logging.debug("Exit button func")
if self.alive:
self.player.stop()
self.root.destroy()
if __name__ == "__main__":
logging.basicConfig(filename='redditplayer.log', level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
root_logger = logging.getLogger()
with open('redditinfo.json')as file_:
data = json.load(file_)
subreddit_dic = {'cow': ['happycowgifs', 0, []],
'aww': ['aww', 0, []],
'bear': ['BearGifs', 0, []],
'cat': ['CatGifs', 0, []],
'dog': ['HappyWoofGifs', 0, []],
'horse': ['horsegifs', 0, []],
'panda': ['Panda_Gifs', 0, []],
'pig': ['Pigifs', 0, []],
'tiger': ['babybigcatgifs', 0, []]}
subreddit_list = ['cow', 'aww', 'bear', 'cat', 'dog', 'horse', 'panda', 'pig', 'tiger']
root = tk.Tk()
cow = PlayerControl(root_logger, root, data, subreddit_dic, subreddit_list,)
root.mainloop()
| null |
redditplayer.py
|
redditplayer.py
|
py
| 12,439 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "praw.Reddit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "tkinter.Canvas",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tkinter.NW",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "tkinter.S",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "tkinter.HIDDEN",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "tkinter.NORMAL",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "tkinter.HIDDEN",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "tkinter.E",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "tkinter.S",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "tkinter.W",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "tkinter.SW",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "tkinter.NW",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.askyesno",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "omxplayer.player.OMXPlayer",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "omxplayer.player.OMXPlayer",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 288,
"usage_type": "call"
}
] |
86127612
|
import cv2
from matplotlib import pyplot as plt
import numpy as np
file_name = '/home/nearlab/Downloads/' \
'trainingData_semanticSegmentation/' \
'trainingData_semanticSegmentation/' \
'00000.tif'
image = cv2.imread(file_name)
print(np.unique(image))
plt.figure()
plt.subplot(131)
plt.imshow(image[:, :, 0])
plt.subplot(132)
plt.imshow(image[:, :, 1])
plt.subplot(133)
plt.imshow(image[:, :, 2])
plt.show()
| null |
general/small_test.py
|
small_test.py
|
py
| 446 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
}
] |
624135038
|
import os
import time
# Widgets
import ipywidgets as widgets
from IPython.display import display, Javascript
# Jupy4Syn
from jupy4syn.utils import logprint
class ExportButtonHTML(widgets.Button):
def __init__(self, config, *args, **kwargs):
"""
**Constructor**
Parameters
----------
config: `jupy4syn.Configuration`, optional
Configuration object that contains Jupyter Notebook runtime information, by default Configuration()
Examples
--------
>>> config = Configuration()
config.display()
>>> html_export = ExportButtonHTML(config)
html_export.display()
"""
widgets.Button.__init__(self, *args, **kwargs)
# Config
self.config = config
self.notebook_name = config.notebook_name.value
self.plots_list = config.plots_list
# class Button values for ExportButtonHTML
self.description='Export Notebook to HTML'
self.disabled=False
self.button_style='warning' # 'success', 'info', 'warning', 'danger' or ''
self.tooltip='Click me'
self.icon=''
self.layout = widgets.Layout(width='300px')
# Set callback function for click event
self.on_click(self._click_button)
# Logging
self.output = widgets.Output()
# Widgets display box
self.display_box = widgets.VBox([self, self.output])
@staticmethod
def _click_button(b):
with b.output:
# Change button to a "clicked status"
b.disabled = True
b.button_style = ''
b.description='Exporting...'
# We should sleep for some time to give some responsiveness to the user
time.sleep(0.5)
# Get configuration run-time values
b.notebook_name = b.config.notebook_name.value
b.plots_list = b.config.plots_list
# Check if notebook name is not empty, if it is, print an error message and
# change button status temporarily to an error descripton. Then restart button.
if b.notebook_name == "":
logprint("Notebook name not defined in configuration cell", "[ERROR]", config=b.config)
# Change button status to a "error status"
b.disabled = True
b.button_style = 'danger'
b.description='ERROR. Notebook\'s name not set'
time.sleep(2.0)
# Reenable button
b.disabled = False
b.button_style = 'warning'
b.description='Export Notebook to HTML'
return
try:
# For every plot registered in the plots_list, we have to set these
# plots export flag to True to start the export
for plot in b.plots_list:
plot.export = True
# Time sleep to the plot_list thread update the display
time.sleep(1.0)
# Get time stamp for the export name
ts = time.gmtime()
time_stamp = time.strftime("%Y-%m-%d-%H:%M:%S", ts)
output_file = time_stamp + '-' + b.notebook_name
# Save the notebook to display the static images
display(Javascript('IPython.notebook.save_checkpoint();'))
# Call nbconvert to do the export
os.system("python3 -m nbconvert ./" + b.notebook_name + ".ipynb --template=nbextensions --output-dir=./exports --output=" + output_file + " --to html")
# For every plot registered in the plots_list, we have to set these
# plots export flag to False to end the export
for plot in b.plots_list:
plot.export = False
except Exception as e:
logprint(str(e), "[ERROR]", config=b.config)
# Reenable button
b.disabled = False
b.button_style = 'warning'
b.description='Export Notebook to HTML'
def display(self):
display(self.display_box)
| null |
jupy4syn/ExportButtonHTML.py
|
ExportButtonHTML.py
|
py
| 4,233 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ipywidgets.Button",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "ipywidgets.Button.__init__",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Button",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "ipywidgets.Layout",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Output",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ipywidgets.VBox",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "jupy4syn.utils.logprint",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "IPython.display.Javascript",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "jupy4syn.utils.logprint",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 121,
"usage_type": "call"
}
] |
353893996
|
#!/usr/bin/env python
# coding:utf-8
import os
import numpy as np
import xlsxwriter
#----------------------------------------------------------
class Analyser(object):
def __init__(self, outdir, category, top=1, multilabel=False):
self.outdir = outdir
self.category = category
self.top = top
self.multilabel = multilabel
self.flag_train = True
self.flag_test = True
#---------- make Directories
self.resultdir = os.path.join(self.outdir,"results")
if not os.path.isdir(self.resultdir):
os.makedirs(self.resultdir)
def save_data(self, epoch, logmode="", detail=False):
filepath = os.path.join(self.resultdir,"result_{0:05d}.xlsx".format(epoch))
workbook = xlsxwriter.Workbook(filepath)
logs = ["train", "test"]
if logmode == "testonly":
logs = ["test"]
for mode in logs:
if mode == "train":
preds = self.preds_train
labels = self.labels_train
batch_idxss = self.batch_idxs_train
paths = self.paths_train
elif mode == "test":
preds = self.preds_test
labels = self.labels_test
batch_idxss = self.batch_idxs_test
paths = self.paths_test
#---------- calc top-n val
worksheet = workbook.add_worksheet("{}_val".format(mode))
bold = workbook.add_format({'bold': True})
bold_line = workbook.add_format({'bold': True})
bold_line.set_left()
cel_line = workbook.add_format()
cel_line.set_left()
cel_line2 = workbook.add_format()
cel_line2.set_left()
cel_line2.set_top()
cel_line3 = workbook.add_format()
cel_line3.set_top()
cel_line4 = workbook.add_format()
cel_line4.set_top()
cel_line4.set_left()
cel_line4.set_right()
cel_line4.set_bottom()
worksheet.write(0, 0, " ")
#------------ multi label
if self.multilabel:
worksheet.write(1, 0, "accuracy", bold)
result = labels - preds
result = np.absolute(result) < 0.5
accs = []
for n in range(self.category):
worksheet.write(0, n+1, "label {}".format(n), bold)
pred = result[:,n]
acc = np.sum(pred)/float(len(pred))
worksheet.write(1, n+1, acc)
accs.append(acc)
worksheet.write(0, n+2, "average", bold)
acc = sum(accs) / float(len(accs))
worksheet.write(1, n+2, acc)
worksheet = workbook.add_worksheet("{}_detailnum".format(mode))
worksheet.write(0, 0, " ")
worksheet.write(1, 0, "label 0", bold)
worksheet.write(2, 0, "label 1", bold)
worksheet.write(3, 0, "num", bold)
for n in range(self.category):
worksheet.write(0, n*2+1, "label {}-0".format(n), bold_line)
worksheet.write(0, n*2+1+1, "label {}-1".format(n), bold)
pred = result[:,n]
for i in range(2):
idx = np.where(labels[:,n]==i)
num = np.sum(pred[idx])
if i == 1:
worksheet.write(1, n*2+1+1-i, num, cel_line)
worksheet.write(2, n*2+1+1-i, len(pred[idx])-num, cel_line)
worksheet.write(3, n*2+1+1-i, len(pred[idx]), cel_line2)
else:
worksheet.write(2, n*2+1+1-i, num)
worksheet.write(1, n*2+1+1-i, len(pred[idx])-num)
worksheet.write(3, n*2+1+1-i, len(pred[idx]), cel_line3)
worksheet = workbook.add_worksheet("{}_detailacc".format(mode))
worksheet.write(0, 0, " ")
worksheet.write(1, 0, "label 0", bold)
worksheet.write(2, 0, "label 1", bold)
worksheet.write(3, 0, "num", bold)
for n in range(self.category):
worksheet.write(0, n*2+1, "label {}-0".format(n), bold_line)
worksheet.write(0, n*2+1+1, "label {}-1".format(n), bold)
pred = result[:,n]
for i in range(2):
idx = np.where(labels[:,n]==i)
num = float(np.sum(pred[idx]))
if i == 1:
worksheet.write(1, n*2+1+1-i, num/len(pred[idx]), bold_line)
worksheet.write(2, n*2+1+1-i, (len(pred[idx])-num)/len(pred[idx]), cel_line)
worksheet.write(3, n*2+1+1-i, len(pred[idx]), cel_line2)
else:
worksheet.write(2, n*2+1+1-i, num/len(pred[idx]), bold)
worksheet.write(1, n*2+1+1-i, (len(pred[idx])-num)/len(pred[idx]))
worksheet.write(3, n*2+1+1-i, len(pred[idx]), cel_line3)
#------------ single label
else:
total_num = [0]*(self.category+1)
all_acc = np.zeros((self.category,self.category+1))
all_val = np.zeros((self.category,self.category))
ranks = np.argsort(preds, axis=1)[:,::-1]
for n in range(self.category):
worksheet.write(n+1, 0, "top-{}".format(n+1), bold)
worksheet.write(0, n+1, "label {}".format(n), bold)
top_n = ranks[:,n]
top_n_bool = (top_n == labels)
top_n_idx = np.where(top_n_bool==True)[0]
#top_n_acc = len(top_n_idx) / float(len(self.preds)) #accuracy of top-n
top_n_pred = top_n[top_n_idx] #value of correct prediction of each label
for i in range(self.category):
idx_num = np.where(top_n_pred==i)[0].shape[0]
if n < self.top:
worksheet.write(n+1, i+1, idx_num, bold)
else:
worksheet.write(n+1, i+1, idx_num)
total_num[i] += idx_num
all_acc[n,i] = total_num[i]
if n < self.top:
worksheet.write(n+1, i+2, len(top_n_pred),bold)
else:
worksheet.write(n+1, i+2, len(top_n_pred))
total_num[-1] += len(top_n_pred)
all_acc[n,-1] = total_num[-1]
if n == 0:
for i,pred_num in enumerate(top_n):
label_num = labels[i]
all_val[label_num, pred_num] += 1
worksheet.write(n+2, 0, "total", bold)
worksheet.write(0, n+2, "total", bold)
for i in range(len(total_num)):
worksheet.write(n+2, i+1, total_num[i])
#---------- calc top-n acc
worksheet = workbook.add_worksheet("{}_acc".format(mode))
worksheet.write(0, 0, " ")
for n in range(self.category):
worksheet.write(n+1, 0, "top-{}".format(n+1), bold)
worksheet.write(0, n+1, "label {}".format(n), bold)
for i in range(self.category):
if n < self.top:
worksheet.write(n+1, i+1, all_acc[n,i]/float(total_num[i])*100., bold)
else:
worksheet.write(n+1, i+1, all_acc[n,i]/float(total_num[i])*100.)
if n < self.top:
worksheet.write(n+1, i+2, all_acc[n,-1]/float(total_num[-1])*100.,bold)
else:
worksheet.write(n+1, i+2, all_acc[n,-1]/float(total_num[-1])*100.)
worksheet.write(0, i+2, "total", bold)
#---------- detail of pred (num)
worksheet = workbook.add_worksheet("{}_detail".format(mode))
for n in range(self.category):
worksheet.write(n+1, 0, "label {}".format(n), bold)
worksheet.write(0, n+1, "label {}".format(n), bold)
for i in range(self.category):
if n==i:
worksheet.write(n+1, i+1, all_val[n,i],bold)
else:
worksheet.write(n+1, i+1, all_val[n,i])
#---------- detail of pred2 (accuracy)
worksheet = workbook.add_worksheet("{}_detail_acc".format(mode))
for n in range(self.category):
worksheet.write(n+1, 0, "label {}".format(n), bold)
worksheet.write(0, n+1, "label {}".format(n), bold)
for i in range(self.category):
if n==i:
worksheet.write(n+1, i+1, all_val[n,i]/float(total_num[n])*100., bold)
else:
worksheet.write(n+1, i+1, all_val[n,i]/float(total_num[n])*100.)
if detail:
idx = np.argsort(batch_idxss)
preds_sort = preds[idx]
labels_sort = labels[idx]
batch_idxss_sort = batch_idxss[idx]
paths_sort = paths[idx]
worksheet = workbook.add_worksheet("{}_result".format(mode))
for n in range(self.category):
worksheet.write(0, n+1, "label {}".format(n), bold)
worksheet.write(n+2, 0, "label", bold)
worksheet.write(n+3, 0, "path", bold)
for i in range(len(batch_idxss)):
worksheet.write(i+1, 0, batch_idxss_sort[i])
max_idx = np.argsort(preds_sort[i])[-1]
for n in range(self.category):
if n == max_idx:
worksheet.write(i+1, n+1, preds_sort[i,n], bold)
else:
worksheet.write(i+1, n+1, preds_sort[i,n])
if max_idx != labels_sort[i]:
worksheet.write(i+1, n+2, labels_sort[i], cel_line4)
else:
worksheet.write(i+1, n+2, labels_sort[i])
worksheet.write(i+1, n+3, paths_sort[i])
workbook.close()
self.flag_train = True
self.flag_test = True
def __call__(self, preds, labels, batch_idxs, paths, mode):
if mode == "train":
if self.flag_train:
self.preds_train = preds
self.labels_train = labels
self.batch_idxs_train = batch_idxs
self.paths_train = paths
self.flag_train = False
else:
self.preds_train = np.r_[self.preds_train, preds]
self.labels_train = np.r_[self.labels_train, labels]
#print self.preds_train.shape, self.labels_train.shape #batch32: (64,2),(64,)
#print self.preds_train.shape, self.labels_train.shape #batch32-multi: (64,9),(64,9)
self.batch_idxs_train = np.r_[self.batch_idxs_train, batch_idxs]
self.paths_train = np.r_[self.paths_train, paths]
elif mode == "test":
if self.flag_test:
self.preds_test = preds
self.labels_test = labels
self.batch_idxs_test = batch_idxs
self.paths_test = paths
self.flag_test = False
else:
self.preds_test = np.r_[self.preds_test, preds]
self.labels_test = np.r_[self.labels_test, labels]
self.batch_idxs_test = np.r_[self.batch_idxs_test, batch_idxs]
self.paths_test = np.r_[self.paths_test, paths]
#print self.preds_test.shape #(50000,10)
#print self.labels_test.shape #50000
#print self.batch_idxs_test.shape #50000
| null |
chainer/ssd_withRotate_Score/utils/analyser.py
|
analyser.py
|
py
| 12,696 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "xlsxwriter.Workbook",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 257,
"usage_type": "attribute"
}
] |
650049456
|
from AccessControl import allow_module
from Acquisition import aq_inner
from Products.CMFCore.permissions import ModifyPortalContent
from Products.CMFCore.utils import getToolByName
from Products.PluggableAuthService.interfaces.plugins import IRolesPlugin
from Products.ATContentTypes.lib import constraintypes
from zope.app.component.hooks import getSite
from AccessControl import ClassSecurityInfo, getSecurityManager
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl.User import UnrestrictedUser as BaseUnrestrictedUser
from Products.PlonePAS.utils import decleanId
from Products.ATContentTypes.lib import constraintypes
from Products.Five.utilities.marker import mark
from bungenicms.workspaces.interfaces import IMemberSpace, IGroupSpace
from bungenicms.workspaces.config import MEMBER_SPACE_CONTENT
from bungenicms.workspaces.config import GROUP_SPACE_CONTENT
from bungenicms.workspaces.config import PUBLIC_FOLDER_ENTRY_NAME
from bungenicms.workspaces.config import PRIVATE_FOLDER_ENTRY_NAME
from bungenicms.workspaces.config import ROLES_FOR_WEB_SPACE
def doSearch(acl_tool, groupId):
""" Search for a group by id or title"""
rolemakers = acl_tool.plugins.listPlugins(IRolesPlugin)
group = acl_tool.getGroupById(groupId)
allAssignedRoles = []
for rolemaker_id, rolemaker in rolemakers:
allAssignedRoles.extend(rolemaker.getRolesForPrincipal(group))
return allAssignedRoles
def create_space(parent, object_id, object_name, object_status, owner_id, owner,
contenttype):
portal = getSite()
portal_types = getToolByName(portal, "portal_types")
type_info = portal_types.getTypeInfo(contenttype)
space = type_info._constructInstance(parent, object_id)
space.setTitle(object_name)
portal.plone_utils.changeOwnershipOf( space, portal.getOwner().getId(), 1 )
space._setRoles(owner_id, ("Reader","Contributor","Editor", "Reviewer"))
space.reindexObjectSecurity()
space.content_status_modify(workflow_action=object_status)
space.reindexObject
def create_content(parent, content, owner, content_status):
#Create custom content.
for content_type in content:
parent.invokeFactory(content_type["type"],id=content_type["id"])
content = getattr(parent, content_type["id"])
content.setTitle(content_type["title"])
content.manage_setLocalRoles(owner.getId(), ["Contributor","Editor",])
content.content_status_modify(workflow_action=content_status)
content.setConstrainTypesMode(constraintypes.ENABLED)
content.setLocallyAllowedTypes(content_type["addable_types"])
content.setImmediatelyAddableTypes(content_type["addable_types"])
content.reindexObject
def initializeAreas(pm_tool, acl_tool, request, member_folder_id=None):
    """
    Creates custom content in the member's home folder.
    Create group spaces and content for the member's groups.
    1. Create private space for the user.
    2. Create public space for user if they are a member of parliament.
    3. Populate the public space with custom content
    4. Create group home folder for any groups (except a parliament) this user
    is a member of.
    4.1 Create a private space for the group home folder.
    4.2 Create a public space for the group home folder
    4.3 Populate the public space with custom content.

    :param pm_tool: portal_membership tool, used to resolve the member and
        their home folder.
    :param acl_tool: acl_users tool (rebound below to the portal's own
        acl_users, so the passed-in value is effectively ignored).
    :param request: unused here; kept for the caller's signature.
    :param member_folder_id: optional (cleaned) member folder id; when absent
        the currently authenticated member is used.
    """
    portal = getSite()
    # Run the rest of this function with temporary Manager privileges so the
    # folder/workflow operations below succeed regardless of the caller.
    # NOTE(review): the original security manager is never restored before
    # returning -- confirm callers handle this.
    sm = getSecurityManager()
    tmp_user = BaseUnrestrictedUser(sm.getUser().getId(),'', ['Manager'],'')
    newSecurityManager(None, tmp_user)
    acl_tool = getToolByName(portal, 'acl_users')
    if "groups" in portal.objectIds():
        groups_space = portal["groups"]
    if member_folder_id:
        member = pm_tool.getMemberById(decleanId(member_folder_id))
    else:
        member = pm_tool.getAuthenticatedMember()
    member_id = member.getId()
    folder = pm_tool.getHomeFolder(member_id)
    mark(folder, IMemberSpace)
    #All members get a private workspace area.
    object_name = member_id +": Private Space"
    create_space(folder, "private_space", object_name, "private", member_id,
        member, PRIVATE_FOLDER_ENTRY_NAME)
    member_groupIds = member.getGroupIds()
    # A member gets a published "web_space" (plus its custom content) if any
    # of their groups carries one of the ROLES_FOR_WEB_SPACE roles.
    for member_groupId in member_groupIds:
        group_membership_roles = doSearch(acl_tool, member_groupId)
        if bool(set(ROLES_FOR_WEB_SPACE) & set(group_membership_roles)):
            object_name = member_id +": Web Space"
            create_space(folder, "web_space", object_name, "publish", member_id,
                member, PUBLIC_FOLDER_ENTRY_NAME)
            parent_space = getattr(folder, "web_space")
            mark(parent_space, IMemberSpace)
            create_content(parent_space, MEMBER_SPACE_CONTENT, member,
                "publish")
    # NOTE(review): this raises KeyError if the portal has no "groups"
    # folder; the guarded lookup above only runs when it exists.
    groups_space = portal["groups"]
    for member_groupId in member_groupIds:
        group_membership_roles = doSearch(acl_tool, member_groupId)
        #if group home folder does not exist
        #it is cheaper to check if the folder exists, then exit if it does
        for bungeni_group in acl_tool.bungeni_groups.enumerateGroups():
            # Create a group home folder only for groups that do NOT carry a
            # web-space role and whose folder does not exist yet.
            if ((member_groupId == bungeni_group["id"])
            and (not bool(set(ROLES_FOR_WEB_SPACE) & set(group_membership_roles)))
            and (bungeni_group["id"]not in groups_space.objectIds())):
                group = acl_tool.bungeni_groups.getGroupById(bungeni_group["id"])
                create_space(groups_space, bungeni_group["id"],
                    bungeni_group["title"], "private", bungeni_group["id"],
                    group, "Folder")
                parent_space = getattr(groups_space, bungeni_group["id"])
                mark(parent_space, IGroupSpace)
                object_name = bungeni_group["title"] + ": Private Space"
                create_space(parent_space, "private_space", object_name,
                    "private", bungeni_group["id"],
                    group, PRIVATE_FOLDER_ENTRY_NAME)
                object_name = bungeni_group["title"] + ": Web Space"
                create_space(parent_space, "web_space", object_name,
                    "publish", bungeni_group["id"],
                    group, PUBLIC_FOLDER_ENTRY_NAME)
                # Rebind to the group's web_space and fill it with the
                # configured custom content.
                parent_space = getattr(parent_space, "web_space")
                mark(parent_space, IGroupSpace)
                create_content(parent_space, GROUP_SPACE_CONTENT, group,
                    "publish")
| null |
plone.products/bungenicms.workspaces/trunk/bungenicms/workspaces/content/principal_content.py
|
principal_content.py
|
py
| 6,618 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Products.PluggableAuthService.interfaces.plugins.IRolesPlugin",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "zope.app.component.hooks.getSite",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Products.CMFCore.utils.getToolByName",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Products.ATContentTypes.lib.constraintypes.ENABLED",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "Products.ATContentTypes.lib.constraintypes",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "zope.app.component.hooks.getSite",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "AccessControl.getSecurityManager",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "AccessControl.User.UnrestrictedUser",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "AccessControl.SecurityManagement.newSecurityManager",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "Products.CMFCore.utils.getToolByName",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "Products.PlonePAS.utils.decleanId",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "Products.Five.utilities.marker.mark",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "bungenicms.workspaces.interfaces.IMemberSpace",
"line_number": 87,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.PRIVATE_FOLDER_ENTRY_NAME",
"line_number": 93,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.ROLES_FOR_WEB_SPACE",
"line_number": 98,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.PUBLIC_FOLDER_ENTRY_NAME",
"line_number": 101,
"usage_type": "argument"
},
{
"api_name": "Products.Five.utilities.marker.mark",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "bungenicms.workspaces.interfaces.IMemberSpace",
"line_number": 103,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.MEMBER_SPACE_CONTENT",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.ROLES_FOR_WEB_SPACE",
"line_number": 117,
"usage_type": "argument"
},
{
"api_name": "Products.Five.utilities.marker.mark",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "bungenicms.workspaces.interfaces.IGroupSpace",
"line_number": 124,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.PRIVATE_FOLDER_ENTRY_NAME",
"line_number": 128,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.PUBLIC_FOLDER_ENTRY_NAME",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "Products.Five.utilities.marker.mark",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "bungenicms.workspaces.interfaces.IGroupSpace",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "bungenicms.workspaces.config.GROUP_SPACE_CONTENT",
"line_number": 135,
"usage_type": "argument"
}
] |
319320604
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
from urllib.parse import urljoin
class HtmlParser(object):
    """Extracts folder links and file titles from a crawled HTML page."""

    def _get_new_urls(self, page_url, soup):
        """Return the set of href targets of every <a class="vd_folder">."""
        return {anchor['href']
                for anchor in soup.find_all('a', class_='vd_folder')}

    def _get_new_data(self, page_url, soup):
        """Return (and echo) the title of each entry in every "filelist"."""
        titles = []
        for entry in soup.find_all(class_="filelist"):
            title = entry.find('div', class_='sort_name_detail').find('a')['title']
            print(title)
            titles.append(title)
        return titles

    def parse(self, page_url, html_cont):
        """Parse *html_cont*; return (new_urls, new_data), or None when
        either input is missing."""
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        return self._get_new_urls(page_url, soup), self._get_new_data(page_url, soup)
| null |
spider/A_Frame/template/sample_split_version/html_parser.py
|
html_parser.py
|
py
| 933 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bs4.BeautifulSoup",
"line_number": 32,
"usage_type": "call"
}
] |
89264749
|
#!/usr/bin/env python
"""
@package ion.services.mi.sbe37_driver
@file ion/services/mi/sbe37_driver.py
@author Edward Hunter
@brief Driver class for sbe37 CTD instrument.
"""
__author__ = 'Edward Hunter'
__license__ = 'Apache 2.0'
import logging
import time
import re
import datetime
from ion.services.mi.instrument_driver import InstrumentDriver
from ion.services.mi.instrument_driver import DriverChannel
from ion.services.mi.instrument_driver import DriverCommand
from ion.services.mi.instrument_driver import DriverState
from ion.services.mi.instrument_driver import DriverEvent
from ion.services.mi.instrument_driver import DriverParameter
from ion.services.mi.exceptions import InstrumentProtocolException
from ion.services.mi.exceptions import InstrumentTimeoutException
from ion.services.mi.exceptions import InstrumentStateException
from ion.services.mi.exceptions import InstrumentConnectionException
from ion.services.mi.exceptions import RequiredParameterException
from ion.services.mi.common import InstErrorCode
from ion.services.mi.common import BaseEnum
from ion.services.mi.instrument_protocol import InstrumentProtocol
from ion.services.mi.instrument_protocol import CommandResponseInstrumentProtocol
from ion.services.mi.instrument_fsm_args import InstrumentFSM
#import ion.services.mi.mi_logger
mi_logger = logging.getLogger('mi_logger')
class SBE37State(BaseEnum):
    """
    Protocol states for the SBE37 driver state machine; values mirror the
    generic DriverState constants.
    """
    UNCONFIGURED = DriverState.UNCONFIGURED
    DISCONNECTED = DriverState.DISCONNECTED
    COMMAND = DriverState.COMMAND
    AUTOSAMPLE = DriverState.AUTOSAMPLE
class SBE37Event(BaseEnum):
    """
    Protocol events for the SBE37 driver state machine; values mirror the
    generic DriverEvent constants.
    """
    # NOTE(review): execute_direct() fires SBE37Event.EXECUTE, which is not
    # defined here and would raise AttributeError -- confirm whether
    # EXECUTE = DriverEvent.EXECUTE should be added.
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    CONFIGURE = DriverEvent.CONFIGURE
    INITIALIZE = DriverEvent.INITIALIZE
    CONNECT = DriverEvent.CONNECT
    DISCONNECT = DriverEvent.DISCONNECT
    DETACH = DriverEvent.DETACH
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    TEST = DriverEvent.TEST
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    UPDATE_PARAMS = DriverEvent.UPDATE_PARAMS
class SBE37Channel(BaseEnum):
    """
    Channels addressable on the SBE37; values mirror the generic
    DriverChannel constants.
    """
    CTD = DriverChannel.CTD
    ALL = DriverChannel.ALL
class SBE37Command(DriverCommand):
    """SBE37 command set; currently identical to the generic DriverCommand."""
    pass
# Device prompts.
class SBE37Prompt(BaseEnum):
    """
    SBE37 io prompts.

    These strings are matched against the tail of the device's output to
    detect command-mode, rejected-command and autosample-mode prompts.
    """
    COMMAND = 'S>'
    BAD_COMMAND = '?cmd S>'
    AUTOSAMPLE = 'S>\r\n'
SBE37_NEWLINE = '\r\n'
SBE37_SAMPLE = 'SBE37_SAMPLE'
# Device specific parameters.
class SBE37Parameter(DriverParameter):
    """
    Add sbe37 specific parameters here.

    Names correspond to parameters reported by the device's ds/dc status
    output (see the regexes registered in SBE37Protocol.__init__).
    """
    # Output/sampling configuration.
    OUTPUTSAL = 'OUTPUTSAL'
    OUTPUTSV = 'OUTPUTSV'
    NAVG = 'NAVG'
    SAMPLENUM = 'SAMPLENUM'
    INTERVAL = 'INTERVAL'
    STORETIME = 'STORETIME'
    TXREALTIME = 'TXREALTIME'
    SYNCMODE = 'SYNCMODE'
    SYNCWAIT = 'SYNCWAIT'
    # Temperature calibration date and coefficients.
    TCALDATE = 'TCALDATE'
    TA0 = 'TA0'
    TA1 = 'TA1'
    TA2 = 'TA2'
    TA3 = 'TA3'
    # Conductivity calibration date and coefficients.
    CCALDATE = 'CCALDATE'
    CG = 'CG'
    CH = 'CH'
    CI = 'CI'
    CJ = 'CJ'
    WBOTC = 'WBOTC'
    CTCOR = 'CTCOR'
    CPCOR = 'CPCOR'
    # Pressure calibration date and coefficients.
    PCALDATE = 'PCALDATE'
    PA0 = 'PA0'
    PA1 = 'PA1'
    PA2 = 'PA2'
    PTCA0 = 'PTCA0'
    PTCA1 = 'PTCA1'
    PTCA2 = 'PTCA2'
    PTCB0 = 'PTCB0'
    PTCB1 = 'PTCB1'
    PTCB2 = 'PTCB2'
    POFFSET = 'POFFSET'
    # Real-time clock calibration date and coefficients.
    RCALDATE = 'RCALDATE'
    RTCA0 = 'RTCA0'
    RTCA1 = 'RTCA1'
    RTCA2 = 'RTCA2'
###############################################################################
# Seabird Electronics 37-SMP MicroCAT protocol.
###############################################################################
class SBE37Protocol(CommandResponseInstrumentProtocol):
"""
"""
def __init__(self, prompts, newline, evt_callback):
"""
"""
CommandResponseInstrumentProtocol.__init__(self, evt_callback, prompts, newline)
# Build protocol state machine.
self._fsm = InstrumentFSM(SBE37State, SBE37Event, SBE37Event.ENTER,
SBE37Event.EXIT, InstErrorCode.UNHANDLED_EVENT)
# Add handlers for all events.
self._fsm.add_handler(SBE37State.UNCONFIGURED, SBE37Event.ENTER, self._handler_unconfigured_enter)
self._fsm.add_handler(SBE37State.UNCONFIGURED, SBE37Event.EXIT, self._handler_unconfigured_exit)
self._fsm.add_handler(SBE37State.UNCONFIGURED, SBE37Event.INITIALIZE, self._handler_unconfigured_initialize)
self._fsm.add_handler(SBE37State.UNCONFIGURED, SBE37Event.CONFIGURE, self._handler_unconfigured_configure)
self._fsm.add_handler(SBE37State.DISCONNECTED, SBE37Event.ENTER, self._handler_disconnected_enter)
self._fsm.add_handler(SBE37State.DISCONNECTED, SBE37Event.EXIT, self._handler_disconnected_exit)
self._fsm.add_handler(SBE37State.DISCONNECTED, SBE37Event.INITIALIZE, self._handler_disconnected_initialize)
self._fsm.add_handler(SBE37State.DISCONNECTED, SBE37Event.CONFIGURE, self._handler_disconnected_configure)
self._fsm.add_handler(SBE37State.DISCONNECTED, SBE37Event.CONNECT, self._handler_disconnected_connect)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.ENTER, self._handler_command_enter)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.EXIT, self._handler_command_exit)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.DISCONNECT, self._handler_command_disconnect)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.GET, self._handler_command_autosample_get)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.SET, self._handler_command_set)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.ACQUIRE_SAMPLE, self._handler_command_acquire_sample)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.START_AUTOSAMPLE, self._handler_command_start_autosample)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.TEST, self._handler_command_test)
self._fsm.add_handler(SBE37State.COMMAND, SBE37Event.UPDATE_PARAMS, self._handler_command_update_params)
self._fsm.add_handler(SBE37State.AUTOSAMPLE, SBE37Event.ENTER, self._handler_autosample_enter)
self._fsm.add_handler(SBE37State.AUTOSAMPLE, SBE37Event.EXIT, self._handler_autosample_exit)
self._fsm.add_handler(SBE37State.AUTOSAMPLE, SBE37Event.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)
self._fsm.add_handler(SBE37State.AUTOSAMPLE, SBE37Event.GET, self._handler_command_autosample_get)
# Start state machine.
self._fsm.start(SBE37State.UNCONFIGURED)
# Add build command handlers.
self._add_build_handler('ds', self._build_simple_command)
self._add_build_handler('dc', self._build_simple_command)
self._add_build_handler('ts', self._build_simple_command)
self._add_build_handler('startnow', self._build_simple_command)
self._add_build_handler('stop', self._build_simple_command)
self._add_build_handler('set', self._build_set_command)
# Add parse response handlers.
self._add_response_handler('ds', self._parse_dsdc_response)
self._add_response_handler('dc', self._parse_dsdc_response)
self._add_response_handler('ts', self._parse_ts_response)
self._add_response_handler('set', self._parse_set_response)
# Add sample handlers.
self._sample_pattern = r'^#? *(-?\d+\.\d+), *(-?\d+\.\d+), *(-?\d+\.\d+)'
self._sample_pattern += r'(, *(-?\d+\.\d+))?(, *(-?\d+\.\d+))?'
self._sample_pattern += r'(, *(\d+) +([a-zA-Z]+) +(\d+), *(\d+):(\d+):(\d+))?'
self._sample_pattern += r'(, *(\d+)-(\d+)-(\d+), *(\d+):(\d+):(\d+))?'
self._sample_regex = re.compile(self._sample_pattern)
# Add parameter handlers to parameter dict.
self._add_param_dict(SBE37Parameter.OUTPUTSAL,
r'(do not )?output salinity with each sample',
lambda match : False if match.group(1) else True,
self._true_false_to_string)
self._add_param_dict(SBE37Parameter.OUTPUTSV,
r'(do not )?output sound velocity with each sample',
lambda match : False if match.group(1) else True,
self._true_false_to_string)
self._add_param_dict(SBE37Parameter.NAVG,
r'number of samples to average = (\d+)',
lambda match : int(match.group(1)),
self._int_to_string)
self._add_param_dict(SBE37Parameter.SAMPLENUM,
r'samplenumber = (\d+), free = \d+',
lambda match : int(match.group(1)),
self._int_to_string)
self._add_param_dict(SBE37Parameter.INTERVAL,
r'sample interval = (\d+) seconds',
lambda match : int(match.group(1)),
self._int_to_string)
self._add_param_dict(SBE37Parameter.STORETIME,
r'(do not )?store time with each sample',
lambda match : False if match.group(1) else True,
self._true_false_to_string)
self._add_param_dict(SBE37Parameter.TXREALTIME,
r'(do not )?transmit real-time data',
lambda match : False if match.group(1) else True,
self._true_false_to_string)
self._add_param_dict(SBE37Parameter.SYNCMODE,
r'serial sync mode (enabled|disabled)',
lambda match : False if (match.group(1)=='disabled') else True,
self._true_false_to_string)
self._add_param_dict(SBE37Parameter.SYNCWAIT,
r'wait time after serial sync sampling = (\d+) seconds',
lambda match : int(match.group(1)),
self._int_to_string)
self._add_param_dict(SBE37Parameter.TCALDATE,
r'temperature: +((\d+)-([a-zA-Z]+)-(\d+))',
lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),
self._date_to_string)
self._add_param_dict(SBE37Parameter.TA0,
r' +TA0 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.TA1,
r' +TA1 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.TA2,
r' +TA2 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.TA3,
r' +TA3 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.CCALDATE,
r'conductivity: +((\d+)-([a-zA-Z]+)-(\d+))',
lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),
self._date_to_string)
self._add_param_dict(SBE37Parameter.CG,
r' +G = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.CH,
r' +H = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.CI,
r' +I = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.CJ,
r' +J = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.WBOTC,
r' +WBOTC = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.CTCOR,
r' +CTCOR = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.CPCOR,
r' +CPCOR = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PCALDATE,
r'pressure .+ ((\d+)-([a-zA-Z]+)-(\d+))',
lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),
self._date_to_string)
self._add_param_dict(SBE37Parameter.PA0,
r' +PA0 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PA1,
r' +PA1 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PA2,
r' +PA2 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PTCA0,
r' +PTCA0 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PTCA1,
r' +PTCA1 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PTCA2,
r' +PTCA2 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PTCB0,
r' +PTCSB0 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PTCB1,
r' +PTCSB1 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.PTCB2,
r' +PTCSB2 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.POFFSET,
r' +POFFSET = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.RCALDATE,
r'rtc: +((\d+)-([a-zA-Z]+)-(\d+))',
lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),
self._date_to_string)
self._add_param_dict(SBE37Parameter.RTCA0,
r' +RTCA0 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.RTCA1,
r' +RTCA1 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
self._add_param_dict(SBE37Parameter.RTCA2,
r' +RTCA2 = (-?\d.\d\d\d\d\d\de[-+]\d\d)',
lambda match : float(match.group(1)),
self._float_to_string)
########################################################################
# Protocol connection interface.
########################################################################
def initialize(self, *args, **kwargs):
"""
"""
# Construct state machine params and fire event.
return self._fsm.on_event(SBE37Event.INITIALIZE, *args, **kwargs)
def configure(self, *args, **kwargs):
"""
"""
# Construct state machine params and fire event.
return self._fsm.on_event(SBE37Event.CONFIGURE, *args, **kwargs)
def connect(self, *args, **kwargs):
"""
"""
# Construct state machine params and fire event.
return self._fsm.on_event(SBE37Event.CONNECT, *args, **kwargs)
def disconnect(self, *args, **kwargs):
"""
"""
# Construct state machine params and fire event.
return self._fsm.on_event(SBE37Event.DISCONNECT, *args, **kwargs)
def detach(self, *args, **kwargs):
"""
"""
# Construct state machine params and fire event.
return self._fsm.on_event(SBE37Event.DETACH, *args, **kwargs)
########################################################################
# Protocol command interface.
########################################################################
def get(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.GET, *args, **kwargs)
def set(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.SET, *args, **kwargs)
    def execute_direct(self, *args, **kwargs):
        """
        Fire the EXECUTE event on the protocol state machine.

        NOTE(review): SBE37Event does not define EXECUTE in this module, so
        this method raises AttributeError when called -- confirm the
        intended event name (or add EXECUTE to SBE37Event).
        """
        return self._fsm.on_event(SBE37Event.EXECUTE, *args, **kwargs)
def execute_acquire_sample(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.ACQUIRE_SAMPLE, *args, **kwargs)
def execute_start_autosample(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.START_AUTOSAMPLE, *args, **kwargs)
def execute_stop_autosample(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.STOP_AUTOSAMPLE, *args, **kwargs)
def execute_test(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.TEST, *args, **kwargs)
def update_params(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(SBE37Event.UPDATE_PARAMS, *args, **kwargs)
########################################################################
# Protocol query interface.
########################################################################
def get_resource_commands(self):
"""
"""
return [cmd for cmd in dir(self) if cmd.startswith('execute_')]
def get_resource_params(self):
"""
"""
return self._get_param_dict_names()
def get_current_state(self):
"""
"""
return self._fsm.get_current_state()
########################################################################
# State handlers
########################################################################
########################################################################
# SBE37State.UNCONFIGURED
########################################################################
def _handler_unconfigured_enter(self, *args, **kwargs):
"""
"""
mi_logger.info('channel %s entered state %s', SBE37Channel.CTD,
SBE37State.UNCONFIGURED)
self._publish_state_change(SBE37State.UNCONFIGURED)
# Initialize throws no exceptions.
InstrumentProtocol.initialize(self, *args, **kwargs)
def _handler_unconfigured_exit(self, *args, **kwargs):
"""
"""
pass
def _handler_unconfigured_initialize(self, *args, **kwargs):
"""
"""
next_state = None
result = None
# Reenter initialize.
next_state = SBE37State.UNCONFIGURED
return (next_state, result)
def _handler_unconfigured_configure(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
InstrumentProtocol.configure(self, *args, **kwargs)
except (TypeError, KeyError, InstrumentConnectionException, IndexError):
result = InstErrorCode.INVALID_PARAMETER
next_state = None
# Everything worked, set next state.
else:
next_state = SBE37State.DISCONNECTED
return (next_state, result)
########################################################################
# SBE37State.DISCONNECTED
########################################################################
def _handler_disconnected_enter(self, *args, **kwargs):
"""
"""
mi_logger.info('channel %s entered state %s',SBE37Channel.CTD,
SBE37State.DISCONNECTED)
self._publish_state_change(SBE37State.DISCONNECTED)
def _handler_disconnected_exit(self, *args, **kwargs):
"""
"""
pass
def _handler_disconnected_initialize(self, *args, **kwargs):
"""
"""
next_state = None
result = None
# Switch to unconfigured to initialize comms.
next_state = SBE37State.UNCONFIGURED
return (next_state, result)
def _handler_disconnected_configure(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
InstrumentProtocol.configure(self, *args, **kwargs)
except (TypeError, KeyError, InstrumentConnectionException, IndexError):
result = InstErrorCode.INVALID_PARAMETER
next_state = SBE37State.UNCONFIGURED
return (next_state, result)
def _handler_disconnected_connect(self, *args, **kwargs):
"""
@throw InstrumentTimeoutException on timeout
"""
next_state = None
result = None
try:
InstrumentProtocol.connect(self, *args, **kwargs)
timeout = kwargs.get('timeout', 10)
prompt = self._wakeup(timeout)
if prompt == SBE37Prompt.COMMAND:
next_state = SBE37State.COMMAND
elif prompt == SBE37Prompt.AUTOSAMPLE:
next_state = SBE37State.AUTOSAMPLE
except InstrumentConnectionException:
# Connection failed, fail and stay here.
next_state = None
result = InstErrorCode.DRIVER_CONNECT_FAILED
except InstrumentTimeoutException:
# Timeout connecting or waking device. Stay disconnected.
InstrumentProtocol.disconnect(self, *args, **kwargs)
next_state = None
result = InstErrorCode.DRIVER_CONNECT_FAILED
return (next_state, result)
########################################################################
# SBE37State.COMMAND
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
"""
mi_logger.info('channel %s entered state %s',SBE37Channel.CTD,
SBE37State.COMMAND)
self._publish_state_change(SBE37State.COMMAND)
self._update_params(*args, **kwargs)
def _handler_command_exit(self, *args, **kwargs):
"""
"""
pass
def _handler_command_disconnect(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
mi_logger.info('DISCONNECTING')
InstrumentProtocol.disconnect(self, *args, **kwargs)
mi_logger.info('DONE DISCONNECTING')
next_state = SBE37State.DISCONNECTED
except InstrumentConnectionException:
# Disconnect failed. Fail and stay here.
next_state = None
result = InstErrorCode.DISCONNECT_FAILED
else:
next_state = SBE37State.DISCONNECTED
result = InstErrorCode.OK
return (next_state, result)
def _handler_command_set(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
result = self._do_cmd_resp('set', *args, **kwargs)
next_state = None
except InstrumentTimeoutException:
next_state = None
result = InstErrorCode.TIMEOUT
except IndexError:
next_state = None
result = InstErrorCode.REQUIRED_PARAMETER
return (next_state, result)
def _handler_command_acquire_sample(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
result = self._do_cmd_resp('ts', *args, **kwargs)
except InstrumentTimeoutException:
result = InstErrorCode.TIMEOUT
return (next_state, result)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
self._do_cmd_no_resp('startnow', *args, **kwargs)
next_state = SBE37State.AUTOSAMPLE
except InstrumentTimeoutException:
result = InstErrorCode.TIMEOUT
return (next_state, result)
def _handler_command_test(self, *args, **kwargs):
"""
"""
next_state = None
result = None
return (next_state, result)
def _handler_command_update_params(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
self._update_params(*args, **kwargs)
except InstrumentTimeoutError:
result = InstErrorCode.TIMEOUT
return (next_state, result)
########################################################################
# SBE37State.AUTOSAMPLE
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
"""
mi_logger.info('channel %s entered state %s',SBE37Channel.CTD,
SBE37State.AUTOSAMPLE)
self._publish_state_change(SBE37State.AUTOSAMPLE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
"""
pass
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
@throw InstrumentProtocolException on invalid command
"""
next_state = None
result = None
try:
prompt = None
timeout = kwargs.get('timeout', 10)
while prompt != SBE37Prompt.AUTOSAMPLE:
prompt = self._wakeup(timeout)
self._do_cmd_resp('stop', *args, **kwargs)
prompt = None
while prompt != SBE37Prompt.COMMAND:
prompt = self._wakeup(timeout)
next_state = SBE37State.COMMAND
except InstrumentTimeoutException:
result = InstErrorCode.TIMEOUT
return (next_state, result)
########################################################################
# SBE37State.COMMAND and SBE37State.AUTOSAMPLE common handlers.
########################################################################
def _handler_command_autosample_get(self, *args, **kwargs):
"""
"""
next_state = None
result = None
try:
parameter = args[0]
except IndexError:
result = InstErrorCode.REQUIRED_PARAMETER
else:
try:
result = self._get_param_dict(parameter)
except KeyError:
result = InstErrorCode.INVALID_PARAMETER
return (next_state, result)
########################################################################
# Private helpers
########################################################################
    def _got_data(self, data):
        """
        Incoming-data callback: delegate to the base class, trim the prompt
        buffer, and scan for samples while autosampling.
        """
        CommandResponseInstrumentProtocol._got_data(self, data)
        # Only keep the latest characters in the prompt buffer.
        # (7 chars is enough to hold the longest prompt, '?cmd S>'.)
        if len(self._promptbuf)>7:
            self._promptbuf = self._promptbuf[-7:]
        # If we are streaming, process the line buffer for samples.
        if self._fsm.get_current_state() == SBE37State.AUTOSAMPLE:
            self._process_streaming_data()
    def _process_streaming_data(self):
        """
        Scan the line buffer for complete lines and publish any samples found,
        keeping the trailing partial line for the next pass.
        """
        # NOTE(review): the membership test uses self.eoln but the split uses
        # the module constant SBE37_NEWLINE -- confirm both are '\r\n'.
        if self.eoln in self._linebuf:
            lines = self._linebuf.split(SBE37_NEWLINE)
            # The last element is the (possibly empty) unterminated remainder.
            self._linebuf = lines[-1]
            for line in lines:
                sample = self._extract_sample(line, True)
    def _send_wakeup(self):
        """
        Send a newline to the device to prompt it awake.
        """
        self._logger_client.send(SBE37_NEWLINE)
    def _update_params(self, *args, **kwargs):
        """
        Refresh the parameter dict by issuing ds and dc, then publish a
        config_change event if any cached value changed.
        @param timeout (kwarg) per-command timeout in seconds, default 10.
        """
        timeout = kwargs.get('timeout', 10)
        old_config = self._get_config_param_dict()
        self._do_cmd_resp('ds',timeout=timeout)
        self._do_cmd_resp('dc',timeout=timeout)
        new_config = self._get_config_param_dict()
        if new_config != old_config:
            if self.send_event:
                event = {
                    'type' : 'config_change',
                    'value' : new_config
                }
                self.send_event(event)
    def _build_simple_command(self, cmd):
        """
        Build a device command string: the verb followed by the SBE37 newline.
        """
        return cmd+SBE37_NEWLINE
def _build_set_command(self, cmd, param, val):
"""
"""
str_val = self._format_param_dict(param, val)
set_cmd = '%s=%s' % (param, str_val)
set_cmd = set_cmd + SBE37_NEWLINE
return set_cmd
    def _parse_dsdc_response(self, response, prompt):
        """
        Feed each line of a ds/dc status response through the parameter-dict
        matchers to refresh cached parameter values.
        """
        for line in response.split(SBE37_NEWLINE):
            self._update_param_dict(line)
def _parse_ts_response(self, response, prompt):
"""
"""
sample = None
for line in response.split(SBE37_NEWLINE):
sample = self._extract_sample(line, True)
if sample: break
return sample
def _extract_sample(self, line, publish=True):
    """
    Parse one CTD sample line into a dict of single-element lists with
    keys 't' (temperature), 'c' (conductivity), 'p' (pressure) and
    'time' (driver timestamp, seconds since epoch).
    @param line candidate sample line.
    @param publish if True and a send_event callback is set, publish a
        'ctd_parsed' sample event.
    @retval the sample dict, or None if the line does not match the
        sample pattern.
    """
    matched = self._sample_regex.match(line)
    if not matched:
        return None
    sample = {
        't': [float(matched.group(1))],
        'c': [float(matched.group(2))],
        'p': [float(matched.group(3))],
        # Driver timestamp; the instrument's own date/time fields
        # (salinity, sound velocity, sample time) are not extracted.
        'time': [time.time()],
    }
    if publish and self.send_event:
        self.send_event({
            'type': 'sample',
            'name': 'ctd_parsed',
            'value': sample
        })
    return sample
def _parse_set_response(self, response, prompt):
    """Map the prompt following a set command to a driver error code."""
    return (InstErrorCode.OK if prompt == SBE37Prompt.COMMAND
            else InstErrorCode.BAD_DRIVER_COMMAND)
def _publish_state_change(self, state):
    """Publish a state_change event if a send_event callback is set."""
    if not self.send_event:
        return
    self.send_event({
        'type': 'state_change',
        'value': state
    })
########################################################################
# Static helpers to format set commands.
########################################################################
@staticmethod
def _true_false_to_string(v):
"""
Write a boolean value to string formatted for sbe37 set operations.
@param v a boolean value.
@retval A yes/no string formatted for sbe37 set operations, or
None if the input is not a valid bool.
"""
if not isinstance(v,bool):
return None
if v:
return 'y'
else:
return 'n'
@staticmethod
def _int_to_string(v):
"""
Write an int value to string formatted for sbe37 set operations.
@param v An int val.
@retval an int string formatted for sbe37 set operations, or None if
the input is not a valid int value.
"""
if not isinstance(v,int):
return None
else:
return '%i' % v
@staticmethod
def _float_to_string(v):
"""
Write a float value to string formatted for sbe37 set operations.
@param v A float val.
@retval a float string formatted for sbe37 set operations, or None if
the input is not a valid float value.
"""
if not isinstance(v,float):
return None
else:
return '%e' % v
@staticmethod
def _date_to_string(v):
"""
Write a date tuple to string formatted for sbe37 set operations.
@param v a date tuple: (day,month,year).
@retval A date string formatted for sbe37 set operations,
or None if the input is not a valid date tuple.
"""
if not isinstance(v,(list,tuple)):
return None
if not len(v)==3:
return None
months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep',
'Oct','Nov','Dec']
day = v[0]
month = v[1]
year = v[2]
if len(str(year)) > 2:
year = int(str(year)[-2:])
if not isinstance(day,int) or day < 1 or day > 31:
return None
if not isinstance(month,int) or month < 1 or month > 12:
return None
if not isinstance(year,int) or year < 0 or year > 99:
return None
return '%02i-%s-%02i' % (day,months[month-1],year)
@staticmethod
def _string_to_date(datestr,fmt):
"""
Extract a date tuple from an sbe37 date string.
@param str a string containing date information in sbe37 format.
@retval a date tuple, or None if the input string is not valid.
"""
if not isinstance(datestr,str):
return None
try:
date_time = time.strptime(datestr,fmt)
date = (date_time[2],date_time[1],date_time[0])
except ValueError:
return None
return date
###############################################################################
# Seabird Electronics 37-SMP MicroCAT driver.
###############################################################################
class SBE37Driver(InstrumentDriver):
    """
    Driver for the Seabird Electronics 37-SMP MicroCAT CTD.

    Wraps a single SBE37Protocol instance under the CTD channel and fans
    driver calls out to the requested channels, collecting per-channel
    results in a dict keyed by channel (or by (channel, parameter) for
    get/set). When the channel/parameter arguments themselves are
    malformed, the whole call returns InstErrorCode.REQUIRED_PARAMETER
    instead of a result dict.
    """
    def __init__(self, evt_callback):
        """
        @param evt_callback Callback used to publish driver events.
        """
        InstrumentDriver.__init__(self, evt_callback)
        # Build the protocol for CTD channel.
        protocol = SBE37Protocol(SBE37Prompt, SBE37_NEWLINE, evt_callback)
        self._channels = {SBE37Channel.CTD: protocol}
    ########################################################################
    # Channel connection interface.
    ########################################################################
    def initialize(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Initialize the given channels.
        @param channels List of channels to initialize.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                result[channel] = self._channels[channel].initialize(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def configure(self, configs, *args, **kwargs):
        """
        Configure communications for the given channels.
        @param configs Dict mapping channel to its comms config.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            channels = configs.keys()
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                config = configs[channel]
                result[channel] = self._channels[channel].configure(config, *args, **kwargs)
        except (RequiredParameterException, TypeError):
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def connect(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Connect the given channels.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                result[channel] = self._channels[channel].connect(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def disconnect(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Disconnect the given channels.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                result[channel] = self._channels[channel].disconnect(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def detach(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Detach the given channels.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                result[channel] = self._channels[channel].detach(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    ########################################################################
    # Channel command interface.
    ########################################################################
    def get(self, params, *args, **kwargs):
        """
        Get parameter values.
        @param params List of (channel, parameter) tuples; ALL specifiers
            are expanded.
        @retval Dict keyed by (channel, parameter), or REQUIRED_PARAMETER.
        """
        try:
            (result, params) = self._check_get_args(params)
            for (channel, parameter) in params:
                result[(channel, parameter)] = \
                    self._channels[channel].get(parameter, *args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        # Return overall success and individual results.
        return result
    def set(self, params, *args, **kwargs):
        """
        Set parameter values, then refresh cached params once per
        touched channel.
        @param params Dict mapping (channel, parameter) to value.
        @retval Dict keyed by (channel, parameter), or REQUIRED_PARAMETER.
        """
        try:
            (result, params) = self._check_set_args(params)
            updated_channels = []
            # Process each parameter-value pair. Snapshot the items so
            # the dict cannot change size mid-iteration (the original
            # iterated iteritems(), which is also Python-2-only).
            for (key, val) in list(params.items()):
                (channel, parameter) = key
                result[key] = self._channels[channel].set(parameter, val, *args, **kwargs)
                if channel not in updated_channels:
                    updated_channels.append(channel)
            for channel in updated_channels:
                self._channels[channel].update_params(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        # Additional checking can go here.
        # Return overall success and individual results.
        return result
    def execute_direct(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Not implemented.
        """
        pass
    def execute_acquire_sample(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Poll one sample on the given channels.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                # Dispatch to the requested channel (was hardwired to
                # the CTD channel, ignoring the loop variable).
                result[channel] = self._channels[channel].\
                    execute_acquire_sample(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def start_autosample(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Switch the given channels to autosample mode.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                # Dispatch to the requested channel (was hardwired to CTD).
                result[channel] = self._channels[channel].\
                    execute_start_autosample(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def stop_autosample(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Switch the given channels out of autosample mode.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                # Dispatch to the requested channel (was hardwired to CTD).
                result[channel] = self._channels[channel].\
                    execute_stop_autosample(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    def execute_test(self, channels=[SBE37Channel.CTD], *args, **kwargs):
        """
        Run the instrument self-test on the given channels.
        @retval Dict of per-channel results, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                # Dispatch to the requested channel (was hardwired to CTD).
                result[channel] = self._channels[channel].\
                    execute_test(*args, **kwargs)
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    ########################################################################
    # TBD.
    ########################################################################
    def get_resource_commands(self):
        """
        @retval List of (channel, command) tuples for the CTD channel.
        """
        result = []
        cmds = self._channels[SBE37Channel.CTD].get_resource_commands()
        if cmds:
            result = [(SBE37Channel.CTD, cmd) for cmd in cmds]
        return result
    def get_resource_params(self):
        """
        @retval List of (channel, parameter) tuples for the CTD channel.
        """
        result = []
        params = self._channels[SBE37Channel.CTD].get_resource_params()
        if params:
            result = [(SBE37Channel.CTD, param) for param in params]
        return result
    def get_channels(self):
        """
        @retval List of all driver channels.
        """
        # Fixed: referenced undefined name 'SBE37Channels' (NameError).
        return SBE37Channel.list()
    def get_active_channels(self):
        """
        @retval [SBE37Channel.CTD] if the CTD channel is in COMMAND or
            AUTOSAMPLE state, else [].
        """
        state = self.get_current_state()[SBE37Channel.CTD]
        if state in [SBE37State.COMMAND, SBE37State.AUTOSAMPLE]:
            result = [SBE37Channel.CTD]
        else:
            result = []
        return result
    def get_current_state(self, channels=[SBE37Channel.CTD]):
        """
        @retval Dict of per-channel states, or REQUIRED_PARAMETER.
        """
        try:
            (result, valid_channels) = self._check_channel_args(channels)
            for channel in valid_channels:
                result[channel] = self._channels[channel].get_current_state()
        except RequiredParameterException:
            result = InstErrorCode.REQUIRED_PARAMETER
        return result
    ########################################################################
    # Private helpers.
    ########################################################################
    @staticmethod
    def _check_channel_args(channels):
        """
        Validate a channel list, expanding the ALL specifier.
        @param channels List or tuple of channel identifiers.
        @retval (result, valid_channels) where result maps each invalid
            channel to InstErrorCode.INVALID_CHANNEL.
        @raises RequiredParameterException if channels is not a
            non-empty list or tuple.
        """
        valid_channels = []
        result = {}
        if channels is None or not isinstance(channels, (list, tuple)):
            raise RequiredParameterException()
        if len(channels) == 0:
            raise RequiredParameterException()
        clist = SBE37Channel.list()
        if SBE37Channel.ALL in clist:
            clist.remove(SBE37Channel.ALL)
        # Work on a copy: the original extended the caller's list in
        # place (and 'tuple += list' raises TypeError for tuple input).
        channels = list(channels)
        # Expand "ALL" channel keys.
        if SBE37Channel.ALL in channels:
            channels += clist
        channels = [c for c in channels if c != SBE37Channel.ALL]
        # Make unique.
        channels = list(set(channels))
        # Separate valid and invalid channels.
        valid_channels = [c for c in channels if c in clist]
        invalid_channels = [c for c in channels if c not in clist]
        # Build result dict with invalid entries.
        for c in invalid_channels:
            result[c] = InstErrorCode.INVALID_CHANNEL
        return (result, valid_channels)
    @staticmethod
    def _check_get_args(params):
        """
        Validate a (channel, parameter) list, expanding ALL specifiers.
        @param params List or tuple of (channel, parameter) tuples.
        @retval (result, valid_params) where result maps each invalid
            pair to INVALID_PARAMETER or INVALID_CHANNEL.
        @raises RequiredParameterException if params is not a non-empty
            list or tuple.
        """
        result = {}
        if params is None or not isinstance(params, (list, tuple)):
            raise RequiredParameterException()
        if len(params) == 0:
            raise RequiredParameterException()
        plist = SBE37Parameter.list()
        if SBE37Parameter.ALL in plist:
            plist.remove(SBE37Parameter.ALL)
        clist = SBE37Channel.list()
        if SBE37Channel.ALL in clist:
            clist.remove(SBE37Channel.ALL)
        # Work on a copy so the caller's list is not extended in place.
        params = list(params)
        # Expand and remove "ALL" channel specifiers.
        params += [(c, parameter) for (channel, parameter) in params
                   if channel == SBE37Channel.ALL for c in clist]
        params = [(c, p) for (c, p) in params if c != SBE37Channel.ALL]
        # Expand and remove "ALL" parameter specifiers.
        params += [(channel, p) for (channel, parameter) in params
                   if parameter == SBE37Parameter.ALL for p in plist]
        params = [(c, p) for (c, p) in params if p != SBE37Parameter.ALL]
        # Make list unique.
        params = list(set(params))
        # Separate valid and invalid params.
        invalid_params = [(c, p) for (c, p) in params
                          if c in clist and p not in plist]
        invalid_channels = [(c, p) for (c, p) in params if c not in clist]
        valid_params = [(c, p) for (c, p) in params
                        if c in clist and p in plist]
        # Build result dict with invalid entries.
        for (c, p) in invalid_params:
            result[(c, p)] = InstErrorCode.INVALID_PARAMETER
        for (c, p) in invalid_channels:
            result[(c, p)] = InstErrorCode.INVALID_CHANNEL
        return (result, valid_params)
    @staticmethod
    def _check_set_args(params):
        """
        Validate a set-parameter dict, expanding the ALL channel
        specifier and stripping invalid entries.
        @param params Dict mapping (channel, parameter) to value.
        @retval (result, params) where result maps each removed key to
            INVALID_CHANNEL or INVALID_PARAMETER and params is the
            cleaned dict.
        @raises RequiredParameterException if params is not a non-empty
            dict.
        """
        result = {}
        if params is None or not isinstance(params, dict):
            raise RequiredParameterException()
        if len(params) == 0:
            raise RequiredParameterException()
        plist = SBE37Parameter.list()
        if SBE37Parameter.ALL in plist:
            plist.remove(SBE37Parameter.ALL)
        clist = SBE37Channel.list()
        if SBE37Channel.ALL in clist:
            clist.remove(SBE37Channel.ALL)
        # Work on a copy so the caller's dict is untouched, and iterate
        # over snapshots: the original popped keys while iterating
        # iteritems(), which raises "dict changed size during iteration".
        params = dict(params)
        # Expand and remove "ALL" channel specifiers.
        for (key, val) in list(params.items()):
            if key[0] == SBE37Channel.ALL:
                for c in clist:
                    params[(c, key[1])] = val
                params.pop(key)
        # Remove invalid parameters.
        for (key, val) in list(params.items()):
            if key[0] not in clist:
                result[key] = InstErrorCode.INVALID_CHANNEL
                params.pop(key)
            elif key[1] not in plist:
                result[key] = InstErrorCode.INVALID_PARAMETER
                params.pop(key)
        return (result, params)
    ########################################################################
    # Misc and temp.
    ########################################################################
    def driver_echo(self, msg):
        """
        Echo a message back, prefixed with 'driver_echo: '. Test helper.
        """
        echo = 'driver_echo: %s' % msg
        return echo
| null |
ion/services/mi/drivers/sbe37_driver.py
|
sbe37_driver.py
|
py
| 50,667 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.common.BaseEnum",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState.UNCONFIGURED",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState.DISCONNECTED",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState.COMMAND",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState.AUTOSAMPLE",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverState",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.BaseEnum",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.ENTER",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.EXIT",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.CONFIGURE",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.INITIALIZE",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.CONNECT",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.DISCONNECT",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.DETACH",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.ACQUIRE_SAMPLE",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.START_AUTOSAMPLE",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.STOP_AUTOSAMPLE",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.TEST",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.GET",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.SET",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent.UPDATE_PARAMS",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverEvent",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.BaseEnum",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverChannel.CTD",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverChannel",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverChannel.ALL",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverChannel",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverCommand",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.BaseEnum",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.DriverParameter",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.CommandResponseInstrumentProtocol",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.CommandResponseInstrumentProtocol.__init__",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.CommandResponseInstrumentProtocol",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_fsm_args.InstrumentFSM",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.UNHANDLED_EVENT",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol.initialize",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol.configure",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentConnectionException",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_PARAMETER",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 492,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol.configure",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol",
"line_number": 535,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentConnectionException",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_PARAMETER",
"line_number": 538,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol.connect",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol",
"line_number": 551,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentConnectionException",
"line_number": 560,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.DRIVER_CONNECT_FAILED",
"line_number": 563,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 563,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentTimeoutException",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol.disconnect",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol",
"line_number": 567,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.DRIVER_CONNECT_FAILED",
"line_number": 569,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 569,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol.disconnect",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.InstrumentProtocol",
"line_number": 598,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentConnectionException",
"line_number": 602,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.DISCONNECT_FAILED",
"line_number": 605,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 605,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.OK",
"line_number": 609,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 609,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentTimeoutException",
"line_number": 623,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.TIMEOUT",
"line_number": 625,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 625,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 629,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 629,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentTimeoutException",
"line_number": 642,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.TIMEOUT",
"line_number": 643,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 643,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentTimeoutException",
"line_number": 657,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.TIMEOUT",
"line_number": 658,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.TIMEOUT",
"line_number": 680,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 680,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.InstrumentTimeoutException",
"line_number": 718,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.TIMEOUT",
"line_number": 719,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 719,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 737,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 737,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_PARAMETER",
"line_number": 744,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 744,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_protocol.CommandResponseInstrumentProtocol._got_data",
"line_number": 755,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_protocol.CommandResponseInstrumentProtocol",
"line_number": 755,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 861,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.OK",
"line_number": 879,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 879,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.BAD_DRIVER_COMMAND",
"line_number": 881,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 881,
"usage_type": "name"
},
{
"api_name": "time.strptime",
"line_number": 987,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_driver.InstrumentDriver",
"line_number": 999,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.instrument_driver.InstrumentDriver.__init__",
"line_number": 1008,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.instrument_driver.InstrumentDriver",
"line_number": 1008,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1027,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1028,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1028,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1043,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1044,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1044,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1057,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1058,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1058,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1071,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1072,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1072,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1085,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1086,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1086,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.OK",
"line_number": 1101,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1101,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1104,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1105,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1105,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1129,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1130,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1130,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1151,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1152,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1152,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1166,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1167,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1167,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1181,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1182,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1182,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1196,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1197,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1197,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1249,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.REQUIRED_PARAMETER",
"line_number": 1250,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1250,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1266,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1269,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_CHANNEL",
"line_number": 1290,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1290,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1302,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1305,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_PARAMETER",
"line_number": 1337,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1337,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_CHANNEL",
"line_number": 1339,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1339,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1351,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.exceptions.RequiredParameterException",
"line_number": 1354,
"usage_type": "call"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_CHANNEL",
"line_number": 1375,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1375,
"usage_type": "name"
},
{
"api_name": "ion.services.mi.common.InstErrorCode.INVALID_PARAMETER",
"line_number": 1379,
"usage_type": "attribute"
},
{
"api_name": "ion.services.mi.common.InstErrorCode",
"line_number": 1379,
"usage_type": "name"
}
] |
95062391
|
import sys
import argparse
import os.path
from api import get_course, split_url, find_module
def parse_args(args):
    """Parse command-line arguments for the add-to-module script.

    @param args: list of argument strings, e.g. sys.argv[1:].
    @return: argparse.Namespace with url, module_name and config_file.
    """
    # help text and argument parser
    # solution based on https://stackoverflow.com/a/24181138/462692
    # (typo fixed: "exitsing" -> "existing"; backslash-continued string
    # literals embedded runs of indentation whitespace in the help text,
    # replaced with implicit string concatenation)
    desc = '\n'.join([
        "Adds an existing page on Canvas to an existing module in the same course.",
        "An optional argument -c/--config_file can be used with the path to the config file. "
        "Otherwise the default config file '~/.config/canvasapi.conf' will be used.\n"
    ])
    parser = argparse.ArgumentParser(description=desc)
    required_named = parser.add_argument_group('required named arguments')
    required_named.add_argument("-u", "--url", required=True,
                                help="The full url of the page on Canvas "
                                     "that will be added to the module.")
    required_named.add_argument("-m", "--module_name", required=True,
                                help="The name of the module that will be "
                                     "updated, enclosed in quotation marks "
                                     "if it contains one or more spaces")
    parser.add_argument("-cf", "--config_file", help="Path to config file",
                        default='~/.config/canvasapi.conf')
    return parser.parse_args(args)
def main(args):
args = parse_args(args)
# extract course information from url and get course
API_URL, course_id, page_name = split_url(args.url, expected = 'page')
course = get_course(API_URL, course_id, args.config_file)
# check whether page to add actually exists
try:
page_to_add = course.get_page(page_name)
except:
sys.exit("Error: could not find page '%s' on Canvas.\nFull url: %s" % (page_name, args.url))
# find the module
module = find_module(course, args.module_name)
if not module:
sys.exit("Could not find module '%s' on Canvas" % args.module_name)
# update the module
try:
new_module_item = module.create_module_item(module_item = {
"type":"Page",
"content_id":"",
"page_url": page_to_add.url
})
print("Sucessfully added page '%s' to module '%s'." %(page_name, args.module_name))
except Exception as e:
sys.exit("Could not add page '%s' to module '%s':\n%s." %(page_name, args.module_name, str(e)))
if __name__ == "__main__":
main(sys.argv[1:])
| null |
add_to_module.py
|
add_to_module.py
|
py
| 2,331 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "api.split_url",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "api.get_course",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "api.find_module",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 55,
"usage_type": "attribute"
}
] |
517257352
|
import pandas as pd
import dateutil.parser
import json
import math
import os.path
import shlex
import subprocess
import tempfile
def execute_command(
command):
result = subprocess.call(shlex.split(command))
if result != 0:
raise RuntimeError("Error executing {}".format(command))
def lue_translate():
return os.path.expandvars("$LUE_OBJECTS/bin/lue_translate")
def import_lue_json(
lue_json_pathname,
lue_dataset_pathname):
command = "{} import --add {} {}".format(
lue_translate(),
lue_dataset_pathname,
lue_json_pathname)
execute_command(command)
def create_dot_graph(
lue_dataset_pathname,
pdf_graph_pathname):
"""
Create a dot graph of the LUE file containing the experiment results
"""
dot_properties_pathname = os.path.expandvars(
"$LUE/document/lue_translate/dot_properties.json")
with tempfile.NamedTemporaryFile(suffix=".dot") as dot_graph_file:
commands = []
commands.append(
"{} export --meta {} {} {}".format(
lue_translate(),
dot_properties_pathname,
lue_dataset_pathname,
dot_graph_file.name))
commands.append(
"dot -Tpdf -o {} {}".format(
pdf_graph_pathname,
dot_graph_file.name))
for command in commands:
execute_command(command)
# millnames = ['',' thousand',' Million',' Billion',' Trillion']
#
# def millify(n):
# n = float(n)
# millidx = max(0,min(len(millnames)-1,
# int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
#
# return '{:.0f}{}'.format(n / 10**(3 * millidx), millnames[millidx])
def format_duration(
duration):
# TODO Pass in units and be smarter
return duration
# return "{:,}".format(int(duration))
def format_nr_workers(
size):
nr_workers = math.floor(size)
return "{:,}".format(int(nr_workers)) if nr_workers == size else ""
def format_partition_size(
size):
partition_size = math.floor(size)
return "{:,}".format(int(partition_size)) if partition_size == size else ""
def select_data_for_plot(
data_frame,
name,
count):
# Select data needed for plotting
result = data_frame.filter(
items=
["nr_workers"] +
["{}_{}".format(name, i) for i in range(count)])
# Durations per nr workers
result = result.set_index(keys="nr_workers")
result = pd.DataFrame(
data=result.stack(),
columns=[name])
# Get rid of introduced level of index
result.index = result.index.droplevel(1)
# Create a new index, moving nr_workers index level into columns
result = result.reset_index()
return result
def json_to_data(
pathname):
pathname = os.path.expandvars(pathname)
assert os.path.isfile(pathname), pathname
lines = open(pathname).readlines()
lines = "".join(
[line for line in lines if not line.strip().startswith("#")])
return json.loads(lines)
def sort_benchmarks_by_time(
cluster,
benchmark,
experiment):
items = []
for benchmark_idx in range(benchmark.worker.nr_benchmarks):
nr_workers = benchmark.worker.nr_workers(benchmark_idx)
benchmark_pathname = experiment.benchmark_result_pathname(
cluster.name, benchmark.scenario_name, nr_workers, "json")
assert os.path.exists(benchmark_pathname), benchmark_pathname
benchmark_json = json_to_data(benchmark_pathname)
benchmark_start = dateutil.parser.isoparse(benchmark_json["start"])
items.append((benchmark_start, benchmark_idx))
assert len(items) > 0
items.sort(key=lambda item: item[0])
epoch = items[0][0]
idxs = [item[1] for item in items]
return idxs, epoch
def thread_binding(
nr_threads):
# Bind OS threads to the first processing unit of each core
return "thread:0-{}=core:0-{}.pu:0".format(
nr_threads-1,
nr_threads-1)
def performance_counter_name_to_property_name(
counter_name):
assert counter_name.find("|") == -1, counter_name
return counter_name.replace("/", "|")
def property_name_to_performance_counter_name(
property_name):
assert property_name.find("/") == -1, counter_name
return property_name.replace("|", "/")
| null |
benchmark/lue/benchmark/util.py
|
util.py
|
py
| 4,435 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "subprocess.call",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.path.expandvars",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.path.expandvars",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path.path.expandvars",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "os.path.path.isfile",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path.path.exists",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "dateutil.parser.parser.isoparse",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parser",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "dateutil.parser",
"line_number": 146,
"usage_type": "name"
}
] |
574030004
|
import os, webapp2, jinja2
from google.appengine.api import users, memcache
from dkc import jinja_functions
from dkc.models import User
import query
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'])
JINJA_ENVIRONMENT.filters.update({
'datetimeformat': jinja_functions.datetimeformat,
'getblobdata': jinja_functions.getBlobData,
'byteconvert': jinja_functions.byteConversion,
'split_string': jinja_functions.splitString,
'split_regex': jinja_functions.splitRegex,
'highlight_search': jinja_functions.search,
'getvars': jinja_functions.getVars
})
JINJA_ENVIRONMENT.tests.update({
'still_early': jinja_functions.getEarlyStatus
})
class AdminBaseHandler(webapp2.RequestHandler):
def user(self):
return users.get_current_user()
def render_template(self, template_filename, template_values={}):
user = users.get_current_user()
user_object = {"email": user.email(),
"nickname": user.nickname(),
"user_id": user.user_id(),
"logout_url": users.create_logout_url('/')
}
template_values['user'] = user_object
template = JINJA_ENVIRONMENT.get_template(template_filename)
self.response.out.write(template.render(template_values))
def display_message(self, message):
template_values = {
'message': message
}
self.render_template('message.html', template_values)
| null |
manage/__init__.py
|
__init__.py
|
py
| 1,594 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "jinja2.Environment",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dkc.jinja_functions.datetimeformat",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.getBlobData",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.byteConversion",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.splitString",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.splitRegex",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.search",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.getVars",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "dkc.jinja_functions.getEarlyStatus",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "dkc.jinja_functions",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.users.get_current_user",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.users",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.users.get_current_user",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.users",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.users.create_logout_url",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.users",
"line_number": 33,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.