id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses 7 values)
---|---|---|
3207573 | <reponame>gatsby-sahani/svGPFA<filename>scripts/demoUtils.py
import sys
import os
import torch
sys.path.append(os.path.expanduser("../src"))
import myMath.utils
def getLegQuadPointsAndWeights(nQuad, trialsLengths, dtype=torch.double):
nTrials = len(trialsLengths)
legQuadPoints = torch.empty((nTrials, nQuad, 1), dtype=dtype)
legQuadWeights = torch.empty((nTrials, nQuad, 1), dtype=dtype)
for r in range(nTrials):
legQuadPoints[r,:,0], legQuadWeights[r,:,0] = myMath.utils.leggaussVarLimits(n=nQuad, a=0, b=trialsLengths[r])
return legQuadPoints, legQuadWeights
def getIndPointLocs0(nIndPointsPerLatent, trialsLengths, firstIndPoint):
nLatents = len(nIndPointsPerLatent)
nTrials = len(trialsLengths)
Z0 = [None]*nLatents
for i in range(nLatents):
Z0[i] = torch.empty((nTrials, nIndPointsPerLatent[i], 1), dtype=torch.double)
for i in range(nLatents):
for j in range(nTrials):
Z0[i][j,:,0] = torch.linspace(firstIndPoint, trialsLengths[j], nIndPointsPerLatent[i])
return Z0
def getSVPosteriorOnIndPointsParams0(nIndPointsPerLatent, nLatents, nTrials, scale):
qMu0 = [None]*nLatents
qSVec0 = [None]*nLatents
qSDiag0 = [None]*nLatents
for i in range(nLatents):
qMu0[i] = torch.zeros(nTrials, nIndPointsPerLatent[i], 1, dtype=torch.double)
qSVec0[i] = scale*torch.eye(nIndPointsPerLatent[i], 1, dtype=torch.double).repeat(nTrials, 1, 1)
qSDiag0[i] = scale*torch.ones(nIndPointsPerLatent[i], 1, dtype=torch.double).repeat(nTrials, 1, 1)
return qMu0, qSVec0, qSDiag0
def getKernelsParams0(kernels, noiseSTD):
nTrials = len(kernels)
nLatents = len(kernels[0])
kernelsParams0 = [[] for r in range(nTrials)]
for r in range(nTrials):
kernelsParams0[r] = [[] for r in range(nLatents)]
for k in range(nLatents):
trueParams = kernels[r][k].getParams()
kernelsParams0[r][k] = noiseSTD*torch.randn(len(trueParams))+trueParams
return kernelsParams0
| StarcoderdataPython |
3396000 | <reponame>robert-giaquinto/survae_flows
import math
import torch
from torch import nn
class Distribution(nn.Module):
"""Distribution base class."""
def log_prob(self, x):
"""Calculate log probability under the distribution.
Args:
x: Tensor, shape (batch_size, ...)
Returns:
log_prob: Tensor, shape (batch_size,)
"""
raise NotImplementedError()
def sample(self, num_samples, temperature):
"""Generates samples from the distribution.
Args:
num_samples: int, number of samples to generate.
Returns:
samples: Tensor, shape (num_samples, ...)
"""
raise NotImplementedError()
def interpolate(self, num_samples, z1=None, z2=None):
"""
Generates samples from the distribution, interpolating between z1 and z2.
If z1 and z2 are None then it interpolates between 0 and a point on the shell (tails).
Args:
num_samples: int, number of samples to interpolate along.
z1: first point to interpolate from.
z2: final point to interpolate to.
Returns:
samples: Tensor, shape (num_samples, ...)
"""
raise NotImplementedError()
def sample_with_log_prob(self, num_samples):
"""Generates samples from the distribution together with their log probability.
Args:
num_samples: int, number of samples to generate.
Returns:
samples: Tensor, shape (num_samples, ...)
log_prob: Tensor, shape (num_samples,)
"""
samples = self.sample(num_samples)
log_prob = self.log_prob(samples)
return samples, log_prob
def forward(self, *args, mode, **kwargs):
'''
To allow Distribution objects to be wrapped by DataParallelDistribution,
which parallelizes .forward() of replicas on subsets of data.
DataParallelDistribution.log_prob() calls DataParallel.forward().
DataParallel.forward() calls Distribution.forward() for different
data subsets on each device and returns the combined outputs.
'''
if mode == 'log_prob':
return self.log_prob(*args, **kwargs)
else:
raise RuntimeError("Mode {} not supported.".format(mode))
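# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how a concrete subclass might implement the interface above;
# the `StandardNormal` class and its `shape` argument are assumptions made here
# purely for illustration, not part of the survae_flows API.
class StandardNormal(Distribution):
    """Standard Gaussian base distribution with independent components."""

    def __init__(self, shape):
        super(StandardNormal, self).__init__()
        self.shape = shape

    def log_prob(self, x):
        # Sum the elementwise Gaussian log-density over all non-batch dimensions.
        log_px = -0.5 * (math.log(2 * math.pi) + x ** 2)
        return log_px.reshape(x.shape[0], -1).sum(-1)

    def sample(self, num_samples, temperature=1.0):
        # Temperature scales the standard deviation of the draws.
        return temperature * torch.randn(num_samples, *self.shape)

# Example: draw samples together with their log-probabilities.
# dist = StandardNormal(shape=(2,))
# samples, log_prob = dist.sample_with_log_prob(num_samples=4)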
| StarcoderdataPython |
38267 | <filename>mainpages/views.py<gh_stars>0
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from .forms import RegisterForm, LoginForm
from django.urls import reverse
from .datebase_func import make_bd, check_car_wash, add_car_wash, time_by_id, update_time_by_id, get_info_about
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
import requests
from pprint import pprint
from datetime import datetime
def time_to_seconds(time):
time = time.split(':')
hh = 0
mm = 0
ss = 0
if len(time) == 2:
hh = int(time[0])
mm = int(time[1])
if len(time) == 3:
hh = int(time[0])
mm = int(time[1])
ss = int(time[2])
return hh * 3600 + mm * 60 + ss
def second_to_str(time):
hh = time // 3600 % 24
mm = time // 60 % 60
ss = time % 60
result = f"{hh}:{mm}:{ss}"
return result
def fast_search_car_wash(data, u=None):
data = dict(data)
current_time = datetime.now()
cur_pos = data.get('pos')[0]
count = int(data.get('count')[0])
make_bd()
winner_id = wash_id = data.get('0id')[0]
pohui = 99999999999
time_to_wash = 15*60
for i in range(count):
name = data.get(str(i) + 'name')[0]
wash_id = data.get(str(i) + 'id')[0]
wash_cord = data.get(str(i) + 'coords')[0]
if not check_car_wash(wash_id):
add_car_wash(wash_id, name, wash_cord)
request_body = 'https://api.distancematrix.ai/maps/api/distancematrix/json?'
request_body += f'origins={cur_pos}&'
request_body += f'destinations={wash_cord}&'
request_body += 'key=<KEY>mLF76mUkqYyaJI5UiDc6g'
response = requests.get(request_body)
print(response.text)
trip_time = response.json()['rows'][0]['elements'][0]['duration']['value']
trip_minutes = trip_time // 60
trip_seconds = trip_time % 60
trip_hours = trip_time // 3600
#pprint(f'{trip_hours}:{trip_minutes}:{trip_seconds}')
current_minutes = int(current_time.strftime('%M'))
current_seconds = int(current_time.strftime('%S'))
current_hours = int(current_time.strftime('%H'))
arrive_seconds = trip_seconds + current_seconds
arrive_minutes = trip_minutes + current_minutes
arrive_hours = trip_hours + current_hours
days = 0
if arrive_seconds // 60 != 0:
arrive_minutes += arrive_seconds // 60
arrive_seconds %= 60
if arrive_minutes // 60 != 0:
arrive_hours += arrive_minutes // 60 % 60
arrive_minutes %= 60
if arrive_hours // 24 != 0:
days = arrive_hours // 24
arrive_hours %= 24
#pprint(f'{days} {arrive_hours}:{arrive_minutes}:{arrive_seconds}')
arrive_time = str(arrive_hours) + ':' + str(arrive_minutes) + ':' + str(arrive_seconds)
open_time, close_time, free_time = time_by_id(wash_id)
if days == 0:
if time_to_seconds(arrive_time) + time_to_wash < time_to_seconds(close_time):
start_time = max(time_to_seconds(arrive_time), time_to_seconds(free_time))
#pprint(second_to_str(start_time))
if start_time < pohui:
pohui = start_time
winner_id = wash_id
#pprint(second_to_str(pohui))
update_time_by_id(winner_id, second_to_str(pohui + time_to_wash))
result = {}
response = get_info_about(winner_id)
coords_xy = response[2].split(',')
result['coords_x'] = coords_xy[0]
result['coords_y'] = coords_xy[1]
pos_xy = cur_pos.split(',')
result['pos_x'] = pos_xy[0]
result['pos_y'] = pos_xy[1]
if u is not None:
print('NICE')
u.email_user(subject='Талон на автомойку',
message=f'Вы записаны на автомойку, приезжайте к {second_to_str(pohui)}',
from_email='<EMAIL>'
)
return result
def main_page(request):
u = request.user
flag = u.is_authenticated
data = ''
if request.method == 'POST' and request.is_ajax():
if request.POST.get('pos') is not None:
print("hui")
u = request.user
if u.is_authenticated:
data = fast_search_car_wash(request.POST, u)
print(data)
request.session['data'] = data
return HttpResponseRedirect('/main/road_map')
#return redirect('mainpages:road_map')
if flag:
data = {
'button_1': 'logout_user',
'flag': flag,
}
return render(
request,
'main_page.html',
context=data
)
else:
data = {
'button_1': 'auth',
'flag': flag,
}
return render(
request,
'main_page.html',
context=data
)
def auth_page(request):
data = {
'button_1' : 'login',
'button_2': 'registration'
}
return render(
request,
'authorization.html',
context=data
)
def login_page(request):
if request.method == 'POST':
username = request.POST.get("login")
password = request.POST.get("password")
pprint(username)
pprint(password)
if password is not None:
username = request.POST.get("login")
password = request.POST.get("password")
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/main')
else:
form = LoginForm()
data = {
"text": "Неверный логин или пароль",
}
return render(
request,
'login.html',
context=data,
)
else:
return render(
request,
'login.html',
)
def registration_page(request):
if request.method == 'POST':
username = request.POST.get("login")
password = request.POST.get("password")
re_password = request.POST.get("re_password")
mail = request.POST.get("mail")
pprint(username)
pprint(password)
if re_password is not None:
if password != re_password:
data = {
'text': 'Пароли не совпадают',
}
return render(
request,
'registration.html',
context=data,
)
try:
validate_email(mail)
except ValidationError as e:
data = {
'text': 'Неверный формат email' + str(e)
}
return render(
request,
'registration.html',
context=data,
)
names = get_user_model()
names = list(names.objects.all())
for name in names:
if username in str(name):
form = RegisterForm()
data = {
"text": "Пользователь с таким логином уже существует",
}
return render(
request,
'registration.html',
context=data,
)
user = User.objects.create_user(
username=username,
password=password,
email=mail
)
user.save()
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/main')
else:
return render(
request,
'registration.html',
)
def logout_user(request):
u = request.user
if u.is_authenticated:
logout(request)
return redirect('/')
def road_map(request):
data = request.session['data']
print(data)
print('PIZDA')
print(request.GET)
return render(
request,
'map.html',
context=data
)
| StarcoderdataPython |
1715040 | <reponame>evestidor/svc-stock-manager<filename>src/operations/update_stock_price.py
from src import interfaces
from src.domain import Stock
class UpdateStockPriceOperation(interfaces.Operation):
def __init__(self, storage: interfaces.StockStorage):
self._storage = storage
def execute(self, symbol: str, price: float) -> Stock:
stock = Stock(symbol=symbol, price=price)
return self._storage.update_price(stock)
| StarcoderdataPython |
3229340 | <reponame>imaginal/openprocurement.storage.files
from openprocurement.storage.files.storage import FilesStorage
def includeme(config):
settings = config.registry.settings
config.registry.storage = FilesStorage(settings)
| StarcoderdataPython |
174686 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
if sys.version_info >= (3, 0, 0):
from urllib.parse import urlparse
else:
from urlparse import urlparse
if sys.version_info >= (3, 5, 0):
from math import isclose
else:
# math.isclose is only available on Python >= 3.5; provide a fallback otherwise
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
import json
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.contrib import auth
from django.contrib.auth import get_user_model
User = get_user_model()
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from guardian.decorators import permission_required, permission_required_or_403
from django.utils.decorators import method_decorator
from spmo.common import Common
from spmo.data_serialize import DataSerialize
from spcc.views.common import list_data, del_model_items, display_confirm_msg
from spcc.models.common import del_model_data
from spcc.views.common import Ajax
from appxs.account.models.user import Menu, Role
from appxs.account.forms.permission import AddResourceForm
co = Common()
def app_info():
app = {
"name": "account",
"fun": "permission",
"edit_url": 'account:edit_permission',
"del_url": 'account:del_permission'
}
return app
def get_all_resource(r_type='all'):
resources = list(Menu.objects.values(
'id',
'name',
'url',
'url_target',
'icon',
'code',
'parent',
'type',
'level',
'view_name',
'desc',
'order',
).distinct())
# Re-sort by the "order" field
resources = sorted(resources, key=lambda y: str(y['order']))
if r_type == 'menu':
r_menu = [r for r in resources if r['type'] == 'menu']
else:
r_menu = resources
return r_menu
def get_resource_from_role(role_id=None, r_type='all'):
# try:
resources = list(Role.objects.get(id=role_id).permissions.values(
'id',
'name',
'url',
'url_target',
'icon',
'code',
'parent',
'type',
'level',
'view_name',
'desc',
'order',
).distinct())
resources = sorted(resources, key=lambda y: str(y['order']))
if r_type == 'menu':
r_menu = [r for r in resources if r['type'] == 'menu']
else:
r_menu = resources
return r_menu
def get_resource_by_user(user=None):
if isinstance(user, User) is False:
user = get_object_or_404(User, pk=user)
resources = list(user.roles.values(
'permissions__id',
'permissions__name',
'permissions__url',
'permissions__url_target',
'permissions__icon',
'permissions__code',
'permissions__parent',
'permissions__type',
'permissions__level',
'permissions__view_name',
'permissions__desc',
'permissions__order',
).distinct())
resources = sorted(resources, key=lambda y: str(y['permissions__order']))
o_resources = []
for item in resources:
resource = {
'id': item['permissions__id'],
'name': item['permissions__name'],
'url': item['permissions__url'],
'url_target': item['permissions__url_target'],
'icon': item['permissions__icon'],
'code': item['permissions__code'],
'type': item['permissions__type'],
'parent': item['permissions__parent'],
'level': item['permissions__level'],
'view_name': item['permissions__view_name'],
'desc': item['permissions__desc'],
'order': item['permissions__order'],
}
o_resources.append(resource)
return o_resources
@csrf_exempt # disable CSRF
@login_required
def index(request, ):
app = app_info()
app['location'] = 'index'
return render(request, 'account/permission.html',
{'app': app})
@csrf_exempt # disable CSRF
@login_required
def grant_perm(request, role_id):
app = app_info()
app['location'] = 'grant_perm'
role_ins = get_object_or_404(Role, pk=role_id)
return render(request, 'account/grant_permission.html',
{'app': app, 'role_id': role_ins.id, 'role_name': role_ins.name}, )
@csrf_exempt # disable CSRF
@login_required
def grant_perm_min(request, role_id):
app = app_info()
app['location'] = 'grant_perm_min'
role_ins = get_object_or_404(Role, pk=role_id)
return render(request, 'account/grant_perm_min.html',
{'app': app, 'role_id': role_ins.id, 'role_name': role_ins.name}, )
@csrf_exempt # disable CSRF
@login_required
def show_user_perm(request, user_id):
app = app_info()
app['location'] = 'show_user_perm'
user_ins = get_object_or_404(User, pk=user_id)
return render(request, 'account/show_user_perm.html',
{'app': app, 'user_id': user_ins.id, 'user_name': user_ins.__str__}, )
@csrf_exempt # disable CSRF
@login_required
def get_user_perm_tree(request, user_id):
perm_data = []
user_ins = get_object_or_404(User, pk=user_id)
all_roles_ins = list(user_ins.roles.all())
perm_rolenames = {}
for role_ins in all_roles_ins:
all_resources = get_resource_from_role(role_id=role_ins.id)
for a_resource in all_resources:
pid = a_resource['parent']
r_id = a_resource['id']
checked = "true"
if pid is None:
pid = 0
if r_id in perm_rolenames:
role_name = '%s, %s' % (perm_rolenames[r_id], role_ins.name)
else:
role_name = role_ins.name
perm_rolenames[r_id] = role_name
perm_i = {'id': r_id, 'pId': pid,
'name': '%s[%s]<-[%s]' % (a_resource['view_name'], a_resource['type'], role_name),
'checked': checked}
perm_data.append(perm_i)
if request.method == "GET":
tree_res = {'data': perm_data}
html = json.dumps(tree_res)
return HttpResponse(html, content_type="application/json")
@csrf_exempt # disable CSRF
@login_required
def get_resource_tree(request, ):
perm_data = []
all_resources = get_all_resource()
for a_resource in all_resources:
pid = a_resource['parent']
if pid is None:
pid = 0
perm_i = {'id': a_resource['id'], 'pId': pid, 'name': '%s[%s]' % (a_resource['view_name'], a_resource['type']),
'order': a_resource['order'], }
perm_data.append(perm_i)
if request.method == "GET":
tree_res = {'data': perm_data}
html = json.dumps(tree_res)
return HttpResponse(html, content_type="application/json")
def sort_nodes(nodes=[], is_recursion=True):
'''
Update the position, ordering and level information of all nodes
:param nodes:
:param is_recursion:
:return:
'''
has_done = {}
nodes_count = len(nodes)
init_max_order_num = 2.0
init_min_order_num = 1.0
node_order_step = 0.0
if nodes_count > 0:
node_order_step = round((init_max_order_num - init_min_order_num) / nodes_count, 5) # keep 5 decimal places
curr_order_num = 0.0
curr_node_num = 0
for node in nodes:
if is_recursion and node['isParent']:
sort_nodes(nodes=node['children'], is_recursion=True)
if node['id'] not in has_done:
n_ins = get_object_or_404(Menu, pk=node['id'])
curr_order_num = init_min_order_num + node_order_step * curr_node_num
if curr_node_num == nodes_count - 1:
t_order = init_max_order_num
elif curr_node_num == 0:
t_order = init_min_order_num
else:
t_order = curr_order_num
n_ins.level = node['level']
n_ins.order = t_order
is_update_pid = False
if n_ins.parent is not None: # non-root node
if node['pId'] is None: # non-root node --> root node
n_ins.parent = None
elif n_ins.parent.id != node['pId']: # non-root --> non-root (parent changed)
is_update_pid = True
elif node['pId'] is not None and n_ins.parent is None: # root node --> non-root node
is_update_pid = True
else:
is_update_pid = False
if is_update_pid:
n_ins.parent = get_object_or_404(Menu, pk=node['pId'])
n_ins.save()
curr_node_num = curr_node_num + 1
has_done[node['id']] = 'done'
del n_ins
def sort_subnodes(parent_ins):
'''
Re-sort all child nodes under the given parent node
:param parent_ins: Menu instance
:return: nothing
'''
nodes = Menu.objects.filter(parent=parent_ins)
nodes_count = len(nodes)
init_max_order_num = 2.0
init_min_order_num = 1.0
node_order_step = 0.0
curr_order_num = 0.0
curr_node_num = 0
if nodes_count > 0:
node_order_step = round((init_max_order_num - init_min_order_num) / nodes_count, 5) # keep 5 decimal places
for n_ins in nodes:
curr_order_num = init_min_order_num + node_order_step * curr_node_num
if curr_node_num == nodes_count - 1:
t_order = init_max_order_num
elif curr_node_num == 0:
t_order = init_min_order_num
else:
t_order = curr_order_num
# print('n_ins_name: %s, n_ins_view_name: %s, t_order: %s' % (n_ins.name, n_ins.view_name, t_order))
n_ins.order = t_order
# Update the node's level
if n_ins.parent is None:
n_ins.level = 0
else:
n_ins.level = n_ins.parent.level + 1
n_ins.save()
curr_node_num = curr_node_num + 1
# del n_ins
@csrf_exempt # disable CSRF
@login_required
def save_resource_tree(request):
'''
Save the position, ordering and level information of all nodes
:param request:
:return:
'''
if request.method == "POST":
ds = DataSerialize()
tree_data = request.POST.get('tree_data')
tree_data = ds.deserialize(tree_data)
sort_nodes(nodes=tree_data)
rs_data = {'timestamp': ds.get_create_date()}
html = json.dumps(rs_data)
return HttpResponse(html, content_type="application/json")
@csrf_exempt # disable CSRF
@login_required
def update_resource_position(request):
'''
Update the position, ordering and level information of a resource node
:param request:
:return:
'''
if request.method == "POST":
ds = DataSerialize()
node_data = request.POST.get('node_data')
node_data = ds.deserialize(node_data)
# co.DD(node_data)
node = node_data['curr_node']
pre_node = node_data['pre_node']
next_node = node_data['next_node']
n_ins = get_object_or_404(Menu, pk=node['id'])
old_parent_ins = n_ins.parent
init_max_order_num = 2.0
init_min_order_num = 1.0
is_update_pid = False
if n_ins.parent is not None: # non-root node
if node['pId'] is None: # non-root node --> root node
n_ins.parent = None
elif n_ins.parent.id != node['pId']: # non-root --> non-root (parent changed)
is_update_pid = True
elif node['pId'] is not None and n_ins.parent is None: # root node --> non-root node
is_update_pid = True
else:
is_update_pid = False
if is_update_pid:
n_ins.parent = get_object_or_404(Menu, pk=node['pId'])
# Update the node's level
if n_ins.parent is None:
n_ins.level = 0
else:
n_ins.level = n_ins.parent.level + 1
else:
n_ins.level = node['level']
if pre_node is not None and next_node is not None: # sandwiched between two nodes
t_order = round((pre_node['order'] + next_node['order']) / 2, 5)
elif pre_node is None and next_node is None: # first or last position (only one element)
t_order = init_min_order_num
elif pre_node is None and next_node is not None: # first position
t_order = round(next_node['order'] / 2, 5)
elif pre_node is not None and next_node is None: # last position
t_order = round((pre_node['order'] + 1), 5)
n_ins.order = t_order
n_ins.save()
# Re-sort all sibling nodes so that their order values fall within init_min_order_num -- init_max_order_num,
# with the spacing between siblings normalized to round((init_max_order_num - init_min_order_num) / subnodes_count, 5)
new_parent_ins = n_ins.parent
if new_parent_ins != old_parent_ins:
sort_subnodes(parent_ins=new_parent_ins)
sort_subnodes(parent_ins=old_parent_ins)
rs_data = {'timestamp': ds.get_create_date()}
html = json.dumps(rs_data)
return HttpResponse(html, content_type="application/json")
@csrf_exempt # disable CSRF
@login_required
def get_perm_tree(request, role_id):
perm_data = []
role_perm = {}
role_resources = get_resource_from_role(role_id=role_id)
for r_resource in role_resources:
role_perm[r_resource['id']] = r_resource
all_resources = get_all_resource()
for a_resource in all_resources:
pid = a_resource['parent']
checked = "false"
if pid is None:
pid = 0
if a_resource['id'] in role_perm.keys():
checked = "true"
perm_i = {'id': a_resource['id'], 'pId': pid, 'name': '%s[%s]' % (a_resource['view_name'], a_resource['type']),
'checked': checked}
perm_data.append(perm_i)
if request.method == "GET":
tree_res = {'data': perm_data}
html = json.dumps(tree_res)
return HttpResponse(html, content_type="application/json")
@csrf_exempt # disable CSRF
@login_required
def save_perm_tree(request, role_id):
if request.method == "POST":
ds = DataSerialize()
tree_data = request.POST.get('tree_data')
tree_data = ds.deserialize(tree_data)
role_id = request.POST.get('role_id')
rs_data = {'timestamp': ds.get_create_date()}
role_ins = get_object_or_404(Role, pk=role_id)
for td in tree_data:
if td['checked'] is True and td['checkedOld'] is False:
role_ins.permissions.add(Menu.objects.get(pk=td['id']))
elif td['checked'] is False and td['checkedOld'] is True:
role_ins.permissions.remove(Menu.objects.get(pk=td['id']))
role_ins.save()
html = json.dumps(rs_data)
return HttpResponse(html, content_type="application/json")
@csrf_exempt # disable CSRF
@login_required
def add_resource(request, resource_pid=None):
if request.method == 'POST':
ds = DataSerialize()
form = AddResourceForm(model=Menu, data=request.POST)
if form.is_valid():
new_resource = form.save()
r_parent = new_resource.parent
if r_parent is not None:
new_resource.level = new_resource.parent.level + 1
elif r_parent is None:
new_resource.level = 0
new_resource.save()
sort_subnodes(parent_ins=r_parent)
rs_data = {'timestamp': ds.get_create_date(), 'result': 'success'}
html = json.dumps(rs_data)
return HttpResponse(html, content_type="application/json")
else:
if resource_pid is None or resource_pid == '' or int(resource_pid) == 0:
form = AddResourceForm(model=Menu, )
else:
form = AddResourceForm(model=Menu, instance=Menu(parent_id=resource_pid))
app = app_info()
app['name'] = "permission"
app['location'] = 'add'
return render(request, 'add_data2_not_nav.html',
{'form': form, 'app': app})
@csrf_exempt # disable CSRF
@login_required
def edit_resource(request, resource_id):
resource = get_object_or_404(Menu, pk=resource_id)
ds = DataSerialize()
if request.method == 'POST':
form = AddResourceForm(model=Menu, instance=resource, data=request.POST)
if form.is_valid():
new_resource = form.save()
r_parent = new_resource.parent
sort_subnodes(parent_ins=r_parent)
rs_data = {'timestamp': ds.get_create_date(), 'result': 'success'}
html = json.dumps(rs_data)
return HttpResponse(html, content_type="application/json")
else:
form = AddResourceForm(model=Menu, instance=resource)
app = app_info()
app['name'] = "m_account"
app['location'] = 'edit'
return render(request, 'edit_data2_not_nav.html',
{'form': form, 'app': app})
@csrf_exempt # disable CSRF
@login_required
def del_resource(request, resource_id):
ds = DataSerialize()
if request.method == "POST":
new_resource = get_object_or_404(Menu, pk=resource_id)
r_parent = new_resource.parent
sub_menus = Menu.objects.filter(parent=new_resource)
for sm in sub_menus:
sm.parent = r_parent
sm.save()
del_res = del_model_data(model=Menu, id=resource_id)
sort_subnodes(parent_ins=r_parent)
rs_data = {'timestamp': ds.get_create_date(), 'result': 'success'}
html = json.dumps(rs_data)
return HttpResponse(html, content_type="application/json")
| StarcoderdataPython |
3245409 | import re
import string
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
import itertools
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
class FeatureFinder:
def __init__(self):
self.featureVector = []
self.features = {}
#end
def extract_features(self , tweet, featureList):
tweet_words = set(tweet)
self.features = {}
for word in featureList:
self.features['contains(%s)' % word] = (word in tweet_words)
return self.features
#end
def getBigrams(self):
#A bigram is every sequence of two adjacent elements in a string of tokens
#find only featured bigrams using pmi test
score_fn=BigramAssocMeasures.pmi
n=100
bigram_finder = BigramCollocationFinder.from_words(self.featureVector)
bigrams = bigram_finder.nbest(score_fn, n)
#featureVector = [ngram for ngram in itertools.chain(featureVector, bigrams)]
featVector = []
for ngram in itertools.chain(self.featureVector, bigrams):
if(',' in str(ngram)):
ngram =' '.join(ngram)
featVector.append(ngram)
return featVector
#end
def replaceTwoOrMore(self ,s):
#look for 2 or more repetitions of character and replace with the character itself
pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
return pattern.sub(r"\1\1", s)
#end
def getFeatureVector(self ,tweet):
self.featureVector = []
p = PorterStemmer()
operators1 = set(('lt','gt','F','etc','facebook','fb','apple','aapl','amp','inc','ltd','twitter','blackberry','twtr','bbry','microsoft','msft','yahoo','yhoo'))
operators2 = set(('down', 'nor', 'not', 'above', 'very', 'before', 'up', 'after', 'again', 'too', 'few', 'over'))
stopWords = set(stopwords.words('english')) | operators1
stopWords = stopWords - operators2
#tokenize all words / split tweet into words
for w in word_tokenize(tweet):
w = self.replaceTwoOrMore(w)
#check if the word starts with an alphabetic character
val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*$", w)
#ignore if it is a stop word
if(w in stopWords or val is None):
continue
else:
#perform stemming eg removing ing from the end of word
w = p.stem(w)
self.featureVector.append(w)
self.featureVector = self.getBigrams()
return self.featureVector
#end
def replaceWords(self, tweet):
f = open('data/replaceWord.txt')
for l in f:
s = l.split('*')
tweet = re.sub(r"\b%s\b" % s[0] , s[1], tweet)
return tweet
#end
def processTweet(self , tweet):
# process the tweets
#Convert to lower case
tweet = tweet.lower()
#Remove www.* or https?://*
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))',' ',tweet)
#Remove @username
tweet = re.sub('@[^\s]+',' ',tweet)
#Remove $companytag
tweet = re.sub('\$[^\s]+',' ',tweet)
#Replace #word with word
tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
#remove punctuation
tweet = tweet.replace('\'','')
tweet = re.sub('[%s]' % re.escape(string.punctuation), ' ', tweet)
#replace words like thats to that is, isnt to is not
tweet = self.replaceWords(tweet)
#Remove additional white spaces
tweet = re.sub('[\s]+', ' ', tweet)
return tweet
#end
def processReqFeatTweet(self, tweet):
#Remove newlines
tweet = tweet.strip('\n')
#Remove www.* or https?://*
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))',' ',tweet)
#Remove additional white spaces
tweet = re.sub('[\s]+', ' ', tweet)
return tweet
#end | StarcoderdataPython |
# Source program file name
SOURCE_FILE = "{filename}.hs"
# Output program file name
OUTPUT_FILE = "{filename}.out"
# Compile command line
COMPILE = "ghc {source} -o {output} {extra}"
# Run command line
RUN = 'sh -c "./{program} {redirect}"'
# Display name
DISPLAY = "Haskell"
# Version
VERSION = "GHC 8.0.2"
# Ace.js editor mode
ACE_MODE = "haskell"
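# --- Illustrative example (not part of the original config) ---
# A minimal sketch of how a judge might expand these templates with str.format();
# the file name, compiler flag, and redirection below are assumptions chosen
# purely for illustration.
if __name__ == "__main__":
    source = SOURCE_FILE.format(filename="main")      # "main.hs"
    output = OUTPUT_FILE.format(filename="main")      # "main.out"
    compile_cmd = COMPILE.format(source=source, output=output, extra="-O2")
    run_cmd = RUN.format(program=output, redirect="< input.txt > output.txt")
    print(compile_cmd)  # ghc main.hs -o main.out -O2
    print(run_cmd)      # sh -c "./main.out < input.txt > output.txt"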
| StarcoderdataPython |
1732575 | import random
a1 = str(input('Digite o nome do aluno 1:'))
a2 = str(input('Digite o nome do aluno 2:'))
a3 = str(input('Digite o nome do aluno 3:'))
al = [a1, a2, a3]
print(f'O sorteado para o ir ao quadro foi {random.choice(al)}')
| StarcoderdataPython |
1782972 | <reponame>rsmonteiro2021/execicios_python<gh_stars>1-10
"""
Battery upgrade: Use the latest version of Electric.car.py from this section. Add
a method called upgrade_battery() to the Battery class. This method should check
the battery capacity and set it to 85 if the value is different. Create an electric
car with the default battery capacity, call get_range() once, and then call
get_range() a second time after upgrading the battery. You should see an increase
in the distance the car is able to travel.
"""
class Car():
"""Tentativa simples de modelar um carro."""
def __init__(self, make, model, year):
"""Inicializa os atributos de um carro."""
self.make = make
self.model = model
self.year = year
self.odometer_reading = 0
def get_descriptive_name(self):
""" Retonra o nome do veículo de modo formatado."""
long_name = str(self.year) + ', ' + self.make.title() + ', ' + self.model.title()
return long_name
def read_odometer(self):
""" Exibe uma frase que mostra a milhagem do carro."""
odometer = 'This car has a ' + str(self.odometer_reading) + ' miles on it.'
return odometer
def update_odometer(self, mileage):
""" Define o valor de leitura do odometro do veículo."""
if mileage >= self.odometer_reading:
self.odometer_reading = mileage
else:
print("You don't roll back an odometer.")
def increment_odometer(self, miles):
"""Atualiza o valor do odômetro do veículo."""
self.odometer_reading += miles
return self.odometer_reading
class Battery():
"""Tentativa simples de modelar uma bateria para veículos elétrico."""
def __init__(self, battery_size = 70):
"""Inicializa os atributos de uma bateria para carro elétrico."""
self.battery_size = battery_size
def describe_battery(self):
"""Retorna uma mensagem com a capacidade da bateria."""
print('\tThis car has a ' + str(self.battery_size) + ' kwh battery.')
def get_range(self):
"""Exibe uma frase acerca da distância que o carro é capaz de percorrer com essa bateria."""
if self.battery_size == 70:
range = 240
elif self.battery_size == 85:
range = 270
message = 'This car can go approximately ' + str(range) + ' miles on a full charge.'
print('\t' + message)
def upgrade_battery(self, battery_size = 85):
"""Exibe a capacidade da bateria."""
self.battery_size = battery_size
return self.battery_size
class EletricCar(Car):
"""Tetantiva simples de modelar um veículo elétrico."""
def __init__(self, make, model, year):
"""Inicializa os atributos da classe pai. Em seguida, inicializa os atributos
específicos de um carro elétrico."""
super().__init__(make, model, year)
self.battery = Battery()
self.details_car = 'details_car'
def describe_battery(self):
"""Exibe uma frase que descreve a capacidade da bateria."""
battery = "This car has a " + str(self.battery_size) + " -kwh battery."
return battery
def describe_details(self):
"""Retorna detalhes do carro elétrico."""
while True:
disponible_color = {1: 'White', 2: 'Black', 3: 'Silver', 4: 'Red'}
print('Escolha a cor do seu Tesla Eletric Car. \nDisponible Color:')
for number, color in disponible_color.items():
print('\t' + str(number) + ': ' + color + '.')
color = int(input('Digite aqui sua cor favorita:\n'))
if color in disponible_color:
if color == 1:
your_car = {'Color': 'White', 'Horse Power': '126 hp', 'Torque': '14 kNm', 'Style': 'Sedan'}
print('Congratulations!!!!\nDetails of the your Tesla Electric Car:')
for value, key in your_car.items():
print('\t' + value + ': ' + key)
break
elif color == 2:
your_car = {'Color': 'Black', 'Horse Power': '136 hp', 'Torque': '14 kNm', 'Style': 'Hatch'}
print('Congratulations!!!!\nDetails of the your Tesla Electric Car:')
for value, key in your_car.items():
print('\t' + value + ': ' + key)
break
elif color == 3:
your_car = {'Color': 'Silver', 'Horse Power': '126 hp', 'Torque': '16 kNm', 'Style': 'SUV'}
print('Congratulations!!!!\nDetails of the your Tesla Electric Car:')
for value, key in your_car.items():
print('\t' + value + ': ' + key)
break
else:
your_car = {'Color': 'Red', 'Power': '150 hp', 'Torque': '18 kNm', 'Style': 'Cupê'}
print('Congratulations!!!!\nDetails of the your Tesla Electric Car:')
for value, key in your_car.items():
print('\t' + value + ': ' + key)
break
else:
print('Não estamos personalizando o carro elétrico no momento!')
"""my_new_car = Car('audi', 'a4', 2016)
print(my_new_car.get_descriptive_name())
my_new_car.update_odometer(23)
my_new_car.read_odometer()
my_new_car.increment_odometer(100)
my_new_car.read_odometer()
"""
my_tesla = EletricCar("tesla", "model's", 2022)
my_tesla.describe_details()
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
print('\t' + my_tesla.get_descriptive_name())
my_tesla.battery.upgrade_battery()
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
| StarcoderdataPython |
1785095 | <reponame>ceddlyburge/unit-testing-calculations<filename>tests/test_construction_margin_calculator_isolate_partial_values.py
from tests.construction_margin_calculator_mockable_abstraction_builder import ConstructionMarginCalculatorMockableAbstractionBuilder
from tests.cash_flow_step_builder import CashFlowStepBuilder
# The full calculation for `construction_profit` is quite long and complicated,
# but we can simplify by setting some "additive" properties to 0, and some
# "multiplicative" properties to 1, so that they don't effect the result.
# This test sets balance_of_plant_costs_at_financial_close to 0, so that we can
# concentrate on the turbine_costs. This again means that the test code is
# simpler than the system under test, which helps with "tests as documentation"
# and the "Obscure Test" smell.
# There should obviously be more tests like this, but only one is shown for
# simplicity.
def test_construction_profit_includes_turbine_costs():
turbine_costs = 10
balance_of_plant_costs_at_financial_close = 0
fraction_of_spend = 0.3
epc_margin = 0.1
inflation = 1.2
sut = ConstructionMarginCalculatorMockableAbstractionBuilder() \
.with_balance_of_plant_costs_at_financial_close(balance_of_plant_costs_at_financial_close) \
.with_turbine_costs(turbine_costs) \
.with_inflation(inflation) \
.with_epc_margin(epc_margin) \
.in_selling_mode() \
.build()
cash_flow_step = CashFlowStepBuilder().build()
sut.calculate_step(cash_flow_step, fraction_of_spend)
assert cash_flow_step.construction_profit == -1 * turbine_costs * inflation * fraction_of_spend * epc_margin
# The test for the CashFlowStepsCalculator is no longer shown | StarcoderdataPython |
1636438 | import yaml
import six
script_out = """all:
children:
ungrouped:
hosts:
foobar:
should_be_artemis_here: !vault |
$ANSIBLE_VAULT;1.2;AES256;alan
30386264646430643536336230313232653130643332356531633437363837323430663031356364
3836313935643038306263613631396136663634613066650a303838613532313236663966343433
37636234366130393131616631663831383237653761373533363666303361333662373664336261
6136313463383061330a633835643434616562633238383530356632336664316366376139306135
3534"""
# --- the YAML docs ---
# class Monster(yaml.YAMLObject):
# yaml_tag = u'!vault'
# def __init__(self, node):
# print ' args kwargs ' + str(node)# + str(kwargs)
# self.node = node
#
# def __repr__(self):
# return str(self.node)
# second example
class Dice(tuple):
def __new__(cls, a, b):
return tuple.__new__(cls, [a, b])
def __repr__(self):
return "Dice(%s,%s)" % self
def dice_representer(dumper, data):
return dumper.represent_scalar(u'!dice', u'%sd%s' % data)
def dice_constructor(loader, node):
value = loader.construct_scalar(node)
a, b = map(int, value.split('d'))
return Dice(a, b)
yaml.add_representer(Dice, dice_representer)
yaml.add_constructor(u'!dice', dice_constructor)
print yaml.dump({'gold': Dice(10,6)})
print yaml.load("""initial hit points: !dice 8d4""")
class NaiveVault:
def __init__(self, data):
self.data = data
def __repr__(self):
return six.text_type(self.data)
print NaiveVault('hello world')
def vault_representer(dumper, data):
return dumper.represent_scalar(u'!vault', six.text_type(data))
def vault_constructor(loader, node):
value = loader.construct_scalar(node)
return NaiveVault(value)
yaml.add_representer(NaiveVault, vault_representer)
yaml.add_constructor(u'!vault', vault_constructor)
# --- the Ansible method ---
# from yaml.constructor import SafeConstructor
#
# class AnsibleConstructor(SafeConstructor):
#
# def construct_vault_encrypted_unicode(self, node):
# value = self.construct_scalar(node)
# return str(value)
#
# yaml.add_constructor(
# u'!vault',
# AnsibleConstructor.construct_vault_encrypted_unicode)
python_out = yaml.load(script_out)
print ' python output '
print python_out
print ' dumped output '
print yaml.dump(python_out, default_flow_style=False)
print ' original script out '
print script_out
print ' again, using safe_load '
yaml.SafeLoader.add_constructor(u'!vault', vault_constructor)
python_out = yaml.safe_load(script_out)
| StarcoderdataPython |
136557 | # -*- coding: utf-8 -*-
u"""
Levenshtein Distance
The Levenshtein distance between two words is the minimal number of
edits that turn one word into the other. Here, "edit" means a
single-letter addition, single-letter deletion, or exchange of a
letter with another letter.
http://en.wikipedia.org/wiki/Levenshtein_distance
EXAMPLES::
>>> from sage_bootstrap.levenshtein import Levenshtein
>>> Levenshtein(5)(u'Queensryche', u'Queensrÿche')
1
"""
#*****************************************************************************
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
class DistanceExceeded(Exception):
pass
class Levenshtein(object):
def __init__(self, limit):
"""
Levenshtein Distance with Maximum Distance Cutoff
Args:
limit (int): if the distance exceeds the limit, a
:class:`DistanceExceeded` is raised and the
computation is aborted.
EXAMPLES::
>>> from sage_bootstrap.levenshtein import Levenshtein
>>> lev3 = Levenshtein(3)
>>> lev3(u'saturday', u'sunday')
3
>>> lev3(u'kitten', u'sitting')
3
>>> lev2 = Levenshtein(2)
>>> lev2(u'kitten', u'sitting')
Traceback (most recent call last):
...
DistanceExceeded
"""
self._limit = limit
def __call__(self, a, b):
"""
calculate the levenshtein distance
args:
a,b (str): the two strings to compare
returns:
int: the Levenshtein distance if it is less or equal to
the distance limit.
Example::
>>> from sage_bootstrap.levenshtein import Levenshtein
>>> lev3 = Levenshtein(3)
>>> lev3(u'Saturday', u'Sunday')
3
"""
n, m = len(a), len(b)
if n > m:
# Optimization to use O(min(n,m)) space
a, b, n, m = b, a, m, n
curr = range(n+1)
for i in range(1, m+1):
prev, curr = curr, [i]+[0]*n
for j in range(1, n+1):
cost_add, cost_del = prev[j]+1, curr[j-1]+1
cost_change = prev[j-1]
if a[j-1] != b[i-1]:
cost_change += 1
curr[j] = min(cost_add, cost_del, cost_change)
if min(curr) > self._limit:
raise DistanceExceeded
if curr[n] > self._limit:
raise DistanceExceeded
return curr[n]
| StarcoderdataPython |
3247384 | <gh_stars>1-10
from get_current_pose_joints import get_current_pose_joints
from get_current_pose_cartesian import get_current_pose_cartesian
from stop import stop
| StarcoderdataPython |
3262570 | <filename>Code/Python/RiskyContrib.py
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.8.8
# latex_envs:
# LaTeX_envs_menu_present: true
# autoclose: false
# autocomplete: false
# bibliofile: RiskyContrib-Add.bib
# cite_by: apalike
# current_citInitial: 1
# eqLabelWithNumbers: true
# eqNumInitial: 1
# hotkeys:
# equation: Ctrl-E
# itemize: Ctrl-I
# labels_anchors: false
# latex_user_defs: true
# report_style_numbering: false
# user_envs_cfg: false
# ---
# %% [markdown]
# <h1><center>A Two-Asset Savings Model with an Income-Contribution Scheme</center></h1>
# <h2><center><NAME></center></h2>
# <h3><center> <EMAIL> </center></h3>
# <h2><center> Johns Hopkins University </center></h2>
#
# This notebook demonstrates the use of the `RiskyContrib` agent type
# of the [HARK Toolkit](https://econ-ark.org/toolkit). The model represents an agent who can
# save using two different assets---one risky and the other risk-free---to insure
# against fluctuations in his income, but faces frictions to transferring funds between
# assets. The flexibility of its implementation and its inclusion in the HARK
# toolkit will allow users to adapt the model to realistic life-cycle calibrations, and
# also to embed it in heterogeneous-agents macroeconomic models.
# %% code_folding=[0]
# Preamble
from HARK.ConsumptionSaving.ConsRiskyContribModel import (
RiskyContribConsumerType,
init_risky_contrib_lifecycle,
)
from time import time
from copy import copy
import numpy as np
import seaborn as sns
import pandas as pd
from Simulations.tools import pol_funcs_dframe, age_profiles
import os
# This is a jupytext paired notebook that autogenerates a .py file
# which can be executed from a terminal command line
# But a terminal does not permit inline figures, so we need to test jupyter vs terminal
# Google "how can I check if code is executed in the ipython notebook"
from IPython import get_ipython # In case it was run from python instead of ipython
# If the ipython process contains 'terminal' assume not in a notebook
def in_ipynb():
try:
if 'terminal' in str(type(get_ipython())):
return False
else:
return True
except NameError:
return False
# Determine whether to make the figures inline (for spyder or jupyter)
# vs whatever is the automatic setting that will apply if run from the terminal
if in_ipynb():
# # %matplotlib inline generates a syntax error when run from the shell
# so do this instead
get_ipython().run_line_magic('matplotlib', 'inline')
else:
get_ipython().run_line_magic('matplotlib', 'auto')
# %% [markdown]
# ## Model Description
#
# I now discuss the main components of the model informally, and leave its full
# recursive mathematical representation for Section \ref{sec:recursive}.
#
# ### Time, mortality, and utility
#
# Time advances in discrete steps that I will index with $t$. The model can
# be used in both infinite and finite-horizon versions.
#
# Agents face an exogenous risk of death $\delta_t$ each period, which becomes certain at the
# maximum age of the finite-horizon version. There are no intentional bequests; agents
# will consume all of their resources if they reach the last period, but they can leave
# accidental bequests upon premature death.
#
# In each period, agents derive utility from consumption only. Their utility function
# follows a constant relative risk aversion specification. Formally, for a level of
# consumption $C$, the agent derives instant utility
# \begin{equation}
# u(C) = \frac{C^{1-\rho}}{1- \rho}.
# \end{equation}
#
# #### Income process
#
# Agents supply labor inelastically. Their labor earnings $Y_{i,t}$ are the product of a
# permanent component $P_{i,t}$ and a transitory stochastic component $\theta_{i,t}$ as
# in \cite{Carroll1997qje}, where $i$ indexes different agents. Formally,
# \begin{equation*}
# \begin{split}
# \ln Y_{i,t} &= \ln P_{i,t} + \ln \theta_{i,t} \\
# \ln P_{i,t} &= \ln P_{i,t-1} + \ln \Gamma_{i,t} + \ln \psi_{i,t}
# \end{split}
# \end{equation*}
# where $\Gamma_{i,t}$ is a deterministic growth factor that can capture
# life-cycle patterns in earnings, and
# $\ln \psi_{i,t}\sim \mathcal{N}(-\sigma^2_{\psi,t}/2, \sigma_{\psi,t})$
# is a multiplicative shock to permanent income\footnote{The mean of the shock is set so that $E[\psi_{i,t}] = 1$.}.
#
# The transitory component $\theta_{i,t}$ is a mixture that models unemployment and
# other temporal fluctuations in income as
# \begin{equation*}
# \ln\theta_{i,t} = \begin{cases}
# \ln \mathcal{U}, & \text{With probability } \mho\\
# \ln \tilde{\theta}_{i,t}\sim\mathcal{N}(-\sigma^2_{\theta,t}/2, \sigma_{\theta,t}), & \text{With probability } 1-\mho,
# \end{cases}
# \end{equation*}
# with $\mho$ representing the probability of unemployment and $\mathcal{U}$ the replacement
# factor of unemployment benefits.
#
# This specification of the income process is parsimonious and flexible enough to accommodate
# life-cycle patterns in income growth and volatility, transitory unemployment and exogenous
# retirement. Introduced by \cite{Carroll1997qje}, this income specification is common in studies
# of life-cycle wealth accumulation and portfolio choice; see e.g.,
# \cite{Cagetti2003jbes,Cocco2005rfs,Fagereng2017jof}. The specification has
# also been used in studies of income volatility such as \cite{Carroll1992bpea,Carroll1997jme,Sabelhaus2010jme}, which have yielded calibrations of its stochastic shocks' distributions.
#
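# A minimal simulation sketch (not part of the original notebook) may help fix ideas; the parameter
# values below ($\Gamma$, $\sigma_\psi$, $\sigma_\theta$, $\mho$, $\mathcal{U}$) are placeholder
# assumptions chosen purely for illustration, not calibrated values.

# %%
# Illustrative simulation of the permanent/transitory income process described above.
# All parameter values here are assumptions for demonstration only.
rng = np.random.default_rng(seed=0)
T_example, Gamma, sigma_psi, sigma_theta, mho, UnempBenefit = 5, 1.01, 0.1, 0.2, 0.05, 0.3
P = 1.0  # initial permanent income
for t_ex in range(T_example):
    psi = np.exp(rng.normal(-sigma_psi ** 2 / 2, sigma_psi))  # permanent shock, E[psi] = 1
    P = P * Gamma * psi
    if rng.uniform() < mho:  # unemployment spell
        theta = UnempBenefit
    else:
        theta = np.exp(rng.normal(-sigma_theta ** 2 / 2, sigma_theta))
    Y = P * theta
    print(f"t={t_ex}: P={P:.3f}, theta={theta:.3f}, Y={Y:.3f}")

# %% [markdown]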
# #### Financial assets and frictions
#
# Agents smooth their consumption by saving and have two assets
# available for this purpose. The first is a risk-free liquid account with
# constant per-period return factor $R$. The second has a stochastic return
# factor $\tilde{R}$ that is log-normally distributed and independent across
# time. Various interpretations such as stocks, a retirement fund, or entrepreneurial
# capital could be given to the risky asset. Importantly, consumption must be paid for
# using funds from the risk-free account. The levels of risk-free and risky assets
# owned by the agent will both be state variables, denoted with $M_{i,t}$ and $N_{i,t}$
# respectively.
#
# Portfolio rebalancing takes place by moving funds between the risk-free
# and risky accounts. These flows are one of the agents' control variables
# and are denoted as $D_{i,t}$, with $D_{i,t}>0$ representing a movement of
# funds from the risk-free to the risky account. Withdrawals from the risky
# account are subject to a constant-rate tax $\tau$ which can represent, for
# instance, capital-gains realization taxes or early retirement-fund withdrawal
# penalties. In sum, denoting post-rebalancing asset levels with $\tilde{\cdot}$,
# \begin{equation*}
# \begin{split}
# \tilde{M}_{i,t} &= M_{i,t} - D_{i,t}(1 - 1_{[D_{i,t}\leq0]}\tau)\\
# \tilde{N}_{i,t} &= N_{i,t} + D_{i,t}.
# \end{split}
# \end{equation*}
#
# At any given period, an agent might not be able to rebalance his portfolio.
# This ability is governed by an exogenous stochastic shock that is realized
# at the start of the period
#
# \begin{equation*}
# \Adj_t \sim \text{Bernoulli}(p_t),
# \end{equation*}
#
# with $\Adj_t=1$ meaning that the agent can rebalance and $\NAdj_t=1$ ($\Adj_t = 0$)
# forcing him to set $D_{i,t} = 0$. This friction is a parsimonious way to capture
# the fact that portfolio rebalancing is costly and households do it sporadically.
# Recent studies have advocated for \cite{Giglio2021aer} and used
# \cite{Luetticke2021aej_macro} this kind of rebalancing friction.
#
# To partially evade the possibility of being unable to rebalance their accounts, agents
# can use an income deduction scheme. By default, labor income ($Y_{i,t}$) is deposited to
# the risk-free liquid account at the start of every period. However, agents can pre-commit
# to have a fraction $\Contr_t\in[0,1]$ of their income diverted to their risky account instead.
# This fraction can be tweaked by the agent whenever $\Adj_t = 1$; otherwise it stays at its
# previous value, $\Contr_{t+1} = \Contr_t$.
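#
# As a quick check on the rebalancing arithmetic, the short sketch below (not part of the original
# notebook) applies the post-rebalancing updates for $\tilde{M}$ and $\tilde{N}$; the asset levels
# and tax rate used are arbitrary assumptions for illustration only.

# %%
# Illustrative post-rebalancing update (assumed values, for demonstration only)
def rebalance(M, N, D, tau):
    """Return (M_tilde, N_tilde) after moving D from the risk-free to the risky account."""
    # Withdrawals from the risky account (D < 0) are taxed at rate tau.
    M_tilde = M - D * (1.0 - (tau if D <= 0 else 0.0))
    N_tilde = N + D
    return M_tilde, N_tilde

print(rebalance(M=2.0, N=1.0, D=0.5, tau=0.1))   # deposit into the risky account: no tax
print(rebalance(M=2.0, N=1.0, D=-0.5, tau=0.1))  # withdrawal: only (1 - tau) * 0.5 reaches M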
# %% [markdown]
# #### Timing
#
# <div>
# <img src="Figures/Timing_diagram.PNG" width="600"/>
# </div>
#
# The previous figure summarizes the timing of stochastic shocks and
# optimizing decisions that occur within a period of the life cycle model.
# %% [markdown]
# ### Recursive representation of the model
#
# Individual subscripts $i$ are dropped for simplicity. The value function for
# an agent who is not allowed to rebalance his portfolio at time $t$ is
#
# \begin{equation*}
# \begin{split}
# V^{\NAdj}_{t}(M_t, N_t, P_t, \Contr_t) = \max_{C_t} u(C_t)
# + p_{t+1} &\beta\delta_{t+1} E_t \left[ V^{\Adj}_{t+1}\left( M_{t+1}, N_{t+1},
# P_{t+1} \right)\right] +\\
# \left(1-p_{t+1}\right) &\beta\delta_{t+1} E_t\left[V^{\NAdj}_{t+1}\left(M_{t+1},
# N_{t+1}, P_{t+1}, \Contr_{t+1}\right) \right]\\
# \text{Subject to:} \quad &\\
# 0\leq& C_t \leq M_t \\
# A_t =& M_t - C_t \\
# M_{t+1} =& R A_t + (1-\Contr_{t+1}) Y_{t+1}\\
# N_{t+1} =& \tilde{R}_{t+1}N_t + \Contr_{t+1}Y_{t+1}\\
# P_{t+1} =& \Gamma_{t+1} \psi_{t+1} P_{t}\\
# Y_{t+1} =& \theta_{t+1} P_{t+1}\\
# \Contr_{t+1} =& \Contr_t
# \end{split}
# \end{equation*}
#
# and that of agent who is allowed to rebalance is
#
# \begin{equation*}
# \begin{split}
# V^{\Adj}_{t}(M_t, N_t, P_t) = \max_{C_t,D_t,\Contr_{t+1}}
# u(C_t) + p_{t+1} &\beta\delta_{t+1} E_t \left[ V^{\Adj}_{t+1}\left( M_{t+1},
# N_{t+1}, P_{t+1} \right)\right] +\\
# \left(1-p_{t+1}\right) &\beta\delta_{t+1} E_t\left[V^{\NAdj}_{t+1}\left(M_{t+1},
# N_{t+1}, P_{t+1}, \Contr_{t+1}\right) \right]\\
# \text{Subject to:} \quad &\\
# \quad -N_t \leq D_t \leq M_t, \quad \Contr_{t+1} \in& [0,1], \quad 0 \leq C_t \leq \tilde{M}_t\\
# \hfill\\
# \tilde{M}_t =& M_t - D_t\left(1-1_{[D_t\leq0]}\tau\right)\\
# \tilde{N}_t =& N_t + D_t\\
# A_t =& \tilde{M}_t - C_t \\
# M_{t+1} =& R A_t + (1-\Contr_{t+1}) Y_{t+1}\\
# N_{t+1} =& \tilde{R}_{t+1} \tilde{N}_t + \Contr_{t+1}Y_{t+1}\\
# P_{t+1} =& \Gamma_{t+1}\psi_{t+1} P_{t}\\
# Y_{t+1} =& \theta_{t+1} P_{t+1}
# \end{split}
# \end{equation*}
# %% [markdown]
# ## Parametrizations
# %% [markdown]
# This notebook will only demonstrate a life-cycle calibration of the model. However, the current implementation of the model is able to find and use the solution to infinite-horizon formulations (see the document in this repository for details).
#
# For the present exercise, I calibrate the model's mortality and income paths to represent individuals who enter the model at age 25, retire exogenously at 65, and live to a maximum age of 90. Survival probabilities ($\delta$) come from the 2004 SSA life-tables for males. Income growth factors and volatilities ($\Gamma$, $\sigma_\psi$, $\sigma_\theta$) come from the calibration for high-school graduates in \cite{Cagetti2003jbes}.
#
# To highlight the effect of different financial frictions on wealth accumulation and portfolio choice, I consider different configurations for the risky-withdrawal tax $\tau$ and the probability of being able to rebalance $p$:
#
# \begin{itemize}
# \item \textbf{Base}: the probability of being able to rebalance is $p = 1$
# and the risky withdrawal tax rate is $\tau = 0$, both constant throughout the agents' lives.
#
# \item \textbf{Tax}: the risky withdrawal tax is constant at $10\%$ and the agents
# can always rebalance their portfolios.
#
# \item \textbf{Calvo}: there is no risky withdrawal tax, but there is only a $25\%$ chance
# that agents can rebalance their portfolios every year.
#
# \item \textbf{Retirement}: there is no risky withdrawal tax, but the agents' ability
# to rebalance their portfolio is time-varying; they can rebalance their assets and pick
# their income-deduction scheme for certain when they enter the model at age 25, but
# then they have no chance of doing so again ($p=0$) until they retire. After retirement
# at age 65, they can always rebalance their portfolio ($p=1$).
# \end{itemize}
#
# The rest of the parameters take the following values
#
# | Name in HARK | Mathematical Symbol | Value |
# |--------------|-----------------------|---------|
# | `CRRA` | $\rho$ | $5.0$ |
# | `Rfree` | $R$ | $1.03$ |
# | `DiscFac` | $\beta$ | $0.9$ |
# | `UnempPrb` | $\mho$ | $0.05$ |
# | `IncUnemp` | $\mathcal{U}$ | $0.3$ |
# | `RiskyAvg` | $E[\tilde{R}]$ | $1.08$ |
# | `RiskyStd` | $\sqrt{V[\tilde{R}]}$ | $0.18$ |
#
#
#
#
# %% Base parametrization code_folding=[0]
# Base calibration setup
# Make the problem life-cycle
par_LC_base = init_risky_contrib_lifecycle.copy()
# Turn off aggregate growth
par_LC_base['PermGroFacAgg'] = 1.0
# and frictionless to start
par_LC_base["AdjustPrb"] = 1.0
par_LC_base["tau"] = 0.0
# Make contribution shares a continuous choice
par_LC_base["DiscreteShareBool"] = False
par_LC_base["vFuncBool"] = False
# Make grids go up to higher levels of resources
# (one of the calibration produces high levels of nNrm)
par_LC_base.update({"mNrmMax": 500, "nNrmMax":1000})
# %% A version with the tax code_folding=[0]
# Alternative calibrations
# Tax
par_LC_tax = copy(par_LC_base)
par_LC_tax["tau"] = 0.1
# Calvo
par_LC_calvo = copy(par_LC_base)
par_LC_calvo["AdjustPrb"] = 0.25
# Retirement
par_LC_retirement = copy(par_LC_base)
par_LC_retirement["AdjustPrb"] = [1.0] + [0.0]*39 + [1.0]*25
par_LC_retirement["tau"] = [0.0]*41 + [0.0]*24
par_LC_retirement["UnempPrb"] = 0.0
# %% [markdown]
# ## Solution
#
# With the parametrizations created, I now create and solve the agents.
# %% Create and solve agents with all the parametrizations
agents = {
"Base": RiskyContribConsumerType(**par_LC_base),
"Tax": RiskyContribConsumerType(**par_LC_tax),
"Calvo": RiskyContribConsumerType(**par_LC_calvo),
"Retirement": RiskyContribConsumerType(**par_LC_retirement),
}
for agent in agents:
print("Now solving " + agent)
t0 = time()
agents[agent].solve(verbose=True)
t1 = time()
print("Solving " + agent + " took " + str(t1 - t0) + " seconds.")
# %% [markdown]
# ## Policy function inspection
#
# Once agents have been solved, we can use their policy functions $\dFrac_t(m,n)$, $\Contr_t(\tilde{m},\tilde{n})$, and $c_t(\tilde{m},\tilde{n}, \Contr)$ for any period $t$.
# %%
# Example
calib = "Base"
t = 0
print(agents[calib].solution[t].stage_sols["Reb"].dfracFunc_Adj(1,1))
print(agents[calib].solution[t].stage_sols["Sha"].ShareFunc_Adj(1.2,0.8))
print(agents[calib].solution[t].stage_sols["Cns"].cFunc(1.2,0.8,0.5))
# %% [markdown]
# I now illustrate the policy functions of different calibrations for an arbitrary period graphically.
#
# Note that the solution algorithm represents the three simultaneous decisions that an agent can take as happening sequentially in ''stages'', in the order `Rebalancing` -> `Income deduction share` -> `Consumption`. See the document in this repository for more details.
# %% code_folding=[0]
# Setup
from HARK.utilities import (
determine_platform,
test_latex_installation,
setup_latex_env_notebook,
)
pf = determine_platform()
try:
latexExists = test_latex_installation(pf)
except ImportError: # windows and MacOS requires manual install
latexExists = False
setup_latex_env_notebook(pf, latexExists)
# General aesthetics
sns.set_style("whitegrid")
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 2.5})
# Parameters to feed to policy functions
t = 10
age = t + 24
mNrmGrid = np.linspace(0, 40, 100)
nNrm_vals = np.array([0.0, 20.0, 40])
Share_vals = np.array([0.0, 0.5])
# Evaluate policy functions
polfuncs = pol_funcs_dframe(agents, t, mNrmGrid, nNrm_vals, Share_vals)
# %% [markdown]
# ### Rebalancing
#
# The solution to this stage problem will be the policy function $d_t(\cdot, \cdot)$
# that gives the optimal flow from risk-free to risky assets, which can be negative.
# However, it is convenient to define a normalized policy function $\dFrac_t$ as
# \begin{equation*}
# \dFrac_t(m, n) = \begin{cases}
# d_t(m,n)/m, & \text{ if } d_t(m,n) \geq 0 \\
# d_t(m,n)/n, & \text{ if } d_t(m,n) < 0
# \end{cases}
# \end{equation*}
# so that $-1 \leq \dFrac(m,n) \leq 1$ for all $(m,n)$.
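#
# The small helper below (not part of the original notebook) shows how the normalized fraction
# relates to the raw flow $d_t$; the values of $m$, $n$, and $d$ are arbitrary assumptions used
# only for illustration.

# %%
# Illustrative normalization of the rebalancing flow (assumed values, for demonstration only)
def normalized_dfrac(d, m, n):
    """Map a raw flow d into the normalized fraction dfrac in [-1, 1]."""
    return d / m if d >= 0 else d / n

print(normalized_dfrac(d=0.5, m=2.0, n=1.0))   # 0.25: deposit 25% of liquid assets
print(normalized_dfrac(d=-0.5, m=2.0, n=1.0))  # -0.5: withdraw 50% of risky assets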
# %% code_folding=[0]
# Rebalancing fraction
polfuncs["$n$"] = polfuncs["n"]
g = sns.FacetGrid(
polfuncs[polfuncs.control == "dfrac"],
col="$n$",
hue="model",
height=3,
aspect=(7 / 3) * 1 / 3,
)
g.map(sns.lineplot, "m", "value", alpha=0.7)
g.add_legend(bbox_to_anchor=[0.5, 0.0], ncol=4, title="")
g.set_axis_labels("$m$", "Rebalancing fraction: $\dFrac$")
# %% [markdown]
# ### Income deduction share
# %% code_folding=[0]
# After rebalancing, m and n turn to their "tilde" versions. Create ntilde
# just for seaborn's grid labels.
polfuncs["$\\tilde{n}$"] = polfuncs["n"]
polfuncs["$\\Contr$"] = polfuncs["Share"]
# Share fraction
g = sns.FacetGrid(
polfuncs[polfuncs.control == "Share"],
col="$\\tilde{n}$",
hue="model",
height=3,
aspect=(7 / 3) * 1 / 3,
)
g.map(sns.lineplot, "m", "value", alpha=0.7)
g.add_legend(bbox_to_anchor=[0.5, 0.0], ncol=4, title="")
g.set_axis_labels("$\\tilde{m}$", r"Deduction Share: $\Contr$")
# %% [markdown]
# ### Consumption
# %% code_folding=[0]
# Consumption
g = sns.FacetGrid(
polfuncs[polfuncs.control == "c"],
col="$\\tilde{n}$",
row="$\\Contr$",
hue="model",
height=3,
aspect=(7 / 3) * 1 / 3,
)
g.map(sns.lineplot, "m", "value", alpha=0.7)
g.add_legend(bbox_to_anchor=[0.5, 0.0], ncol=4, title="")
g.set_axis_labels("$\\tilde{m}$", "Consumption: $c$")
# %% [markdown]
# ## Simulation and average life-cycle profiles
# %% [markdown]
# With the policy functions, it is easy to simulate populations of agents. I now simulate many agents for every calibration to obtain the average lifetime profiles of relevant variables like consumption, income, and wealth in its different components.
# %% Solve and simulate code_folding=[0]
# Simulation
n_agents = 100
t_sim = 200
profiles = []
for agent in agents:
agents[agent].AgentCount = n_agents
agents[agent].T_sim = t_sim
agents[agent].track_vars = ['pLvl','t_age','Adjust',
'mNrm','nNrm','mNrmTilde','nNrmTilde','aNrm',
'cNrm', 'Share', 'dfrac']
agents[agent].initialize_sim()
agents[agent].simulate()
profile = age_profiles(agents[agent])
profile['Model'] = agent
profiles.append(profile)
# %% Plot life-cycle means code_folding=[0]
# Plot
simdata = pd.concat(profiles)
# Latex names
simdata = simdata.rename(columns = {'pLvl': 'Perm. Income $P$',
'Mtilde': 'Risk-free Assets $\\tilde{M}$',
'Ntilde': 'Risky Assets $\\tilde{N}$',
'C': 'Consumption $C$',
'StockShare': 'Risky Share of Savings',
'Share': 'Deduct. Share $\\Contr$'})
lc_means = pd.melt(simdata,
id_vars = ['t_age', 'Model'],
value_vars = ['Perm. Income $P$',
'Risk-free Assets $\\tilde{M}$',
'Risky Assets $\\tilde{N}$',
'Consumption $C$',
'Risky Share of Savings','Deduct. Share $\\Contr$'])
lc_means['Age'] = lc_means['t_age'] + 24
# Drop the last year, as people's behavior is substantially different.
lc_means = lc_means[lc_means['Age']<max(lc_means['Age'])]
# General aesthetics
sns.set_style("whitegrid")
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 2.5})
g = sns.FacetGrid(
lc_means,
col="variable",
col_wrap = 3,
hue="Model",
height=3,
aspect=(7 / 3) * 1 / 3,
sharey=False
)
g.map(sns.lineplot, "Age", "value", alpha=0.7)
g.add_legend(bbox_to_anchor=[0.5, 0.0], ncol=4, title="")
g.set_axis_labels("Age", "")
g.set_titles(col_template = '{col_name}')
# %% [markdown]
# # References
#
# (<a id="cit-Carroll1997qje" href="#call-Carroll1997qje">Carroll, 1997</a>) <NAME>., ``_Buffer-Stock Saving and the Life Cycle/Permanent Income Hypothesis*_'', The Quarterly Journal of Economics, vol. 112, number 1, pp. 1-55, 02 1997. [online](https://doi.org/10.1162/003355397555109)
#
# (<a id="cit-Cagetti2003jbes" href="#call-Cagetti2003jbes">Cagetti, 2003</a>) <NAME>, ``_Wealth Accumulation Over the Life Cycle and Precautionary Savings_'', Journal of Business \& Economic Statistics, vol. 21, number 3, pp. 339-353, 2003. [online](https://doi.org/10.1198/073500103288619007
#
# )
#
# (<a id="cit-Cocco2005rfs" href="#call-Cocco2005rfs"><NAME> <em>et al.</em>, 2005</a>) <NAME>\~<NAME>., <NAME>. and <NAME>., ``_Consumption and Portfolio Choice over the Life Cycle_'', The Review of Financial Studies, vol. 18, number 2, pp. 491-533, 02 2005. [online](https://doi.org/10.1093/rfs/hhi017)
#
# (<a id="cit-Fagereng2017jof" href="#call-Fagereng2017jof">Fagereng, Gottlieb <em>et al.</em>, 2017</a>) <NAME>, <NAME> and <NAME>, ``_Asset Market Participation and Portfolio Choice over the
# Life-Cycle_'', The Journal of Finance, vol. 72, number 2, pp. 705-750, 2017. [online](https://onlinelibrary.wiley.com/doi/abs/10.1111/jofi.12484)
#
# (<a id="cit-Carroll1992bpea" href="#call-Carroll1992bpea">Carroll, 1992</a>) <NAME>., ``_The Buffer-Stock Theory of Saving: Some Macroeconomic Evidence_'', Brookings Papers on Economic Activity, vol. 1992, number 2, pp. 61--156, 1992. [online](http://www.jstor.org/stable/2534582)
#
# (<a id="cit-Carroll1997jme" href="#call-Carroll1997jme">Carroll and Samwick, 1997</a>) Carroll <NAME>. and <NAME>., ``_The nature of precautionary wealth_'', Journal of Monetary Economics, vol. 40, number 1, pp. 41-71, 1997. [online](https://www.sciencedirect.com/science/article/pii/S0304393297000366)
#
# (<a id="cit-Sabelhaus2010jme" href="#call-Sabelhaus2010jme">Sabelhaus and Song, 2010</a>) <NAME> and <NAME>, ``_The great moderation in micro labor earnings_'', Journal of Monetary Economics, vol. 57, number 4, pp. 391-403, 2010. [online](https://www.sciencedirect.com/science/article/pii/S0304393210000358)
#
# (<a id="cit-Giglio2021aer" href="#call-Giglio2021aer">Giglio, Maggiori <em>et al.</em>, 2021</a>) <NAME>, <NAME>, <NAME> <em>et al.</em>, ``_Five Facts about Beliefs and Portfolios_'', American Economic Review, vol. 111, number 5, pp. 1481-1522, May 2021. [online](https://www.aeaweb.org/articles?id=10.1257/aer.20200243)
#
# (<a id="cit-Luetticke2021aej_macro" href="#call-Luetticke2021aej_macro">Luetticke, 2021</a>) <NAME>, ``_Transmission of Monetary Policy with Heterogeneity in Household Portfolios_'', American Economic Journal: Macroeconomics, vol. 13, number 2, pp. 1-25, April 2021. [online](https://www.aeaweb.org/articles?id=10.1257/mac.20190064)
#
#
| StarcoderdataPython |
3380289 | <reponame>JakeGinnivan/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetSecretsResult:
"""
A collection of values returned by getSecrets.
"""
def __init__(__self__, id=None, plaintext=None, secrets=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
The provider-assigned unique ID for this managed resource.
"""
if plaintext and not isinstance(plaintext, dict):
raise TypeError("Expected argument 'plaintext' to be a dict")
__self__.plaintext = plaintext
"""
Map containing each `secret` `name` as the key with its decrypted plaintext value
"""
if secrets and not isinstance(secrets, list):
raise TypeError("Expected argument 'secrets' to be a list")
__self__.secrets = secrets
class AwaitableGetSecretsResult(GetSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecretsResult(
id=self.id,
plaintext=self.plaintext,
secrets=self.secrets)
def get_secrets(secrets=None,opts=None):
"""
Decrypt multiple secrets from data encrypted with the AWS KMS service.
:param list secrets: One or more encrypted payload definitions from the KMS service. See the Secret Definitions below.
The **secrets** object supports the following:
* `context` (`dict`) - An optional mapping that makes up the Encryption Context for the secret.
* `grantTokens` (`list`) - An optional list of Grant Tokens for the secret.
* `name` (`str`) - The name to export this secret under in the attributes.
* `payload` (`str`) - Base64 encoded payload, as returned from a KMS encrypt operation.
"""
__args__ = dict()
__args__['secrets'] = secrets
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:kms/getSecrets:getSecrets', __args__, opts=opts).value
return AwaitableGetSecretsResult(
id=__ret__.get('id'),
plaintext=__ret__.get('plaintext'),
secrets=__ret__.get('secrets'))
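# Example usage (illustrative; the secret name and ciphertext payload are placeholders):
#
#     import pulumi
#     import pulumi_aws as aws
#
#     secret_data = aws.kms.get_secrets(secrets=[{
#         "name": "master_password",
#         "payload": "<base64-encoded KMS ciphertext>",
#     }])
#     pulumi.export("password", secret_data.plaintext["master_password"])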
| StarcoderdataPython |
112989 | <gh_stars>0
import os
import sys
from numpy.lib import utils
sys.path.append(".")
import json
from http import HTTPStatus
from fastapi import FastAPI, Path
from fastapi.responses import RedirectResponse
from pydantic import BaseModel
from dogapp import dogconfig, predict, models, utils
app = FastAPI(
title="Dog Identification App",
description="",
version="1.0.0",
)
@utils.construct_response
@app.get("/")
async def _index():
response = {
"message": HTTPStatus.OK.phrase,
"status-code": HTTPStatus.OK,
"data": {},
}
dogconfig.logger.info(json.dumps(response, indent=2))
return response
class PredictPayload(BaseModel):
# experiment_id: str = "latest"
# inputs: list = [{"url": ""}]
urls: list = [""]
@utils.construct_response
@app.post("/predict")
async def _predict(payload: PredictPayload):
# Get run components for prediction
model = models.DogCNN()
model.summary(input_shape=(7, 7, 2048)) # build it
model_path = os.path.join(os.getcwd(), "embeddings/weights.best.Xception.hdf5")
model.load_weights(model_path)
    prediction = []
    for url in payload.urls:
        try:
            data = utils.loadImage(url)
            prediction.append(predict.predict(url=url, data=data, model=model)[0])
        except Exception as pe:
            dogconfig.logging.getLogger('errorlogger').error("Error making predictions", exc_info=pe)
    # Build the response once after the loop so it is always defined,
    # even if every individual prediction fails.
    response = {
        "message": HTTPStatus.OK.phrase,
        "status-code": HTTPStatus.OK,
        "data": {"prediction": prediction},
    }
    dogconfig.logging.getLogger('infologger').info(json.dumps(response, indent=2))
    return response
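# Example request against the /predict endpoint (illustrative; host and port depend
# on how the app is served, e.g. uvicorn's default is port 8000):
#
#   curl -X POST http://localhost:8000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"urls": ["https://example.com/dog.jpg"]}'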
| StarcoderdataPython |
19564 | <gh_stars>0
from netsuitesdk.internal.utils import PaginatedSearch
from .base import ApiBase
import logging
logger = logging.getLogger(__name__)
class CustomRecords(ApiBase):
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name='CustomRecordType')
def get_all_by_id(self, internalId):
cr_type = self.ns_client.CustomRecordSearchBasic(
recType=self.ns_client.CustomRecordType(
internalId=internalId
)
)
ps = PaginatedSearch(client=self.ns_client, type_name='CustomRecordType', search_record=cr_type, pageSize=20)
return list(self._paginated_search_to_generator(paginated_search=ps))
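# Illustrative usage (the client object and internal id below are hypothetical):
#
#   custom_records = CustomRecords(ns_client)
#   records = custom_records.get_all_by_id('123')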
| StarcoderdataPython |
3350422 | <gh_stars>0
from datetime import datetime
from elasticsearch import Elasticsearch
class Searcher(object):
"""
Class for searching results using parameters
:param location: location string
:param start_date: start date calendar
:param end_date: end date calendar
:param k: total num of documents to retrieve
"""
def __init__(self, location, start_date='2020-12-21', end_date='2021-03-21', k=300):
self.k = k
self.start_date = start_date
self.end_date = end_date
self.index_name = "airbnb_" + location
self.num_hits = 0
self.es = Elasticsearch()
def get_all_docs(self):
""" Get a list of documents ordered by id """
res = self.es.search(
index=self.index_name,
body={
"size": self.k,
"query": {
"match_all": {}
}
}
)
self.num_hits = res['hits']['total']['value']
docs = res['hits']['hits']
return docs
def get_docs_ord_by_overall_rating(self):
""" Get a list of documents in descending order by overall rating """
res = self.es.search(
index=self.index_name,
body={"size": self.k, "sort" : [{ "overall_rating" : "desc" }, "_score"], "query": {"match_all": {}}}
)
self.num_hits = res['hits']['total']['value']
docs = res['hits']['hits']
return docs
def get_docs_by_availability30(self):
""" Get a list of documents by availability 30 """
res = self.es.search(index=self.index_name, body={
"size": self.k,
"sort" : [
{
"overall_rating" : "desc"
}, "_score"
],
"query": {
"bool": {
"filter": [
{ "range": {
"availability_30": {
"gt": 0
}
}
}
]}
}
})
self.num_hits = res['hits']['total']['value']
docs = res['hits']['hits']
return docs
if __name__ == "__main__":
s = Searcher(location='boston')
# Ranker 1. Return all documents and order by id
docs = s.get_all_docs()
hits = s.num_hits
print("Total num docs ordered by id: ", hits)
print("/n")
# Ranker 2. Return all documents and order descendingly by overall_rating, id
docs = s.get_docs_ord_by_overall_rating()
hits = s.num_hits
print("Total num docs ordered by overall_rating, id: ", hits)
print("/n")
# Ranker 3. Return all documents with availability > 0 and order descendingly by overall_rating, id
docs = s.get_docs_by_availability30()
hits = s.num_hits
print("Total num docs by availability30: ", hits)
| StarcoderdataPython |
3206035 | import unittest
from main import *
class StringsTests(unittest.TestCase):
def test_main(self):
self.assertIsInstance(fccSet, set)
self.assertEqual(len(fccSet), 14)
| StarcoderdataPython |
44915 | import datetime
import functools
import io
import os
import zipfile
import httpx
import pytest
from coverage_comment import coverage as coverage_module
from coverage_comment import github_client, settings
@pytest.fixture
def base_config():
def _(**kwargs):
defaults = {
# GitHub stuff
"GITHUB_TOKEN": "foo",
"GITHUB_PR_RUN_ID": 123,
"GITHUB_REPOSITORY": "ewjoachim/foobar",
# Action settings
"MERGE_COVERAGE_FILES": True,
"VERBOSE": False,
}
return settings.Config(**(defaults | kwargs))
return _
@pytest.fixture
def push_config(base_config):
def _(**kwargs):
defaults = {
# GitHub stuff
"GITHUB_BASE_REF": "",
"GITHUB_REF": "refs/heads/main",
"GITHUB_EVENT_NAME": "push",
}
return base_config(**(defaults | kwargs))
return _
@pytest.fixture
def pull_request_config(base_config):
def _(**kwargs):
defaults = {
# GitHub stuff
"GITHUB_BASE_REF": "main",
"GITHUB_REF": "refs/pull/2/merge",
"GITHUB_EVENT_NAME": "pull_request",
}
return base_config(**(defaults | kwargs))
return _
@pytest.fixture
def workflow_run_config(base_config):
def _(**kwargs):
defaults = {
# GitHub stuff
"GITHUB_BASE_REF": "",
"GITHUB_REF": "refs/heads/main",
"GITHUB_EVENT_NAME": "workflow_run",
}
return base_config(**(defaults | kwargs))
return _
@pytest.fixture
def coverage_json():
return {
"meta": {
"version": "1.2.3",
"timestamp": "2000-01-01T00:00:00",
"branch_coverage": True,
"show_contexts": False,
},
"files": {
"codebase/code.py": {
"executed_lines": [1, 2, 5, 6, 9],
"summary": {
"covered_lines": 5,
"num_statements": 6,
"percent_covered": 75.0,
"missing_lines": 1,
"excluded_lines": 0,
"num_branches": 2,
"num_partial_branches": 1,
"covered_branches": 1,
"missing_branches": 1,
},
"missing_lines": [7, 9],
"excluded_lines": [],
}
},
"totals": {
"covered_lines": 5,
"num_statements": 6,
"percent_covered": 75.0,
"missing_lines": 1,
"excluded_lines": 0,
"num_branches": 2,
"num_partial_branches": 1,
"covered_branches": 1,
"missing_branches": 1,
},
}
@pytest.fixture
def diff_coverage_json():
return {
"report_name": "XML",
"diff_name": "master...HEAD, staged and unstaged changes",
"src_stats": {
"codebase/code.py": {
"percent_covered": 80.0,
"violation_lines": [9],
"violations": [[9, None]],
}
},
"total_num_lines": 5,
"total_num_violations": 1,
"total_percent_covered": 80,
"num_changed_lines": 39,
}
@pytest.fixture
def coverage_obj():
return coverage_module.Coverage(
meta=coverage_module.CoverageMetadata(
version="1.2.3",
timestamp=datetime.datetime(2000, 1, 1),
branch_coverage=True,
show_contexts=False,
),
info=coverage_module.CoverageInfo(
covered_lines=5,
num_statements=6,
percent_covered=0.75,
missing_lines=1,
excluded_lines=0,
num_branches=2,
num_partial_branches=1,
covered_branches=1,
missing_branches=1,
),
files={
"codebase/code.py": coverage_module.FileCoverage(
path="codebase/code.py",
executed_lines=[1, 2, 5, 6, 9],
missing_lines=[7, 9],
excluded_lines=[],
info=coverage_module.CoverageInfo(
covered_lines=5,
num_statements=6,
percent_covered=0.75,
missing_lines=1,
excluded_lines=0,
num_branches=2,
num_partial_branches=1,
covered_branches=1,
missing_branches=1,
),
)
},
)
@pytest.fixture
def coverage_obj_no_branch():
return coverage_module.Coverage(
meta=coverage_module.CoverageMetadata(
version="1.2.3",
timestamp=datetime.datetime(2000, 1, 1),
branch_coverage=False,
show_contexts=False,
),
info=coverage_module.CoverageInfo(
covered_lines=5,
num_statements=6,
percent_covered=0.75,
missing_lines=1,
excluded_lines=0,
num_branches=None,
num_partial_branches=None,
covered_branches=None,
missing_branches=None,
),
files={
"codebase/code.py": coverage_module.FileCoverage(
path="codebase/code.py",
executed_lines=[1, 2, 5, 6, 9],
missing_lines=[7],
excluded_lines=[],
info=coverage_module.CoverageInfo(
covered_lines=5,
num_statements=6,
percent_covered=0.75,
missing_lines=1,
excluded_lines=0,
num_branches=None,
num_partial_branches=None,
covered_branches=None,
missing_branches=None,
),
)
},
)
@pytest.fixture
def diff_coverage_obj():
return coverage_module.DiffCoverage(
total_num_lines=5,
total_num_violations=1,
total_percent_covered=0.8,
num_changed_lines=39,
files={
"codebase/code.py": coverage_module.FileDiffCoverage(
path="codebase/code.py",
percent_covered=0.8,
violation_lines=[7, 9],
)
},
)
@pytest.fixture
def session():
"""
You get a session object. Register responses on it:
session.register(method="GET", path="/a/b")(status_code=200)
or
session.register(method="GET", path="/a/b", json=checker)(status_code=200)
(where checker is a function receiving the json value, and returning True if it
matches)
if session.request(method="GET", path="/a/b") is called, it will return a response
with status_code 200. Also, if not called by the end of the test, it will raise.
"""
class Session:
        responses = []  # List[Tuple[request kwargs, response kwargs]]
def request(self, method, path, **kwargs):
request_kwargs = {"method": method, "path": path} | kwargs
for i, (match_kwargs, response_kwargs) in enumerate(self.responses):
match = True
for key, match_value in match_kwargs.items():
if key not in request_kwargs:
match = False
break
request_value = request_kwargs[key]
if hasattr(match_value, "__call__"):
try:
assert match_value(request_value)
except Exception:
match = False
break
else:
if not match_value == request_value:
match = False
break
if match:
self.responses.pop(i)
return httpx.Response(
**response_kwargs,
request=httpx.Request(method=method, url=path),
)
assert (
False
), f"No response found for kwargs {request_kwargs}\nExpected answers are {self.responses}"
def __getattr__(self, value):
if value in ["get", "post", "patch", "delete", "put"]:
return functools.partial(self.request, value.upper())
raise AttributeError(value)
def register(self, method, path, **request_kwargs):
request_kwargs = {"method": method, "path": path} | request_kwargs
def _(**response_kwargs):
response_kwargs.setdefault("status_code", 200)
self.responses.append((request_kwargs, response_kwargs))
return _
session = Session()
yield session
assert not session.responses
@pytest.fixture
def gh(session):
return github_client.GitHub(session=session)
@pytest.fixture
def get_logs(caplog):
caplog.set_level("DEBUG")
def get_logs(level=None, match=None):
return [
log.message
for log in caplog.records
if (level is None or level == log.levelname)
and (match is None or match in log.message)
]
return get_logs
@pytest.fixture
def in_tmp_path(tmp_path):
curdir = os.getcwd()
os.chdir(tmp_path)
yield tmp_path
os.chdir(curdir)
@pytest.fixture
def zip_bytes():
def _(filename, content):
file = io.BytesIO()
with zipfile.ZipFile(file, mode="w") as zipf:
with zipf.open(filename, "w") as subfile:
subfile.write(content.encode("utf-8"))
zip_bytes = file.getvalue()
assert zip_bytes.startswith(b"PK")
return zip_bytes
return _
| StarcoderdataPython |
139971 | from django.db import models
'''
activity
datetime
user (FK)
type
'''
class Log(models.Model):
activity = models.CharField(max_length=50, null=False, default='')
datetime = models.DateTimeField(auto_now=True)
user = models.ForeignKey(
to='users.User'
,on_delete=models.CASCADE
)
class Meta:
verbose_name = 'Log'
verbose_name_plural = 'Logs'
| StarcoderdataPython |
5802 | <filename>orrinjelo/aoc2021/day_11.py
from orrinjelo.utils.decorators import timeit
import numpy as np
def parse(lines):
return np.array([[int(c) for c in line.strip()] for line in lines])
visited = []
def flash(a, x, y):
global visited
if (x,y) in visited:
return
for dx in range(-1,2):
for dy in range(-1,2):
if dx == 0 and dy == 0:
continue
if x+dx < 0 or x+dx >= a.shape[0]:
continue
if y+dy < 0 or y+dy >= a.shape[1]:
continue
a[x+dx, y+dy] += 1
visited.append((x,y))
if a[x+dx, y+dy] > 9:
flash(a, x+dx, y+dy)
def progress(a):
global visited
a += 1
x,y = np.where(a > 9)
visited = []
for i in range(len(x)):
flash(a,x[i],y[i])
count = np.sum(a > 9)
# print('a:\n', a)
a[a > 9] = 0
return a, count
@timeit("Day 11 Part 1")
def part1(input_str, use_rust=False):
octomap = parse(input_str)
total_count = 0
for i in range(100):
octomap, count = progress(octomap)
total_count += count
return total_count
@timeit("Day 11 Part 2")
def part2(input_str, use_rust=False):
octomap = parse(input_str)
step = 0
while True:
step += 1
octomap, count = progress(octomap)
if count == octomap.shape[0]*octomap.shape[1]:
break
return step
# = Test ================================================
inputlist = [
'5483143223',
'2745854711',
'5264556173',
'6141336146',
'6357385478',
'4167524645',
'2176841721',
'6882881134',
'4846848554',
'5283751526',
]
def test_part1():
# import matplotlib.pyplot as plt
# plt.imshow(parse(inputlist))
# plt.show()
assert part1(inputlist) == 1656
def test_part2():
assert part2(inputlist) == 195
import pygame
import sys
def plot(input_str):
# octomap = parse(input_str)
octomap = np.random.randint(0,9,(100,100))
pygame.init()
clock = pygame.time.Clock()
scale = 5
screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale))
surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale))
frame = 0
history = []
for i in range(500):
print('Generating frame #', i)
octomap, _ = progress(octomap)
history.append(np.copy(octomap))
input()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit(); sys.exit();
# erase the screen
screen.fill((255,0,0))
try:
octomap = history[frame]
except:
frame = 0
for i in range(octomap.shape[0]):
for j in range(octomap.shape[1]):
if octomap[i,j] == 0:
brightness = 255
else:
brightness = int(255.0 * octomap[i,j]/10.0)
print(i*scale, j*scale, brightness)
pygame.draw.rect(
screen,
(brightness,brightness,brightness),
pygame.Rect(i*scale, j*scale, scale, scale)
)
pygame.display.update()
# surface.blit(screen, (0,0))
clock.tick(30)
frame += 1 | StarcoderdataPython |
3234055 | <reponame>MIklgr500/bowl
TRAIN_PATH = 'input/train'
TEST_PATH = 'input/test2'
FILE_PATH = 'output/'
RANDOM_STATE = 31
IMG_HEIGHT = 128
IMG_WIDTH = 128
IMG_CHAN = 3
MERG_RATION = 1
NU = 1.
MU = 0.
BATCH_SIZE = 32
EPOCHS = 20
| StarcoderdataPython |
71183 |
################################ A Library of Functions ##################################
##################################################################################################
#simple function which displays a matrix
def matrixDisplay(M):
for i in range(len(M)):
for j in range(len(M[i])):
print((M[i][j]), end = " ")
print()
##################################################################################################
#matrix product
def matrixProduct(L, M):
if len(L[0]) != len(M): #ensuring the plausiblity
print("Matrix multiplication not possible.")
else:
print("Multiplying the two matrices: ")
P=[[0 for i in range(len(M[0]))] for j in range(len(L))] #initializing empty matrix
for i in range(len(L)): #iterating rows
for j in range(len(M[0])): #iterating columns
for k in range(len(M)): #iterating elements and substituing them
P[i][j] = P[i][j] + (L[i][k] * M[k][j])
matrixDisplay(P)
##################################################################################################
#the gauss-jordan elimination code
def gaussj(a, b):
n = len(b) #defining the range through which the loops will run
for k in range(n): #loop to index pivot rows and eliminated columns
#partial pivoting
if abs(a[k][k]) < 1.0e-12:
for i in range(k+1, n):
if abs(a[i][k]) > abs(a[k][k]):
for j in range(k, n):
a[k][j], a[i][j] = a[i][j], a[k][j] #swapping of rows
b[k], b[i] = b[i], b[k]
break
#division of the pivot row
pivot = a[k][k]
if pivot == 0:
print("There is no unique solution to this system of equations.")
return
for j in range(k, n): #index of columns of the pivot row
a[k][j] /= pivot
b[k] /= pivot
#elimination loop
for i in range(n): #index of subtracted rows
if i == k or a[i][k] == 0: continue
factor = a[i][k]
for j in range(k, n): #index of columns for subtraction
a[i][j] -= factor * a[k][j]
b[i] -= factor * b[k]
print(b)
#################################################################################################
#calculation of determinant using gauss-jordan elimination
def determinant(a):
n = len(a) #defining the range through which the loops will run
if n != len(a[0]): #checking if determinant is possible to calculate
print("The matrix must be a square matrix.")
else:
s = 0
#code to obtain row echelon matrix using partial pivoting
for k in range(n-1):
if abs(a[k][k]) < 1.0e-12:
for i in range(k+1, n):
if abs(a[i][k]) > abs(a[k][k]):
for j in range(k, n):
a[k][j], a[i][j] = a[i][j], a[k][j] #swapping
s = s + 1 #counting the number of swaps happened
for i in range(k+1, n):
if a[i][k] == 0: continue
factor = a[i][k]/a[k][k]
for j in range(k, n):
a[i][j] = a[i][j] - factor * a[k][j]
d = 1
for i in range(len(a)):
d = d * a[i][i] #enlisting the diagonal elements
d = d*(-1)**s
print(d)
#################################################################################################
#calculating inverse
def inverse(a):
n = len(a) #defining the range through which loops will run
#constructing the n X 2n augmented matrix
P = [[0.0 for i in range(len(a))] for j in range(len(a))]
for i in range(3):
for j in range(3):
P[j][j] = 1.0
for i in range(len(a)):
a[i].extend(P[i])
#main loop for gaussian elimination begins here
for k in range(n):
if abs(a[k][k]) < 1.0e-12:
for i in range(k+1, n):
if abs(a[i][k]) > abs(a[k][k]):
for j in range(k, 2*n):
a[k][j], a[i][j] = a[i][j], a[k][j] #swapping of rows
break
pivot = a[k][k] #defining the pivot
if pivot == 0: #checking if matrix is invertible
print("This matrix is not invertible.")
return
else:
for j in range(k, 2*n): #index of columns of the pivot row
a[k][j] /= pivot
for i in range(n): #index the subtracted rows
if i == k or a[i][k] == 0: continue
factor = a[i][k]
for j in range(k, 2*n): #index the columns for subtraction
a[i][j] -= factor * a[k][j]
for i in range(len(a)): #displaying the matrix
for j in range(n, len(a[0])):
print("{:.2f}".format(a[i][j]), end = " ") #printing upto 2 places in decimal.
print()
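##################################################################################################
#illustrative usage of the routines above; the matrices below are made-up examples
if __name__ == "__main__":
    #solve the system 2x + y - z = 8, -3x - y + 2z = -11, -2x + y + 2z = -3
    A = [[2.0, 1.0, -1.0], [-3.0, -1.0, 2.0], [-2.0, 1.0, 2.0]]
    b = [8.0, -11.0, -3.0]
    gaussj(A, b)                             #expected to print the solution [2.0, 3.0, -1.0]
    determinant([[4.0, 3.0], [6.0, 3.0]])    #expected to print -6.0
    inverse([[2.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 5.0]])  #expected to print 0.50, 0.25, 0.20 on the diagonal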
| StarcoderdataPython |
1713656 |
import rospy
class BaseDataModule:
def __init__(self):
pass
def get(self):
raise NotImplementedError
def get_time(self):
return rospy.Time.now()
class LocalDataModule(BaseDataModule):
def __init__(self, rospy,
msg_topic, msg_type):
self.msg_topic = msg_topic
self.msg_type = msg_type
self.data = self.msg_type()
self.sub = rospy.Subscriber(self.msg_topic, self.msg_type, self.data_cb)
self.last_time = self.get_time()
def data_cb(self, msg):
self.data = msg
self.last_time = self.get_time()
def get(self):
return self.data, (self.get_time() - self.last_time).to_sec()
class RemoteDataModule(BaseDataModule):
def __init__(self, rospy, srv_topic, srv_type, extract_fn=lambda msg: msg):
self.srv_topic = srv_topic
self.srv_type = srv_type
self.extract_fn = extract_fn
print("Connecting to %s..." % self.srv_topic)
rospy.wait_for_service(self.srv_topic)
self.proxy = rospy.ServiceProxy(self.srv_topic, self.srv_type)
print("Connected")
def get(self):
t1 = self.get_time()
ret = self.proxy()
t2 = self.get_time()
return self.extract_fn(ret), (t2 - t1).to_sec()
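# Illustrative usage (topic name and message type are just examples):
#
#   from nav_msgs.msg import Odometry
#   odom = LocalDataModule(rospy, "/odom", Odometry)
#   msg, age_seconds = odom.get()   # latest message and its age in seconds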
| StarcoderdataPython |
98999 | <gh_stars>0
import platform
from dataclasses import dataclass
from modulefinder import ModuleFinder
from pathlib import Path
from pkgutil import ModuleInfo
from types import ModuleType
from unittest import mock
from cucumber_tag_expressions import parse
from tests.utilities import make_project
from ward import fixture, test
from ward._collect import (
PackageData,
_build_package_data,
_get_module_path,
_handled_within,
_is_excluded_module,
_remove_excluded_paths,
filter_fixtures,
filter_tests,
is_test_module,
)
from ward.fixtures import Fixture
from ward.testing import Test, each, skip
def named():
assert "fox" == "fox"
@fixture
def named_test():
return Test(fn=named, module_name="my_module")
@fixture
def tests_to_search(named_test=named_test):
return [named_test]
@test("__filter_tests__ matches on qualified test name")
def _(tests=tests_to_search, named=named_test):
results = filter_tests(tests, query="my_module.named")
assert list(results) == [named]
@test("filter_tests matches on test name alone")
def _(tests=tests_to_search, named=named_test):
results = filter_tests(tests, query="named")
assert list(results) == [named]
@test("filter_tests `query='fox'` returns tests with `'fox'` in the body")
def _(tests=tests_to_search, named=named_test):
results = filter_tests(tests, query="fox")
assert list(results) == [named]
@test("filter_tests returns an empty list when no tests match query")
def _(tests=tests_to_search):
assert [] == filter_tests(tests, query="92qj3f9i")
@test("filter_tests when tags match simple tag expression")
def _():
apples = Test(fn=named, module_name="", tags=["apples"])
bananas = Test(fn=named, module_name="", tags=["bananas"])
results = list(filter_tests([apples, bananas], tag_expr=parse("apples")))
assert results == [apples]
@test("filter_tests when tags match complex tag expression")
def _():
one = Test(fn=named, module_name="", tags=["apples", "bananas"])
two = Test(fn=named, module_name="", tags=["bananas", "carrots"])
three = Test(fn=named, module_name="", tags=["bananas"])
tag_expr = parse("apples or bananas and not carrots")
results = list(filter_tests([one, two, three], tag_expr=tag_expr))
assert results == [one, three]
@test("filter_tests when both query and tag expression match a test")
def _():
one = Test(fn=named, module_name="one", tags=["apples"])
two = Test(fn=named, module_name="two", tags=["apples"])
tag_expr = parse("apples")
results = list(filter_tests([one, two], query="two", tag_expr=tag_expr))
# Both tests match the tag expression, but only two matches the search query
# because the query matches the module name for the test.
assert results == [two]
@test("filter_tests when a test is defined with an empty tag list doesnt match")
def _():
t = Test(fn=named, module_name="", tags=[])
tag_expr = parse("apples")
results = list(filter_tests([t], tag_expr=tag_expr))
assert results == []
@test("filter_tests matches all tags when a tag expression is an empty string")
def _():
t = Test(fn=named, module_name="", tags=["apples"])
tag_expr = parse("")
results = list(filter_tests([t], tag_expr=tag_expr))
assert results == [t]
@test("filter_tests returns [] when the tag expression matches no tests")
def _():
one = Test(fn=named, module_name="one", tags=["apples"])
two = Test(fn=named, module_name="two", tags=["bananas"])
tag_expr = parse("carrots")
results = list(filter_tests([one, two], tag_expr=tag_expr))
assert results == []
@fixture
def named_fixture():
pass
@fixture
def marker_fixture():
return "marker"
@test("filter_fixtures on empty list returns empty list")
def _():
assert list(filter_fixtures([])) == []
@test("filter_fixtures matches anything with empty query and paths")
def _():
fixtures = [Fixture(f) for f in [named_fixture, marker_fixture]]
assert list(filter_fixtures(fixtures)) == fixtures
@test("filter_fixtures matches 'named_fixture' by name query {query!r}")
def _(query=each("named_fixture", "named", "fixture", "med_fix")):
fixtures = [Fixture(f) for f in [named_fixture]]
assert list(filter_fixtures(fixtures, query=query)) == fixtures
@test("filter_fixtures matches 'named_fixture' by module name query on {query!r}")
def _(query=each("test", "test_collect", "collect", "t_coll")):
fixtures = [Fixture(f) for f in [named_fixture]]
assert list(filter_fixtures(fixtures, query=query)) == fixtures
@test("filter_fixtures matches fixture by source query on {query!r}")
def _(query=each("marker", "mark", "ret", "return", '"')):
fixtures = [Fixture(f) for f in [named_fixture, marker_fixture]]
assert list(filter_fixtures(fixtures, query=query)) == [Fixture(marker_fixture)]
@test("filter_fixtures excludes fixtures when querying for {query!r}")
def _(query=each("echo", "foobar", "wizbang")):
fixtures = [Fixture(f) for f in [named_fixture, marker_fixture]]
assert list(filter_fixtures(fixtures, query=query)) == []
THIS_FILE = Path(__file__)
@test("filter_fixtures matches fixture by path on {path}")
def _(path=each(THIS_FILE, THIS_FILE.parent, THIS_FILE.parent.parent)):
fixtures = [Fixture(f) for f in [named_fixture]]
assert list(filter_fixtures(fixtures, paths=[path])) == fixtures
@test("filter_fixtures excludes by path on {path}")
def _(path=each(THIS_FILE.parent / "the-fixture-is-not-in-this-file.py")):
fixtures = [Fixture(f) for f in [named_fixture]]
assert list(filter_fixtures(fixtures, paths=[path])) == []
@test("is_test_module(<module: '{module_name}'>) returns {rv}")
def _(module_name=each("test_apples", "apples"), rv=each(True, False)):
module = ModuleInfo(ModuleFinder(), module_name, False)
assert is_test_module(module) == rv
PATH = Path("path/to/test_mod.py")
class StubModuleFinder:
def find_module(self, module_name: str):
return StubSourceFileLoader()
@dataclass
class StubSourceFileLoader:
path: str = PATH
@fixture
def test_module():
return ModuleInfo(StubModuleFinder(), PATH.stem, False)
@test("get_module_path returns the path of the module")
def _(mod=test_module):
assert _get_module_path(mod) == PATH
@test("is_excluded_module({mod.name}) is True for {excludes}")
def _(
mod=test_module,
excludes=str(PATH),
):
assert _is_excluded_module(mod, [excludes])
@test("is_excluded_module({mod.name}) is False for {excludes}")
def _(mod=test_module, excludes=each("abc", "/path/to", "/path")):
assert not _is_excluded_module(mod, exclusions=[excludes])
@fixture
def paths_to_py_files():
return [
Path("/a/b/c.py"),
Path("/a/b/d/e.py"),
Path("/a/b/d/f/g/h.py"),
]
for path_to_exclude in ["/a", "/a/", "/a/b", "/a/b/"]:
@test("remove_excluded_paths removes {exclude} from list of paths")
def _(exclude=path_to_exclude, paths=paths_to_py_files):
assert _remove_excluded_paths(paths, [exclude]) == []
@test(
"remove_excluded_paths removes correct files when exclusions relative to each other"
)
def _(paths=paths_to_py_files):
assert _remove_excluded_paths(paths, ["/a/b/d", "/a/b/d/", "/a/b/d/f"]) == [
Path("/a/b/c.py")
]
@test("remove_excluded_paths removes individually specified files")
def _(paths=paths_to_py_files):
assert _remove_excluded_paths(paths, ["/a/b/d/e.py", "/a/b/d/f/g/h.py"]) == [
Path("/a/b/c.py")
]
@test("remove_excluded_paths can remove mixture of files and dirs")
def _(paths=paths_to_py_files):
assert _remove_excluded_paths(paths, ["/a/b/d/e.py", "/a/b/d/f/g/"]) == [
Path("/a/b/c.py")
]
@fixture
def project():
yield from make_project("module.py")
@test("handled_within({mod}, {search}) is True")
def _(
root: Path = project, search=each("", "/", "a", "a/b", "a/b/c"), mod="a/b/c/d/e.py"
):
module_path = root / mod
assert _handled_within(module_path, [root / search])
@test("handled_within({mod}, {search}) is False")
def _(
root: Path = project,
search=each("x/y/z", "test_a.py", "a/b.py", "a/b/c/d/e.py"),
mod="a/b/c/d/e.py",
):
module_path = root / mod
assert not _handled_within(module_path, [root / search])
@skip("Skipped on Windows", when=platform.system() == "Windows")
@test("_build_package_data constructs correct package data")
def _():
from ward._collect import Path as ImportedPath
m = ModuleType(name="")
m.__file__ = "/foo/bar/baz/test_something.py"
patch_is_dir = mock.patch.object(ImportedPath, "is_dir", return_value=True)
# The intention of the side_effects below is to make `baz` and `bar` directories
# contain __init__.py files. It's not clean, but it does test the behaviour well.
patch_exists = mock.patch.object(
ImportedPath, "exists", side_effect=[True, True, False]
)
with patch_is_dir, patch_exists:
assert _build_package_data(m) == PackageData(
pkg_name="bar.baz", pkg_root=Path("/foo")
)
@skip("Skipped on Unix", when=platform.system() != "Windows")
@test("_build_package_data constructs package name '{pkg}' from '{path}'")
def _():
from ward._collect import Path as ImportedPath
m = ModuleType(name="")
m.__file__ = "\\foo\\bar\\baz\\test_something.py"
patch_is_dir = mock.patch.object(ImportedPath, "is_dir", return_value=True)
patch_exists = mock.patch.object(
ImportedPath, "exists", side_effect=[True, True, False]
)
with patch_is_dir, patch_exists:
pkg_data = _build_package_data(m)
assert pkg_data.pkg_name == "bar.baz"
assert str(pkg_data.pkg_root).endswith("foo")
| StarcoderdataPython |
5171 | from xagents import a2c, acer, ddpg, dqn, ppo, td3, trpo
from xagents.a2c.agent import A2C
from xagents.acer.agent import ACER
from xagents.base import OffPolicy
from xagents.ddpg.agent import DDPG
from xagents.dqn.agent import DQN
from xagents.ppo.agent import PPO
from xagents.td3.agent import TD3
from xagents.trpo.agent import TRPO
from xagents.utils.cli import play_args, train_args, tune_args
from xagents.utils.common import register_models
__author__ = 'schissmantics'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__version__ = '1.0.1'
agents = {
'a2c': {'module': a2c, 'agent': A2C},
'acer': {'module': acer, 'agent': ACER},
'dqn': {'module': dqn, 'agent': DQN},
'ppo': {'module': ppo, 'agent': PPO},
'td3': {'module': td3, 'agent': TD3},
'trpo': {'module': trpo, 'agent': TRPO},
'ddpg': {'module': ddpg, 'agent': DDPG},
}
register_models(agents)
commands = {
'train': (train_args, 'fit', 'Train given an agent and environment'),
'play': (
play_args,
'play',
'Play a game given a trained agent and environment',
),
'tune': (
tune_args,
'',
'Tune hyperparameters given an agent, hyperparameter specs, and environment',
),
}
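# Example lookup against the registries above (illustrative):
#   module, agent_cls = agents['dqn']['module'], agents['dqn']['agent']
#   parser_fn, agent_method, help_text = commands['train']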
| StarcoderdataPython |
3354632 | import numpy as np
import theano
import theano.tensor as T
try:
import cPickle as pickle
except:
import pickle
import sys
#list for converting index to character
dic = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',\
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',\
'1','2','3','4','5','6','7','8','9','0','-','.',',','!','?','(',')','\'','"',' ']
#load preprocessed numpy array
text = np.load('hhgttg.npy')
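#each row of `text` is assumed to be a one-hot vector of length 72 (one slot per
#character in `dic` above); the generation code below decodes rows back into
#characters with np.argmax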
#characters to process per iteration
batch = 160
#increase recursion limit for lstm
sys.setrecursionlimit(10000)
#lstm class
class lstm(object):
def __init__(self):
#nn architecture
self.input = T.matrix()
self.Wi = theano.shared(self.ortho_weight(256)[:72,:])
self.Wf = theano.shared(self.ortho_weight(256)[:72,:])
self.Wc = theano.shared(self.ortho_weight(256)[:72,:])
self.Wo = theano.shared(self.ortho_weight(256)[:72,:])
self.Ui = theano.shared(self.ortho_weight(256))
self.Uf = theano.shared(self.ortho_weight(256))
self.Uc = theano.shared(self.ortho_weight(256))
self.Uo = theano.shared(self.ortho_weight(256))
self.bi = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(256)),dtype=theano.config.floatX))
self.bf = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(256)),dtype=theano.config.floatX))
self.bc = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(256)),dtype=theano.config.floatX))
self.bo = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(256)),dtype=theano.config.floatX))
self.C0 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(256)),dtype=theano.config.floatX))
self.h0 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(256)),dtype=theano.config.floatX))
self.W2 = theano.shared(self.ortho_weight(256)[:,:72])
self.b2 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(72)),dtype=theano.config.floatX))
self.target = T.matrix()
#nn functions
self.params = [self.Wi,self.Wf,self.Wc,self.Wo,self.Ui,self.Uf,self.Uc,self.Uo,self.bi,self.bf,self.bc,self.bo,self.h0,self.C0,self.W2,self.b2]
[self.c,self.h_output],_ = theano.scan(fn=self.step,sequences=self.input,outputs_info=[self.C0,self.h0],non_sequences=self.params[:-4])
self.output = T.nnet.softmax(T.dot(self.h_output,self.W2)+self.b2)[40:,:]
self.cost = T.nnet.categorical_crossentropy(self.output,self.target).mean()
self.updates = self.adam(self.cost,self.params)
self.train = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
self.predict = theano.function([self.input],self.output,allow_input_downcast=True)
def step(self,input,h0,C0,Wi,Wf,Wc,Wo,Ui,Uf,Uc,Uo,bi,bf,bc,bo):
'''
lstm memory cell functions
'''
i = T.nnet.sigmoid(T.dot(input,Wi)+T.dot(h0,Ui)+bi)
cand = T.tanh(T.dot(input,Wc)+T.dot(h0,Uc)+bc)
f = T.nnet.sigmoid(T.dot(input,Wf)+T.dot(h0,Uf)+bf)
c = cand*i+C0*f
o = T.nnet.sigmoid(T.dot(input,Wo)+T.dot(h0,Uo)+bo)
h = o*T.tanh(c)
return c,h
def ortho_weight(self,ndim):
'''
orthogonal weight initialiation
'''
bound = np.sqrt(1./ndim)
W = np.random.randn(ndim, ndim)*bound
u, s, v = np.linalg.svd(W)
return u.astype(theano.config.floatX)
def adam(self, cost, params, lr=0.0002, b1=0.1, b2=0.01, e=1e-8):
'''
adam gradient descent updates
'''
updates = []
grads = T.grad(cost, params)
self.i = theano.shared(np.float32(0.))
i_t = self.i + 1.
fix1 = 1. - (1. - b1)**i_t
fix2 = 1. - (1. - b2)**i_t
lr_t = lr * (T.sqrt(fix2) / fix1)
for p, g in zip(params, grads):
self.m = theano.shared(p.get_value() * 0.)
self.v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * self.m)
v_t = (b2 * T.sqr(g)) + ((1. - b2) * self.v)
g_t = m_t / (T.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((self.m, m_t))
updates.append((self.v, v_t))
updates.append((p, p_t))
updates.append((self.i, i_t))
return updates
#open previous lowest training cost if it exists
try:
with open("nn_cost.dat","rb") as c:
min_cost = float(c.readline())
except:
min_cost = 1000
#open previous saved lstm if it exists
try:
with open("nn.dat","rb") as pickle_nn:
NN = pickle.load(pickle_nn)
print "saved neural network loaded"
except:
print "pickle load failed, creating new NN"
NN = lstm()
with open("nn.dat","wb") as f:
pickle.dump(NN,f,pickle.HIGHEST_PROTOCOL)
#save all generated text to file for archiving
generated = open('generated.txt','w')
#train lstm
for i in range(1000000):
#select random starting point in text
start = np.random.randint(40,text.shape[0]-batch-1)
X_train = text[start-40:start+batch,:]
y_train = text[start+1:start+1+batch,:]
cost = NN.train(X_train,y_train)
print "step %i training error:" % (i+1), cost
#try generating text every 500 iterations
if (i+1) % 500 == 0:
string = ""
start = np.random.randint(text.shape[0]-41)
X_test = text[start:start+41,:]
for j in range(40):
row = X_test[j,:]
max = np.argmax(row)
string += dic[max]
out = NN.predict(X_test)
for j in range(160):
max = np.argmax(out)
string += dic[max]
next = np.zeros((1,72))
next[0,max] = 1
X_test = np.vstack((X_test[1:,:],next))
out = NN.predict(X_test)
print string
generated.write((str(i+1)+": "+string+"\n"))
#checkpoint
#save lstm if current checkpoint cost is lower than previous checkpoint cost
#else load previous saved lstm
if cost < min_cost:
print "pickling neural network"
min_cost = cost
with open("nn_cost.dat","wb") as c:
c.write(str(min_cost))
with open("nn.dat","wb") as f:
pickle.dump(NN,f,pickle.HIGHEST_PROTOCOL)
elif cost < min_cost+0.2:
print "pickling neural network"
with open("nn_cost.dat","wb") as c:
c.write(str(min_cost))
with open("nn.dat","wb") as f:
pickle.dump(NN,f,pickle.HIGHEST_PROTOCOL)
elif cost > min_cost+0.5:
print "reloading last good NN"
with open("nn.dat","rb") as pickle_nn:
NN = pickle.load(pickle_nn)
generated.close() | StarcoderdataPython |
68105 | <gh_stars>10-100
#!/usr/bin/python
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Script that uses either test app or qemu controlled by python-pexpect
import sys, autotest_data, autotest_runner
def usage():
print"Usage: autotest.py [test app|test iso image]",
print "[target] [whitelist|-blacklist]"
if len(sys.argv) < 3:
usage()
sys.exit(1)
target = sys.argv[2]
test_whitelist=None
test_blacklist=None
# get blacklist/whitelist
if len(sys.argv) > 3:
testlist = sys.argv[3].split(',')
testlist = [test.lower() for test in testlist]
if testlist[0].startswith('-'):
testlist[0] = testlist[0].lstrip('-')
test_blacklist = testlist
else:
test_whitelist = testlist
cmdline = "%s -c f -n 4"%(sys.argv[1])
print cmdline
runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist, test_whitelist)
for test_group in autotest_data.parallel_test_group_list:
runner.add_parallel_test_group(test_group)
for test_group in autotest_data.non_parallel_test_group_list:
runner.add_non_parallel_test_group(test_group)
num_fails = runner.run_all_tests()
sys.exit(num_fails)
| StarcoderdataPython |
3215735 | <reponame>rekhabiswal/sage<gh_stars>0
"""
Root system data for dual Cartan types
"""
#*****************************************************************************
# Copyright (C) 2008-2009 <NAME> <anne at math.ucdavis.edu>
# Copyright (C) 2008-2013 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from sage.misc.misc import attrcall
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.combinat.root_system import cartan_type
from sage.combinat.root_system.root_lattice_realizations import RootLatticeRealizations
from sage.combinat.root_system import ambient_space
class CartanType(cartan_type.CartanType_decorator, cartan_type.CartanType_crystallographic):
r"""
A class for dual Cartan types.
The dual of a (crystallographic) Cartan type is a Cartan type with
the same index set, but all arrows reversed in the Dynkin diagram
(otherwise said, the Cartan matrix is transposed). It shares a lot
of properties in common with its dual. In particular, the Weyl
group is isomorphic to that of the dual as a Coxeter group.
EXAMPLES:
For all finite Cartan types, and in particular the simply laced
ones, the dual Cartan type is given by another preexisting Cartan
type::
sage: CartanType(['A',4]).dual()
['A', 4]
sage: CartanType(['B',4]).dual()
['C', 4]
sage: CartanType(['C',4]).dual()
['B', 4]
sage: CartanType(['F',4]).dual()
['F', 4] relabelled by {1: 4, 2: 3, 3: 2, 4: 1}
So to exercise this class we consider some non simply laced affine
Cartan types and also create explicitely `F_4^*` as a dual cartan
type::
sage: from sage.combinat.root_system.type_dual import CartanType as CartanTypeDual
sage: F4d = CartanTypeDual(CartanType(['F',4])); F4d
['F', 4]^*
sage: G21d = CartanType(['G',2,1]).dual(); G21d
['G', 2, 1]^*
They share many properties with their original Cartan types::
sage: F4d.is_irreducible()
True
sage: F4d.is_crystallographic()
True
sage: F4d.is_simply_laced()
False
sage: F4d.is_finite()
True
sage: G21d.is_finite()
False
sage: F4d.is_affine()
False
sage: G21d.is_affine()
True
TESTS::
sage: TestSuite(F4d).run(skip=["_test_pickling"])
sage: TestSuite(G21d).run()
.. NOTE:: F4d is pickled by construction as F4.dual() hence the above failure.
"""
def __init__(self, type):
"""
INPUT:
- ``type`` -- a Cartan type
EXAMPLES::
sage: ct = CartanType(['F',4,1]).dual()
sage: TestSuite(ct).run()
TESTS::
sage: ct1 = CartanType(['B',3,1]).dual()
sage: ct2 = CartanType(['B',3,1]).dual()
sage: ct3 = CartanType(['D',4,1]).dual()
sage: ct1 == ct2
True
sage: ct1 == ct3
False
Test that the produced Cartan type is in the appropriate
abstract classes (see :trac:`13724`)::
sage: from sage.combinat.root_system import cartan_type
sage: ct = CartanType(['B',3,1]).dual()
sage: TestSuite(ct).run()
sage: isinstance(ct, cartan_type.CartanType_simple)
True
sage: isinstance(ct, cartan_type.CartanType_finite)
False
sage: isinstance(ct, cartan_type.CartanType_affine)
True
sage: isinstance(ct, cartan_type.CartanType_crystallographic)
True
sage: isinstance(ct, cartan_type.CartanType_simply_laced)
False
By default, the dual of a reducible and finite type is not
constructed as such::
sage: ct = CartanType([['B',4],['A',2]]).dual(); ct
C4xA2
In order to exercise the dual infrastructure we force the
construction as a dual::
sage: from sage.combinat.root_system import type_dual
sage: ct = type_dual.CartanType(CartanType([['B',4],['A',2]])); ct
B4xA2^*
sage: isinstance(ct, type_dual.CartanType)
True
sage: TestSuite(ct).run(skip=["_test_pickling"])
sage: isinstance(ct, cartan_type.CartanType_finite)
True
sage: isinstance(ct, cartan_type.CartanType_simple)
False
sage: isinstance(ct, cartan_type.CartanType_affine)
False
sage: isinstance(ct, cartan_type.CartanType_crystallographic)
True
sage: isinstance(ct, cartan_type.CartanType_simply_laced)
False
"""
if not type.is_crystallographic():
raise NotImplementedError("only implemented for crystallographic Cartan types")
cartan_type.CartanType_decorator.__init__(self, type)
# TODO: design an appropriate infrastructure to handle this
# automatically? Maybe using categories and axioms?
# See also type_relabel.CartanType.__init__
if type.is_finite():
self.__class__ = CartanType_finite
elif type.is_affine():
self.__class__ = CartanType_affine
abstract_classes = tuple(cls
for cls in self._stable_abstract_classes
if isinstance(type, cls))
if abstract_classes:
self._add_abstract_superclass(abstract_classes)
    # For each class cls in _stable_abstract_classes, if ct is an
    # instance of cls then ct.dual() is put in this class as well.
# The order is relevant to avoid MRO issues!
_stable_abstract_classes = [
cartan_type.CartanType_simple]
def _repr_(self, compact = False):
"""
EXAMPLES::
sage: CartanType(['F', 4, 1]).dual()
['F', 4, 1]^*
sage: CartanType(['F', 4, 1]).dual()._repr_(compact = True)
'F4~*'
"""
dual_str = self.options.dual_str
if self.is_affine() and self.options.notation == "Kac":
if self._type.type() == 'B':
if compact:
return 'A%s^2'%(self.classical().rank()*2-1)
return "['A', %s, 2]"%(self.classical().rank()*2-1)
elif self._type.type() == 'BC':
dual_str = '+'
elif self._type.type() == 'C':
if compact:
return 'D%s^2'%(self.rank())
return "['D', %s, 2]"%(self.rank())
elif self._type.type() == 'F':
if compact:
return 'E6^2'
return "['E', 6, 2]"
return self.dual()._repr_(compact)+(dual_str if compact else "^"+dual_str)
def _latex_(self):
r"""
EXAMPLES::
sage: latex(CartanType(['F', 4, 1]).dual())
F_4^{(1)\vee}
"""
return self._type._latex_()+"^"+self.options.dual_latex
def __reduce__(self):
"""
TESTS::
sage: CartanType(['F', 4, 1]).dual().__reduce__()
(*.dual(), (['F', 4, 1],))
"""
return (attrcall("dual"), (self._type,))
def _latex_dynkin_diagram(self, label=lambda i: i, node=None, node_dist=2):
r"""
EXAMPLES::
sage: print(CartanType(['F',4,1]).dual()._latex_dynkin_diagram())
\draw (0 cm,0) -- (2 cm,0);
{
\pgftransformxshift{2 cm}
\draw (0 cm,0) -- (2 cm,0);
\draw (2 cm, 0.1 cm) -- +(2 cm,0);
\draw (2 cm, -0.1 cm) -- +(2 cm,0);
\draw (4.0 cm,0) -- +(2 cm,0);
\draw[shift={(2.8, 0)}, rotate=180] (135 : 0.45cm) -- (0,0) -- (-135 : 0.45cm);
\draw[fill=white] (0 cm, 0 cm) circle (.25cm) node[below=4pt]{$1$};
\draw[fill=white] (2 cm, 0 cm) circle (.25cm) node[below=4pt]{$2$};
\draw[fill=white] (4 cm, 0 cm) circle (.25cm) node[below=4pt]{$3$};
\draw[fill=white] (6 cm, 0 cm) circle (.25cm) node[below=4pt]{$4$};
}
\draw[fill=white] (0 cm, 0 cm) circle (.25cm) node[below=4pt]{$0$};
"""
if node is None:
node = self._latex_draw_node
return self._type._latex_dynkin_diagram(label, node, node_dist, dual=True)
def ascii_art(self, label=lambda i: i, node=None):
"""
Return an ascii art representation of this Cartan type
(by hacking the ascii art representation of the dual Cartan type)
EXAMPLES::
sage: print(CartanType(["B", 3, 1]).dual().ascii_art())
O 0
|
|
O---O=<=O
1 2 3
sage: print(CartanType(["C", 4, 1]).dual().ascii_art())
O=<=O---O---O=>=O
0 1 2 3 4
sage: print(CartanType(["G", 2, 1]).dual().ascii_art())
3
O=>=O---O
1 2 0
sage: print(CartanType(["F", 4, 1]).dual().ascii_art())
O---O---O=<=O---O
0 1 2 3 4
sage: print(CartanType(["BC", 4, 2]).dual().ascii_art())
O=>=O---O---O=>=O
0 1 2 3 4
"""
if node is None:
node = self._ascii_art_node
res = self._type.ascii_art(label, node)
# swap, like a computer science freshman!
# This assumes that the oriented multiple arrows are always ascii arted as =<= or =>=
res = res.replace("=<=", "=?=")
res = res.replace("=>=", "=<=")
res = res.replace("=?=", "=>=")
return res
def __eq__(self, other):
"""
Return whether ``self`` is equal to ``other``.
EXAMPLES::
sage: B41 = CartanType(['B', 4, 1])
sage: B41dual = CartanType(['B', 4, 1]).dual()
sage: F41dual = CartanType(['F', 4, 1]).dual()
sage: F41dual == F41dual
True
sage: F41dual == B41dual
False
sage: B41dual == B41
False
"""
if not isinstance(other, CartanType):
return False
return self._type == other._type
def __ne__(self, other):
"""
Return whether ``self`` is equal to ``other``.
EXAMPLES::
sage: B41 = CartanType(['B', 4, 1])
sage: B41dual = CartanType(['B', 4, 1]).dual()
sage: F41dual = CartanType(['F', 4, 1]).dual()
sage: F41dual != F41dual
False
sage: F41dual != B41dual
True
sage: B41dual != B41
True
"""
return not (self == other)
def __hash__(self):
"""
Compute the hash of ``self``.
EXAMPLES::
sage: B41 = CartanType(['B', 4, 1])
sage: B41dual = CartanType(['B', 4, 1]).dual()
sage: h = hash(B41dual)
"""
return hash(self._type)
def dual(self):
"""
EXAMPLES::
sage: ct = CartanType(['F', 4, 1]).dual()
sage: ct.dual()
['F', 4, 1]
"""
return self._type
def dynkin_diagram(self):
"""
EXAMPLES::
sage: ct = CartanType(['F', 4, 1]).dual()
sage: ct.dynkin_diagram()
O---O---O=<=O---O
0 1 2 3 4
F4~*
"""
return self._type.dynkin_diagram().dual()
###########################################################################
class AmbientSpace(ambient_space.AmbientSpace):
"""
Ambient space for a dual finite Cartan type.
It is constructed in the canonical way from the ambient space of
the original Cartan type by switching the roles of simple roots,
fundamental weights, etc.
.. NOTE::
Recall that, for any finite Cartan type, and in particular the
a simply laced one, the dual Cartan type is constructed as
another preexisting Cartan type. Furthermore the ambient space
for an affine type is constructed from the ambient space for
its classical type. Thus this code is not actually currently
used.
It is kept for cross-checking and for reference in case it
could become useful, e.g., for dual of general Kac-Moody
types.
For the doctests, we need to explicitly create a dual type.
Subsequently, since reconstruction of the dual of type `F_4`
is the relabelled Cartan type, pickling fails on the
``TestSuite`` run.
EXAMPLES::
sage: ct = sage.combinat.root_system.type_dual.CartanType(CartanType(['F',4]))
sage: L = ct.root_system().ambient_space(); L
Ambient space of the Root system of type ['F', 4]^*
sage: TestSuite(L).run(skip=["_test_elements","_test_pickling"])
"""
@lazy_attribute
def _dual_space(self):
"""
The dual of this ambient space.
EXAMPLES::
sage: ct = sage.combinat.root_system.type_dual.CartanType(CartanType(['F',4]))
sage: L = ct.root_system().ambient_space(); L
Ambient space of the Root system of type ['F', 4]^*
sage: L._dual_space
Ambient space of the Root system of type ['F', 4]
The basic data for this space is fetched from the dual space::
sage: L._dual_space.simple_root(1)
(0, 1, -1, 0)
sage: L.simple_root(1)
(0, 1, -1, 0)
"""
K = self.base_ring()
return self.cartan_type().dual().root_system().ambient_space(K)
#return self.root_system.dual.ambient_space()
def dimension(self):
"""
Return the dimension of this ambient space.
.. SEEALSO:: :meth:`sage.combinat.root_system.ambient_space.AmbientSpace.dimension`
EXAMPLES::
sage: ct = sage.combinat.root_system.type_dual.CartanType(CartanType(['F',4]))
sage: L = ct.root_system().ambient_space()
sage: L.dimension()
4
"""
        # Can't use _dual_space here yet: the base ring (and the Cartan type?) is not yet initialized
return self.root_system.dual.ambient_space().dimension()
@cached_method
def simple_root(self, i):
"""
Return the ``i``-th simple root.
It is constructed by looking up the corresponding simple
coroot in the ambient space for the dual Cartan type.
EXAMPLES::
sage: ct = sage.combinat.root_system.type_dual.CartanType(CartanType(['F',4]))
sage: ct.root_system().ambient_space().simple_root(1)
(0, 1, -1, 0)
sage: ct.root_system().ambient_space().simple_roots()
Finite family {1: (0, 1, -1, 0), 2: (0, 0, 1, -1), 3: (0, 0, 0, 2), 4: (1, -1, -1, -1)}
sage: ct.dual().root_system().ambient_space().simple_coroots()
Finite family {1: (0, 1, -1, 0), 2: (0, 0, 1, -1), 3: (0, 0, 0, 2), 4: (1, -1, -1, -1)}
Note that this ambient space is isomorphic, but not equal, to
that obtained by constructing `F_4` dual by relabelling::
sage: ct = CartanType(['F',4]).dual(); ct
['F', 4] relabelled by {1: 4, 2: 3, 3: 2, 4: 1}
sage: ct.root_system().ambient_space().simple_roots()
Finite family {1: (1/2, -1/2, -1/2, -1/2), 2: (0, 0, 0, 1), 3: (0, 0, 1, -1), 4: (0, 1, -1, 0)}
"""
dual_coroot = self._dual_space.simple_coroot(i)
return self.sum_of_terms(dual_coroot)
@cached_method
def fundamental_weights(self):
"""
Return the fundamental weights.
They are computed from the simple roots by inverting the
Cartan matrix. This is acceptable since this is only about
ambient spaces for finite Cartan types. Also, we do not have
to worry about the usual `GL_n` vs `SL_n` catch because type
`A` is self dual.
An alternative would have been to start from the fundamental
coweights in the dual ambient space, but those are not yet
implemented.
EXAMPLES::
sage: ct = sage.combinat.root_system.type_dual.CartanType(CartanType(['F',4]))
sage: L = ct.root_system().ambient_space()
sage: L.fundamental_weights()
Finite family {1: (1, 1, 0, 0), 2: (2, 1, 1, 0), 3: (3, 1, 1, 1), 4: (2, 0, 0, 0)}
Note that this ambient space is isomorphic, but not equal, to
that obtained by constructing `F_4` dual by relabelling::
sage: ct = CartanType(['F',4]).dual(); ct
['F', 4] relabelled by {1: 4, 2: 3, 3: 2, 4: 1}
sage: ct.root_system().ambient_space().fundamental_weights()
Finite family {1: (1, 0, 0, 0), 2: (3/2, 1/2, 1/2, 1/2), 3: (2, 1, 1, 0), 4: (1, 1, 0, 0)}
"""
return self.fundamental_weights_from_simple_roots()
@lazy_attribute
def _plot_projection(self):
"""
Return the default plot projection for ``self``.
If an ambient space uses barycentric projection, then so does
its dual.
.. SEEALSO::
- :meth:`sage.combinat.root_system.root_lattice_realizations.RootLatticeRealizations.ParentMethods._plot_projection`
EXAMPLES::
sage: ct = sage.combinat.root_system.type_dual.CartanType(CartanType(['G',2]))
sage: L = ct.root_system().ambient_space()
sage: L._plot_projection == L._plot_projection_barycentric
True
sage: L = RootSystem(['G',2]).coambient_space()
sage: L._plot_projection == L._plot_projection_barycentric
True
"""
dual_space = self.cartan_type().dual().root_system().ambient_space(self.base_ring())
if dual_space._plot_projection == dual_space._plot_projection_barycentric:
return self._plot_projection_barycentric
else:
RootLatticeRealizations.ParentMethods.__dict__["_plot_projection"]
class CartanType_finite(CartanType, cartan_type.CartanType_finite):
AmbientSpace = AmbientSpace
###########################################################################
class CartanType_affine(CartanType, cartan_type.CartanType_affine):
def classical(self):
"""
Return the classical Cartan type associated with self (which should
be affine).
EXAMPLES::
sage: CartanType(['A',3,1]).dual().classical()
['A', 3]
sage: CartanType(['B',3,1]).dual().classical()
['C', 3]
sage: CartanType(['F',4,1]).dual().classical()
['F', 4] relabelled by {1: 4, 2: 3, 3: 2, 4: 1}
sage: CartanType(['BC',4,2]).dual().classical()
['B', 4]
"""
return self.dual().classical().dual()
def basic_untwisted(self):
r"""
Return the basic untwisted Cartan type associated with this affine
Cartan type.
Given an affine type `X_n^{(r)}`, the basic untwisted type is `X_n`.
In other words, it is the classical Cartan type that is twisted to
obtain ``self``.
EXAMPLES::
sage: CartanType(['A', 7, 2]).basic_untwisted()
['A', 7]
sage: CartanType(['E', 6, 2]).basic_untwisted()
['E', 6]
sage: CartanType(['D', 4, 3]).basic_untwisted()
['D', 4]
"""
from . import cartan_type
if self.dual().type() == 'B':
return cartan_type.CartanType(['A', self.classical().rank()*2-1])
elif self.dual().type() == 'BC':
return cartan_type.CartanType(['A', self.classical().rank()*2])
elif self.dual().type() == 'C':
return cartan_type.CartanType(['D', self.classical().rank()+1])
elif self.dual().type() == 'F':
return cartan_type.CartanType(['E', 6])
elif self.dual().type() == 'G':
return cartan_type.CartanType(['D', 4])
def special_node(self):
"""
Implement :meth:`CartanType_affine.special_node`
The special node of the dual of an affine type `T` is the
special node of `T`.
EXAMPLES::
sage: CartanType(['A',3,1]).dual().special_node()
0
sage: CartanType(['B',3,1]).dual().special_node()
0
sage: CartanType(['F',4,1]).dual().special_node()
0
sage: CartanType(['BC',4,2]).dual().special_node()
0
"""
return self.dual().special_node()
def _repr_(self, compact=False):
"""
EXAMPLES::
sage: CartanType(['F', 4, 1]).dual()
['F', 4, 1]^*
sage: CartanType(['F', 4, 1]).dual()._repr_(compact = True)
'F4~*'
"""
dual_str = self.options.dual_str
if self.options.notation == "Kac":
if self._type.type() == 'B':
if compact:
return 'A%s^2'%(self.classical().rank()*2-1)
return "['A', %s, 2]"%(self.classical().rank()*2-1)
elif self._type.type() == 'BC':
dual_str = '+'
elif self._type.type() == 'C':
if compact:
return 'D%s^2'%(self.rank())
return "['D', %s, 2]"%(self.rank())
elif self._type.type() == 'F':
if compact:
return 'E6^2'
return "['E', 6, 2]"
return CartanType._repr_(self, compact)
def _latex_(self):
r"""
Return a latex representation of ``self``.
EXAMPLES::
sage: latex(CartanType(['B',4,1]).dual())
B_{4}^{(1)\vee}
sage: latex(CartanType(['BC',4,2]).dual())
BC_{4}^{(2)\vee}
sage: latex(CartanType(['G',2,1]).dual())
G_2^{(1)\vee}
sage: CartanType.options['notation'] = 'Kac'
sage: latex(CartanType(['A',7,2]))
A_{7}^{(2)}
sage: latex(CartanType(['B',4,1]).dual())
A_{7}^{(2)}
sage: latex(CartanType(['A',8,2]))
A_{8}^{(2)}
sage: latex(CartanType(['A',8,2]).dual())
A_{8}^{(2)\dagger}
sage: latex(CartanType(['E',6,2]))
E_6^{(2)}
sage: latex(CartanType(['D',5,2]))
D_{5}^{(2)}
sage: CartanType.options._reset()
"""
if self.options('notation') == "Kac":
if self._type.type() == 'B':
return "A_{%s}^{(2)}"%(self.classical().rank()*2-1)
elif self._type.type() == 'BC':
return "A_{%s}^{(2)\\dagger}"%(2*self.classical().rank())
elif self._type.type() == 'C':
return "D_{%s}^{(2)}"%(self.rank)()
elif self._type.type() == 'F':
return "E_6^{(2)}"
result = self._type._latex_()
import re
if re.match(".*\^{\(\d\)}$", result):
return "%s%s}"%(result[:-1], self.options('dual_latex'))
else:
return "{%s}^%s"%(result, self.options('dual_latex'))
def _default_folded_cartan_type(self):
"""
Return the default folded Cartan type.
EXAMPLES::
sage: CartanType(['A', 6, 2]).dual()._default_folded_cartan_type()
['BC', 3, 2]^* as a folding of ['A', 5, 1]
sage: CartanType(['A', 5, 2])._default_folded_cartan_type()
['B', 3, 1]^* as a folding of ['D', 4, 1]
sage: CartanType(['D', 4, 2])._default_folded_cartan_type()
['C', 3, 1]^* as a folding of ['A', 5, 1]
sage: CartanType(['E', 6, 2])._default_folded_cartan_type()
['F', 4, 1]^* as a folding of ['E', 6, 1]
sage: CartanType(['G', 2, 1]).dual()._default_folded_cartan_type()
['G', 2, 1]^* as a folding of ['D', 4, 1]
"""
from sage.combinat.root_system.type_folded import CartanTypeFolded
letter = self._type.type()
if letter == 'BC': # A_{2n}^{(2)\dagger}
n = self._type.classical().rank()
return CartanTypeFolded(self, ['A', 2*n - 1, 1],
[[0]] + [[i, 2*n-i] for i in range(1, n)] + [[n]])
if letter == 'B': # A_{2n-1}^{(2)}
n = self._type.classical().rank()
return CartanTypeFolded(self, ['D', n + 1, 1],
[[i] for i in range(n)] + [[n, n+1]])
if letter == 'C': # D_{n+1}^{(2)}
n = self._type.classical().rank()
return CartanTypeFolded(self, ['A', 2*n-1, 1],
[[0]] + [[i, 2*n-i] for i in range(1, n)] + [[n]])
if letter == 'F': # E_6^{(2)}
return CartanTypeFolded(self, ['E', 6, 1], [[0], [2], [4], [3, 5], [1, 6]])
if letter == 'G': # D_4^{(3)}
return CartanTypeFolded(self, ['D', 4, 1], [[0], [1, 3, 4], [2]])
return super(CartanType, self)._default_folded_cartan_type()
| StarcoderdataPython |
3289558 | <reponame>gschivley/GenX-helpers
"Calculate region-specific costs from imports/exports/RPS/CES"
import os
from pathlib import Path
import pandas as pd
import yaml
def find_results_folders(year):
cwd = Path.cwd()
results_folders = list((cwd / f"{year}").rglob("Results"))
results_folders.sort()
return results_folders
def clean_case_name(name):
clean_name = " ".join(name.split("_")[2:]).replace("with", "w/").strip()
return clean_name
def find_years():
years = [
int(f.name)
for f in os.scandir(Path.cwd())
if f.is_dir() and "__" not in f.name and "." not in f.name
]
return years
def find_region_lines():
"Returns a dictionary of zone number: list of "
network_file_path = list(Path.cwd().rglob("Network.csv"))[0]
network_df = pd.read_csv(network_file_path)
network_df = network_df.set_index("Network_lines")
# Zones should be of form "z<x>" where x is an integer
zones = network_df["Network_zones"].dropna().to_list()
if "Region description" in network_df.columns:
zone_names = network_df["Region description"].dropna().to_list()
else:
zone_names = zones
zone_lines = {}
for zone, name in zip(zones, zone_names):
zone_lines[name] = network_df.loc[network_df[zone] != 0, :].index.to_list()
return zone_lines
def calc_import_export_costs(year, zone, lines):
zone_num = int(zone[1:])
results_folders = find_results_folders(year)
input_folders = [folder.parent / "Inputs" for folder in results_folders]
# col_order = [clean_case_name(folder.parent.stem) for folder in results_folders]
imports_dict = {}
exports_dict = {}
for i_folder, r_folder in zip(input_folders, results_folders):
case_name = clean_case_name(i_folder.parent.stem)
flow = pd.read_csv(r_folder / "flow.csv", index_col=0)
prices = pd.read_csv(r_folder / "prices.csv", index_col=0)
network = pd.read_csv(i_folder / "Network.csv")
network_direction = network.set_index("Network_lines")[zone]
imports = 0
exports = 0
# print(case_name)
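        # Sign convention, as inferred from the code below: the zone column of Network.csv
        # gives each line's defined direction relative to this zone, so hours where
        # direction * flow < 0 count as imports and hours where it is > 0 count as exports.
        # The -1 * direction factor then makes import costs positive and export revenues negative.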
for line in lines:
line_imports = flow.loc[
network_direction[line] * flow[f"{line}"] < 0, f"{line}"
]
line_exports = flow.loc[
network_direction[line] * flow[f"{line}"] > 0, f"{line}"
]
line_import_costs = (
-1
* network_direction[line]
* (line_imports * prices[f"{zone_num}"]).dropna().sum()
)
imports += line_import_costs
line_export_revenues = (
-1
* network_direction[line]
* (line_exports * prices[f"{zone_num}"]).dropna().sum()
)
exports += line_export_revenues
imports_dict[case_name] = imports
exports_dict[case_name] = exports
import_export_df = pd.DataFrame(
[imports_dict, exports_dict], index=["Import Costs", "Export Revenues"]
).T
import_export_df["Net Trade Costs"] = import_export_df.sum(axis=1)
import_export_df = import_export_df.astype(int)
# import_export_df["Zone"] = year
return import_export_df
def calc_rps_ces_costs(year, zone):
zone_num = int(zone[1:])
results_folders = find_results_folders(year)
input_folders = [folder.parent / "Inputs" for folder in results_folders]
# col_order = [clean_case_name(folder.parent.stem) for folder in results_folders]
rps_dict = {}
ces_dict = {}
for i_folder, r_folder in zip(input_folders, results_folders):
case_name = clean_case_name(i_folder.parent.stem)
# print(case_name, year)
genx_settings_path = i_folder.parent / "GenX_settings.yml"
with open(genx_settings_path, "r") as f:
settings = yaml.safe_load(f)
rps_adjustment = settings["RPS_Adjustment"]
ces_adjustment = settings["CES_Adjustment"]
rps_ces_prices = pd.read_csv(r_folder / "RPS_CES.csv", index_col=0)
rps_price = rps_ces_prices.loc[zone_num, "RPS_Price"]
ces_price = rps_ces_prices.loc[zone_num, "CES_Price"]
generators = pd.read_csv(i_folder / "Generators_data.csv")
resource_rps_value = generators.loc[generators["zone"] == zone_num, "RPS"]
resource_ces_value = generators.loc[generators["zone"] == zone_num, "CES"]
energy = pd.read_csv(r_folder / "Power.csv", index_col=0)
        # Calculate the weighted generation for each resource
weighted_gen = energy.loc["Sum", :].reset_index(drop=True)
# Credits from in-region generation by qualifying resources
region_rps_credits = (weighted_gen * resource_rps_value).sum()
region_ces_credits = (weighted_gen * resource_ces_value).sum()
network = pd.read_csv(i_folder / "Network.csv", index_col=1)
# Calculate how many credits are needed in a region
qualifying_resources = generators.loc[
(generators["zone"] == zone_num)
& (generators["STOR"] == 0)
& (generators["DR"] == 0)
& (generators["HEAT"] == 0),
:,
].index
qualifying_energy = weighted_gen.loc[qualifying_resources].sum()
region_rps_requirement = (
network.loc[zone, "RPS"] * qualifying_energy
) - rps_adjustment
region_ces_requirement = (
network.loc[zone, "CES"] * qualifying_energy
) - ces_adjustment
rps_credit_difference = region_rps_requirement - region_rps_credits
ces_credit_difference = region_ces_requirement - region_ces_credits
rps_cost = rps_credit_difference * rps_price
ces_cost = ces_credit_difference * ces_price
rps_dict[case_name] = rps_cost
ces_dict[case_name] = ces_cost
rps_ces_df = (
pd.DataFrame([rps_dict, ces_dict], index=["RPS Costs", "CES Costs"])
.T.fillna(0)
.astype(int)
)
# rps_ces_df["Zone"] = zone
return rps_ces_df
def calc_all_costs():
years = find_years()
zone_dict = find_region_lines()
results_list = []
for year in years:
        for zone, zone_lines in zone_dict.items():
import_export_df = calc_import_export_costs(year, zone, zone_lines)
rps_ces_df = calc_rps_ces_costs(year, zone)
combined_df = pd.concat([import_export_df, rps_ces_df], axis=1)
combined_df["Total Extra Costs"] = combined_df[
["Net Trade Costs", "RPS Costs", "CES Costs"]
].sum(axis=1)
combined_df["Year"] = year
combined_df["Zone"] = zone
results_list.append(combined_df)
final_costs = pd.concat(results_list)
final_costs.index.name = "Case"
final_costs = final_costs.reset_index().set_index(["Year", "Case"])
return final_costs
def main():
final_costs = calc_all_costs()
final_costs.to_csv("Zone specific costs.csv")
if __name__ == "__main__":
main()
| StarcoderdataPython |
156493 | <reponame>yaogdu/xiaozu_spider
DB={'address':'127.0.0.1:27017','db':'douban','col':'posts','replicaSet':'dmmongo'}
skip = [',','.',':',';','<','>','/','&','#']
url = 'https://www.douban.com/group/fangzi/discussion?start=100'
print url.split('=').pop(0) | StarcoderdataPython |
3290352 | <gh_stars>1-10
import cv2
from imagepy.core.engine import Simple
import numpy as np
class MatchTemplate(Simple):
title = 'Match Template'
note = ['all']
para = {'mat':'cv2.TM_CCOEFF','img':None}
view = [(list, 'mat', ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'], str, 'match', ''),
('img', 'img', 'image', '')]
def run(self, ips, imgs, para=None):
down, up = ips.range
ips2 = self.app.get_img(para['img'])
ips.snapshot()
sl1, sl2 = ips.slices, ips2.slices
cn1, cn2 = ips.channels, ips2.channels
if ips.dtype != ips2.dtype:
return self.app.alert('Two stack must be equal dtype!')
elif sl1 > 1 and sl2 > 1 and sl1 != sl2:
return self.app.alert('Two stack must have equal slices!')
elif cn1 > 1 and cn2 > 1 and cn1 != cn2:
return self.app.alert('Two stack must have equal channels!')
imgs1, imgs2 = ips.subimg(), ips2.subimg()
'''
if len(imgs1) == 1: imgs1 = imgs1 * len(imgs2)
if len(imgs2) == 1: imgs2 = imgs2 * len(imgs1)
if imgs1[0].shape[:2] != imgs2[0].shape[:2]:
return self.app.alert('Two image must be in equal shape')
'''
for i in range(len(imgs1)):
im1, im2 = imgs1[i], imgs2[i]
if cn1 == 1 and cn2 > 1:
im2 = im2.mean(axis=-1, dtype=np.float32)
if cn2 == 1 and cn1 > 1:
im2 = im2[:, :, None] * np.ones(cn1)
            # template matching
res = cv2.matchTemplate(im1, im2, method=eval(para['mat']))
            # find the locations of the min / max match scores
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if para['mat'] in ['cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']:
top_left = min_loc
else:
top_left = max_loc
            # color image
cn1, cn2 = ips.channels, ips2.channels
if cn2 == 3 and cn1 == 3:
w, h, c = im2.shape[::]
if cn2 == 1 and cn1 == 1:
w, h = im2.shape[::]
bottom_right = (top_left[0] + h, top_left[1] + w)
cv2.rectangle(im1, top_left, bottom_right, 255, 2)
plgs = [MatchTemplate]
| StarcoderdataPython |
152825 | <filename>hap-monitor-cron.py
#!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN) 2013
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME> <<EMAIL>>, 2014
import argparse
import socket
import traceback
import logging
import sys
import time
from sys import stdout
# Define logger
logging.basicConfig(stream=stdout,
level=logging.ERROR,
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
logger = logging.getLogger(__name__)
def monitor_haproxy(socket_name):
data = {}
INCLUDE_INFO = ['Process_num', 'Idle_pct']
INCLUDE_STAT = ['scur', 'qcur', 'chkfail', 'status', 'hrsp_1xx', 'hrsp_2xx', 'hrsp_3xx', 'hrsp_4xx', 'hrsp_5xx', 'req_rate', 'qtime', 'ctime', 'rtime', 'ttime']
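    # INCLUDE_INFO and INCLUDE_STAT act as whitelists: only these fields from HAProxy's
    # "show info" / "show stat" socket output are parsed below and forwarded to the
    # metrics backend (Graphite or statsd).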
# Request data from socket
logger.debug('Connecting to socket: %s' % socket_name)
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_name)
logger.debug('Requesting info')
s.send('show info\n')
raw_info = s.recv(4096)
s.close() # Note: socket is not reusable
logger.debug('Requesting stat')
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_name)
s.send('show stat\n')
raw_stat = s.recv(8192)
s.close()
except Exception as e:
logger.error('Failed requesting data from socket %s with execption %s' % (socket_name, e))
logger.debug(traceback.format_exc(e))
return None
logger.debug('Successfully requested data from socket.')
# Transforming info response into dictonary
logger.debug('Parsing info response')
for entry in raw_info.split('\n'):
tmp = entry.split(': ')
try:
if tmp[0] in INCLUDE_INFO:
data[tmp[0]] = float(tmp[1])
except Exception as e:
logger.error('Entry: %s failed with exception: %s' % (tmp, e))
logger.debug(traceback.format_exc(e))
logger.debug('Done parsing info response.')
# Transforming stat response into dictonary
logger.debug('Parsing stat response')
raw_stat = raw_stat.split('\n')
headers = raw_stat.pop(0).split(',')[2:-1] # Get the column headers and remove pxname and svname
for stat in raw_stat:
stat = stat.split(',')
if len(stat) == 1:
logger.debug('Ignored line: %s' % stat[0])
continue # Line is something else than stats
prefix = '%s.%s' % (stat.pop(0), stat.pop(0)) # Build metric prefix using pxname and svname
for column in range(len(headers)):
try:
if headers[column] in INCLUDE_STAT:
if (headers[column] == 'status') and (stat[column] in ['UP', 'DOWN', 'MAINT']) and (data['Process_num'] == 1.0):
for s in ['UP', 'DOWN', 'MAINT']:
data[prefix+'.'+headers[column]+'.'+s] = 0 # set all status to zero to support gauge values
data[prefix+'.'+headers[column]+'.'+stat[column]] = 1
else:
data[prefix+'.'+headers[column]] = float(stat[column])
except Exception as e:
logger.warning('Ignoring data: %s -> %s' % (headers[column], stat[column]))
logger.debug('Done parsing stat response.')
return data
def backend_graphite(url, stats, prefix):
process_num = stats['Process_num']
del(stats['Process_num'])
server_name = socket.getfqdn().split('.')[0]
prefix = '%s.%s.%s' % (prefix, server_name, int(process_num))
logger.debug('Reporting to prefix: %s' % prefix)
server, port = url.split(':')
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    except Exception as e:
logger.error('Unable to connect to Graphite backend %s: %s' % (url, e))
raise
for s in stats:
try:
sock.sendto('%s.%s %s %s\n' % (prefix, s, float(stats[s]), int(time.time())), (server, int(port)))
logger.debug('Message: %s.%s %s %s\n' % (prefix, s, float(stats[s]), int(time.time())))
except Exception as e:
logger.error('Failed reporting %s.%s %s %s\n' % (prefix, s, float(stats[s]), time.time()))
logger.error(traceback.format_exc(e))
def backend_statsd(url, stats, prefix):
from pystatsd import Client
process_num = stats['Process_num']
del(stats['Process_num'])
server_name = socket.getfqdn().split('.')[0]
prefix = '%s.%s.%s' % (prefix, server_name, int(process_num))
logger.debug('Reporting to prefix: %s' % prefix)
server, port = url.split(':')
try:
pystatsd_client = Client(host=server, port=port, prefix=prefix)
    except Exception as e:
logger.error('Unable to connect to statsd backend %s: %s' % (url, e))
raise
for s in stats:
try:
pystatsd_client.gauge(s, float(stats[s]))
logger.debug('%s.%s => %s' % (prefix, s, float(stats[s])))
except Exception as e:
logger.error('Failed reporting %s (%s): %s' % (s, stats[s], e))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--backend', metavar='B', type=str, nargs=1, help='Backend server URL[:port][::scope] to which the script will report to. E.g. --backend my.graphite.host:8025/listen/now::rucio.loadbalancer')
parser.add_argument('--type', metavar='T', type=str, nargs=1, help='Type of the backend server. Supported values are: G (Graphite) and S (statsd)')
parser.add_argument('--sockets', metavar='S', type=str, nargs='+', help='a list of socket files e.g. /var/run/haproxy_admin_process_no_1.sock')
parser.add_argument('--verbose', help='makes it chatty', action="store_true")
args = parser.parse_args()
if args.verbose:
logger.setLevel(level=logging.DEBUG)
if args.sockets is None:
print 'At least one input socket must be defined. Run --help for further information.'
sys.exit(1)
if args.backend is None:
print 'No backend information provided. Run --help for further information.'
sys.exit(1)
print args
type = 'g' if ((args.type is not None) and (args.type[0].lower() == 'g')) else 's' # If not Graphite, statsd is used
args = vars(args)
try:
url, prefix = args['backend'][0].split('::')
logger.debug('Reporting to backend (type: %s) => URL: %s\tPrefix: %s' % (type, url, prefix))
except ValueError:
logger.critical('Can not unpack backend information: %s' % args['backend'][0])
sys.exit(1)
for socket_name in args['sockets']:
try:
data = monitor_haproxy(socket_name)
if type == 'g':
backend_graphite(url, data, prefix)
else:
backend_statsd(url, data, prefix)
except Exception as e:
logger.error(e)
sys.exit(1)
sys.exit(0) | StarcoderdataPython |
def pct_inc(p1, p2):
return ((p2-p1)/p1)*100
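# Example with illustrative values (not part of the original file):
#   pct_inc(50.0, 75.0) == ((75.0 - 50.0) / 50.0) * 100 == 50.0, i.e. a 50% increase
#   (float inputs avoid integer division if this is run under Python 2).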
| StarcoderdataPython |
1690093 | <filename>show_architecture.py<gh_stars>0
from resnet_imagenet import resnet18
from resnet import resnet20
# model = resnet18()
# print(model)
model = resnet20().cpu()
import copy
import torch
fused_model = copy.deepcopy(model)
# print(fused_model)
# fuse the layers in the frontend
fused_model = torch.quantization.fuse_modules(fused_model,
[["conv_1_3x3", "bn_1", "relu"]],
inplace=True)
# print(fused_model)
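# The loop below fuses the conv/bn/relu triples inside every residual basic block of the
# resnet20 stages; fusing Conv+BN(+ReLU) modules is the usual preparation step before
# applying PyTorch quantization.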
for module_name, module in fused_model.named_children():
if "stage" in module_name:
for basic_block_name, basic_block in module.named_children():
torch.quantization.fuse_modules(
basic_block, [["conv_a", "bn_a", "relu_a"], ["conv_b", "bn_b"]],
inplace=True)
# for sub_block_name, sub_block in basic_block.named_children():
# if sub_block_name == "downsample":
# print(sub_block_name)
# print(fused_model) | StarcoderdataPython |
167166 | <filename>server/stylegan2_hypotheses_explorer/logic/model_loader/model_loader.py
from ...models import ModelsArray
from ..evaluator import Evaluator
from ..generator import Generator
from ..paths import MODELS_PATH, MODELS_SCHEMA_PATH
from ..util import load_and_validate
from .evaluator_loader import EvaluatorLoader
from .generator_loader import GeneratorLoader
class ModelLoader:
@classmethod
def get_generator(cls, id: int) -> Generator:
return GeneratorLoader.get_generator(id)
@classmethod
def get_evaluator(cls, id: int) -> Evaluator:
return EvaluatorLoader.get_evaluator(id)
@classmethod
def list_all_models(cls) -> ModelsArray:
return ModelsArray(
GeneratorLoader.list_generator_models(),
EvaluatorLoader.list_evaluator_models()
)
def __init__(self, step_size: float, offline_mode: bool):
self._step_size = step_size
self._offline_mode = offline_mode
def load_models(self):
models = load_and_validate(MODELS_PATH,
MODELS_SCHEMA_PATH)
EvaluatorLoader(self._step_size,
self._offline_mode).load(models["evaluators"])
GeneratorLoader(self._step_size,
self._offline_mode).load(models["generators"])
| StarcoderdataPython |
1659013 | <filename>venv/Lib/site-packages/PyQt4/examples/designer/plugins/widgets/counterlabel.pyw
#!/usr/bin/env python
"""
counterlabel.py
A PyQt custom widget example for Qt Designer.
Copyright (C) 2006 <NAME> <<EMAIL>>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from PyQt4 import QtCore, QtGui
class CounterLabel(QtGui.QWidget):
"""CounterLabel(QtGui.QWidget)
Provides a custom label widget to be used as a counter, with signals
similar to those provided by QAbstractSlider subclasses and properties
similar to those provided by QLabel.
"""
# We define two signals that are used to indicate changes to the status
# of the widget.
valueChanged = QtCore.pyqtSignal((int, ), (str, ))
def __init__(self, parent=None):
super(CounterLabel, self).__init__(parent)
self.setAutoFillBackground(False)
self._font = QtGui.QFont()
self._minimum = 1
self._maximum = 1
self._value = 1
self._offset = 0
self.rescale()
self.reposition()
def paintEvent(self, event):
p = QtGui.QPainter()
p.begin(self)
p.setRenderHint(QtGui.QPainter.Antialiasing)
p.setFont(self._font)
p.translate(self.width()/2.0, self.height()/2.0)
p.scale(self._scale, self._scale)
p.drawText(self._xpos, self._ypos, str(self._value))
p.end()
def sizeHint(self):
return QtCore.QSize(32, 32)
def rescale(self):
fm = QtGui.QFontMetricsF(self._font, self)
maxRect = fm.boundingRect(QtCore.QRectF(self.rect()),
QtCore.Qt.AlignCenter, str(self._maximum))
xscale = float(self.width())/maxRect.width()
yscale = float(self.height())/maxRect.height()
self._scale = min(xscale, yscale)
def reposition(self):
fm = QtGui.QFontMetricsF(self._font, self)
rect = fm.boundingRect(QtCore.QRectF(self.rect()),
QtCore.Qt.AlignCenter, str(self._value))
self._xpos = -rect.width()/2.0
self._ypos = rect.height()/2.0 - fm.descent()
self.update()
# Provide getter and setter methods for the font property.
def getFont(self):
return self._font
def setFont(self, font):
self._font = font
self.rescale()
self.reposition()
font = QtCore.pyqtProperty(QtGui.QFont, getFont, setFont)
# Provide getter and setter methods for the minimum and maximum properties.
def getMinimum(self):
return self._minimum
def setMinimum(self, value):
self._minimum = value
if self._minimum > self._maximum:
self.setMaximum(self._minimum)
if self._minimum > self._value:
self.setValue(self._minimum)
minimum = QtCore.pyqtProperty(int, getMinimum, setMinimum)
def getMaximum(self):
return self._maximum
def setMaximum(self, value):
self._maximum = value
self._minimum = min(self._minimum, self._maximum)
if self._maximum < self._value:
self.setValue(self._maximum)
self.rescale()
self.reposition()
maximum = QtCore.pyqtProperty(int, getMaximum, setMaximum)
# We provide an offset property to allow the value shown to differ from
# the internal value held by the widget.
def getOffset(self):
return self._offset
def setOffset(self, value):
self._offset = value
offset = QtCore.pyqtProperty(int, getOffset, setOffset)
# The value property is implemented using the getValue() and setValue()
# methods.
def getValue(self):
return self._value
# The setter method for the value property can also be used as a slot.
@QtCore.pyqtSlot(int)
def setValue(self, value):
if not self._minimum <= value <= self._maximum:
return
self._value = value
self.valueChanged[int].emit(value + self._offset)
self.valueChanged[str].emit(str(value + self._offset))
self.reposition()
value = QtCore.pyqtProperty(int, getValue, setValue)
# Like QAbstractSpinBox, we provide stepUp() and stepDown() slots to
# enable the value to be incremented and decremented.
@QtCore.pyqtSlot()
def stepUp(self):
self.setValue(self._value + 1)
@QtCore.pyqtSlot()
def stepDown(self):
self.setValue(self._value - 1)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
widget = CounterLabel()
widget.setValue(123)
widget.show()
sys.exit(app.exec_())
| StarcoderdataPython |
1672194 | <filename>examples/geomopt/01-pyberny.py
#!/usr/bin/env python
'''
Use pyberny to get the molecular equilibrium geometry.
'''
from pyscf import gto, scf
from pyscf.geomopt.berny_solver import optimize
mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='ccpvdz')
mf = scf.RHF(mol)
#
# geometry optimization for HF. There are two entries to invoke the berny
# geometry optimization.
#
# method 1: import the optimize function from pyscf.geomopt.berny_solver
mol_eq = optimize(mf)
print(mol_eq.atom_coords())
# method 2: create the optimizer from Gradients class
mol_eq = mf.Gradients().optimizer(solver='berny').kernel()
#
# geometry optimization for CASSCF
#
from pyscf import mcscf
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 4, 4)
conv_params = {
'gradientmax': 6e-3, # Eh/AA
'gradientrms': 2e-3, # Eh/AA
'stepmax': 2e-2, # AA
'steprms': 1.5e-2, # AA
}
# method 1
mol_eq = optimize(mc, **conv_params)
# method 2
mol_eq = mc.Gradients().optimizer(solver='berny').kernel(conv_params)
| StarcoderdataPython |
3392683 | ## Count characters in your string
## 6 kyu
## https://www.codewars.com/kata/52efefcbcdf57161d4000091
def count(string):
list1 = {}
for item in string:
if item not in list1:
list1[item] = 0
list1[item] += 1
return list1 | StarcoderdataPython |
3329922 | <filename>orttrainer/huggingface-gpt2/ort_addon/ort_supplement/src/transformers/trainer_ort.py
import json
import time
import logging
import os
import random
import re
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from tqdm import tqdm, trange
import onnxruntime
from onnxruntime.training import _utils, amp, checkpoint, optim, orttrainer, TrainStepInfo
from .data.data_collator import DataCollator, DefaultDataCollator
from .modeling_utils import PreTrainedModel
from .training_args import TrainingArguments
from .trainer import PredictionOutput, TrainOutput, EvalPrediction, set_seed, Trainer
from azureml.core.run import Run
# get the Azure ML run object
run = Run.get_context()
try:
from torch.utils.tensorboard import SummaryWriter
_has_tensorboard = True
except ImportError:
try:
from tensorboardX import SummaryWriter
_has_tensorboard = True
except ImportError:
_has_tensorboard = False
def is_tensorboard_available():
return _has_tensorboard
logger = logging.getLogger(__name__)
PREFIX_CHECKPOINT_DIR = "ort_checkpoint"
class OrtTrainer(Trainer):
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch,
optimized for Transformers.
"""
model: PreTrainedModel
args: TrainingArguments
data_collator: DataCollator
train_dataset: Optional[Dataset]
eval_dataset: Optional[Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
tb_writer: Optional["SummaryWriter"] = None
def __init__(
self,
model: PreTrainedModel,
args: TrainingArguments,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
"""
OrtTrainer is a simple but feature-complete training and eval loop for ORT,
optimized for Transformers.
Args:
prediction_loss_only:
(Optional) in evaluation and prediction, only return the loss
"""
super().__init__(model, args, data_collator, train_dataset, eval_dataset, compute_metrics, prediction_loss_only)
onnxruntime.set_seed(self.args.seed)
torch.cuda.set_device(self.args.local_rank)
def update_torch_model(self,):
if self.ort_model:
logger.info(
"Updating weights of torch model from ORT model."
)
ort_state_dict = checkpoint.experimental_state_dict(self.ort_model)
self.model.load_state_dict(ort_state_dict, strict=False)
else:
logger.warning(
"No ORT model found to update weights from, assuming torch model is up to date."
)
def gpt2_model_description(self, n_head, vocab_size, n_hidden, n_layer, n_ctx, batch_size):
logger.info("****num of head is: {}".format(n_head))
logger.info("****vocab size is: {}".format(vocab_size))
logger.info("****num of hidden layer is: {}".format(n_hidden))
logger.info("****num of layer is: {}".format(n_layer))
logger.info("****seq length is: {}".format(n_ctx))
# We are using hard-coded values for batch size and sequence length in order to trigger
# memory planning in ORT, which would reduce the memory footprint during training.
# Alternatively, one can set these as symbolic dims 'batch_size' and 'n_ctx' to be able
# to use dynamic input sizes.
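        # A minimal sketch of that symbolic alternative (not from the original code; it
        # assumes the ORTTrainer model description accepts string names for symbolic dims):
        #     model_desc = {'inputs': [('input_ids', ['batch_size', 'n_ctx']),
        #                              ('labels', ['batch_size', 'n_ctx'])],
        #                   'outputs': [('loss', [], True)]}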
model_desc = {'inputs': [('input_ids', [batch_size, n_ctx]),
('labels', [batch_size, n_ctx])],
'outputs': [('loss', [], True)]}
return model_desc
def get_train_dataloader(self) -> DataLoader:
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = (
RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset)
)
return DataLoader(
self.train_dataset,
batch_size=self.args.per_gpu_train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
def train(self, model_path: Optional[str] = None):
"""
Main training entry point.
Args:
model_path:
(Optional) Local path to model if model to train has been instantiated from a local path
If present, we will try reloading the optimizer/scheduler states from there.
"""
train_dataloader = self.get_train_dataloader()
if self.args.max_steps > 0:
t_total = self.args.max_steps
num_train_epochs = (
self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
)
else:
t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)
num_train_epochs = self.args.num_train_epochs
config = self.model.config
model_desc = self.gpt2_model_description(config.n_head,
config.vocab_size,
config.n_embd,
config.n_layer,
config.n_ctx,
self.args.per_gpu_train_batch_size)
from onnxruntime.capi._pybind_state import set_arena_extend_strategy, ArenaExtendStrategy
set_arena_extend_strategy(ArenaExtendStrategy.kSameAsRequested)
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optim_config = optim.AdamConfig(params=[{'params' : [n for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'lambda_coef': 0.0
}],
lr=self.args.learning_rate, alpha=0.9, beta=0.999, lambda_coef=self.args.weight_decay, epsilon=self.args.adam_epsilon)
warmup = self.args.warmup_steps / t_total
lr_scheduler = optim.lr_scheduler.LinearWarmupLRScheduler(total_steps=t_total, warmup=warmup)
loss_scaler = amp.DynamicLossScaler(automatic_update=True,
loss_scale=float(1 << 20),
up_scale_window=2000,
min_loss_scale=1.0,
max_loss_scale=float(1 << 24)) if self.args.fp16 else None
opts = orttrainer.ORTTrainerOptions({
'device': {'id': str(self.args.device)},
'distributed': {
'world_rank': self.args.world_rank,
'world_size': self.args.world_size,
'local_rank': self.args.local_rank,
'allreduce_post_accumulation': True},
'mixed_precision': {'enabled': self.args.fp16,
'loss_scaler': loss_scaler},
'batch': {'gradient_accumulation_steps': self.args.gradient_accumulation_steps},
'lr_scheduler': lr_scheduler})
self.ort_model = orttrainer.ORTTrainer(self.model, model_desc, optim_config, None, options=opts)
logger.info("****************************Model converted to ORT")
model = self.ort_model
if self.tb_writer is not None:
self.tb_writer.add_text("args", self.args.to_json_string())
# Train!
if self.is_world_master():
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataloader.dataset))
logger.info(" Num Epochs = %d", num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", self.args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (self.args.world_size if self.args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if model_path is not None:
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(model_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // self.args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
global_step = 0
logger.info(" Starting fine-tuning.")
tr_loss = 0.0
logging_loss = 0.0
global_batch_train_start = time.time()
train_iterator = trange(
epochs_trained, int(num_train_epochs), desc="Epoch", disable=self.args.local_rank not in [-1, 0],
)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=self.args.local_rank not in [-1, 0])
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if len(inputs['input_ids']) < self.args.per_gpu_train_batch_size:
# skip incomplete batch
logger.info('Skipping incomplete batch...')
continue
tr_loss += self._training_step(model, inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
len(epoch_iterator) <= self.args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
global_step += 1
global_batch_train_duration = time.time() - global_batch_train_start
global_batch_train_start = time.time()
if self.args.local_rank in [-1, 0]:
if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (
global_step == 1 and self.args.logging_first_step
):
logs = {}
loss_avg = (tr_loss - logging_loss) / (self.args.logging_steps * self.args.gradient_accumulation_steps)
logs["learning_rate"] = lr_scheduler.get_last_lr()[0]
logs["loss"] = loss_avg.item()
logs["global_step"] = global_step
logs["global_step_time"] = global_batch_train_duration
logging_loss = tr_loss.clone()
if self.tb_writer:
for k, v in logs.items():
self.tb_writer.add_scalar(k, v, global_step)
run.log(k, v)
epoch_iterator.write(json.dumps({**logs, **{"step": global_step}}))
if self.args.save_steps > 0 and global_step % self.args.save_steps == 0:
# In all cases (even distributed/parallel), self.model is always a reference
# to the model we want to save.
if hasattr(model, "module"):
assert model.module is self.ort_model
else:
assert model is self.ort_model
# Save model checkpoint
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{global_step}")
self.save_model(output_dir)
self._rotate_checkpoints()
if self.args.max_steps > 0 and global_step > self.args.max_steps:
epoch_iterator.close()
break
if self.args.max_steps > 0 and global_step > self.args.max_steps:
train_iterator.close()
break
if self.tb_writer:
self.tb_writer.close()
self.update_torch_model()
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
return TrainOutput(global_step, tr_loss / global_step)
def _training_step(
self, model: nn.Module, inputs: Dict[str, torch.Tensor]) -> float:
loss = model.train_step(**inputs)
return loss
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
self.update_torch_model()
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def evaluate_in_ORT(
self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent.
Args:
eval_dataset: (Optional) Pass a dataset if you wish to override
the one on the instance.
Returns:
A dict containing:
- the eval loss
"""
self.infer_sess = None
onnx_model_path = os.path.join(self.args.output_dir, "final_model.onnx")
output_names = [o_desc.name for o_desc in self.ort_model.model_desc.outputs]
# The eval batch size should be the same as finetuned onnx model's batch size
# as the graph exported for training is being used for inference
# Alternatively, we can export the onnx graph again to use a symbolic batch size
assert self.args.per_gpu_eval_batch_size == self.args.per_gpu_train_batch_size
# save the onnx graph
self.ort_model.save_as_onnx(onnx_model_path)
# delete the training model to free up GPU memory
del(self.ort_model)
self.ort_model = None
# create the inference session
self.infer_sess = onnxruntime.InferenceSession(onnx_model_path)
# load the eval dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
description = "Evaluation"
if self.is_world_master():
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", len(eval_dataloader.dataset))
logger.info(" Batch size = %d", eval_dataloader.batch_size)
eval_losses: List[float] = []
for inputs in tqdm(eval_dataloader, desc=description):
# for the last batch, pad to the batch size.
if len(inputs['input_ids']) < self.args.per_gpu_eval_batch_size:
pad_len = self.args.per_gpu_eval_batch_size - inputs['input_ids'].size()[0]
inputs['input_ids'] = torch.nn.functional.pad(inputs['input_ids'], (0,0,0,pad_len))
inputs['labels'] = torch.nn.functional.pad(inputs['labels'], (0,0,0,pad_len))
step_eval_loss = self.infer_sess.run(output_names,
{"input_ids": inputs["input_ids"].numpy(),
"labels": inputs["labels"].numpy()
})
eval_losses += [step_eval_loss[0]]
metrics = {}
if len(eval_losses) > 0:
metrics["loss"] = np.mean(eval_losses)
return metrics
def evaluate(
self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent.
Args:
eval_dataset: (Optional) Pass a dataset if you wish to override
the one on the instance.
Returns:
A dict containing:
- the eval loss
- the potential metrics computed from the predictions
"""
eval_dataloader = self.get_eval_dataloader(eval_dataset)
# update the torch model weights and delete the ort training model to free up GPU memory
self.update_torch_model()
del(self.ort_model)
self.ort_model = None
output = self._prediction_loop(eval_dataloader, description="Evaluation")
return output.metrics
| StarcoderdataPython |
1730592 | from rl.featurizer.featurizer import DiscreteFeaturizer
| StarcoderdataPython |
3321185 | from influxdb import InfluxDBClient, DataFrameClient
import numpy as np
import pandas as pd
import requests
import datetime
import time
import json
import os
import sys
import logging
def readOutput():
# Set some boolean success variables for timer
success = False
## Go up one directory and to the output folder using shitty path functions
output_path = os.path.join(os.getcwd(),'output')
# Archive Path is where files are moved to after processing
archive_path = os.path.join(os.getcwd(),'archive')
#Read CSV Inventory of address names corresponding to user IDs
df_sensors = pd.read_csv('mbientInventory.csv')
df_sensors.set_index('serialNumber', inplace = True)
# get list of csv file previous downloaded from the mbient sensor
filenames = os.listdir(output_path)
# filter the csvs
csv_filenames = list(filter(lambda x: x.split('.')[-1] =="csv", filenames))
# Itterate through csv files in output folder
for filename in csv_filenames:
print(filename)
# Get device address
address = filename.split('_')[2]
        # Get corresponding user id
user_id = df_sensors.loc[address, 'participantID']
sensor_location = df_sensors.loc[address, 'sensorLocation']
#Read Data
try:
current_dataframe = pd.read_csv(os.path.join(output_path,filename))
except pd.errors.EmptyDataError:
print("WARNING: Empty CSV! Skipping")
print(filename, "has no data, pease delete manually")
continue
#Set name of df to device address - may not be useful
current_dataframe.name = address
#--Time--
#Get timestamp in correct format from epoch time
timestamp = pd.to_datetime(current_dataframe["epoc (ms)"], unit = 'ms')
#Convert to Singapore time
timestamp = timestamp.dt.tz_localize('utc').dt.tz_convert('Asia/Singapore') # use proper timezone
#set index to timestamp and change name
current_dataframe.index = timestamp
current_dataframe.index.names = ["timestamp"]
# Resampling data to average values of the same minute, and padding the rest with NANs
        current_dataframe = current_dataframe.resample("1min").mean() # make readings evenly spaced in time
#Remove spaces, brackets and symbols from title
column_names = current_dataframe.columns
new_column_names = []
for name in column_names:
#remove unit
new_name = name.split(' ')[: -1]
#join with underscores
new_name = "_".join(new_name)
new_column_names.append(new_name)
#reset column names with new tidier names
current_dataframe.columns = new_column_names
#Drop Epoch and Elapsed time from measurement
current_dataframe = current_dataframe.drop(['epoc', 'elapsed'], axis = 1)
print(current_dataframe)
#Just in case we end up importing functions from this file
if __name__ == "__main__":
readOutput()
| StarcoderdataPython |
3301410 | <reponame>rolker/project11_navigation
#!/usr/bin/python3
'''
Written by <NAME> with contributions from <NAME>
'''
import rospy
import tf2_ros
import tf2_geometry_msgs
from nav_msgs.msg import OccupancyGrid
from geographic_visualization_msgs.msg import GeoVizItem, GeoVizPointList, GeoVizPolygon
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import PointStamped
from geographic_msgs.msg import GeoPoint
import project11
def costmap_callback(data):
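    # Convert the grid's origin and opposite corner to lat/lon, outline the grid, then emit
    # one filled GeoViz polygon per occupied cell (opacity scaled by the cell value) and
    # publish the result for the geographic display.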
origin = PointStamped()
origin.point = data.info.origin.position
origin.header = data.header
height_meters = data.info.resolution*data.info.height
width_meters = data.info.resolution*data.info.width
opposite = PointStamped()
opposite.point.x = origin.point.x+height_meters
opposite.point.y = origin.point.y+width_meters
opposite.point.z = origin.point.z
opposite.header = data.header
corners_ll = earth.pointListToGeoPointList((origin, opposite))
if len(corners_ll) == 2 and corners_ll[0] is not None and corners_ll[1] is not None:
origin_ll = corners_ll[0].position
opposite_ll = corners_ll[1].position
vizItem = GeoVizItem()
        vizItem.id = 'occupancy_grid'
plist = GeoVizPointList()
plist.color.r = 0.0
plist.color.g = 0.5
plist.color.b = 1.0
plist.color.a = 1.0
corners = [origin_ll,opposite_ll]
for i in ( (0,0), (0,1), (1,1), (1,0), (0,0) ):
gp = GeoPoint()
gp.latitude = corners[i[0]].latitude
gp.longitude = corners[i[1]].longitude
plist.points.append(gp)
vizItem.lines.append(plist)
dlat = (opposite_ll.latitude - origin_ll.latitude)/float(data.info.width)
dlong = (opposite_ll.longitude - origin_ll.longitude)/float(data.info.height)
for row in range(data.info.height):
for col in range(data.info.width):
if data.data[row*data.info.width+col] > 0:
intensity = data.data[row*data.info.width+col]/255.0
p = GeoVizPolygon()
p.fill_color.r = 0.1
p.fill_color.g = 0.1
p.fill_color.b = 0.1
p.fill_color.a = intensity
for i in ( (0,0), (0,1), (1,1), (1,0), (0,0) ):
gp = GeoPoint()
gp.latitude = origin_ll.latitude+dlat*(row+i[0])
gp.longitude = origin_ll.longitude+dlong*(col+i[1])
p.outer.points.append(gp)
vizItem.polygons.append(p)
display_publisher.publish(vizItem)
grid_sub = None
# allow tf_buffer to fill up a bit before asking for a grid
def delayed_subscribe(data):
global grid_sub
grid_sub = rospy.Subscriber('occupancy_grid', OccupancyGrid, costmap_callback, queue_size=1)
rospy.init_node("occupancy_to_camp")
earth = project11.nav.EarthTransforms()
tf_buffer = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(tf_buffer)
display_publisher = rospy.Publisher('project11/display', GeoVizItem, queue_size = 10)
rospy.Timer(rospy.Duration(2), delayed_subscribe, oneshot=True)
rospy.spin()
| StarcoderdataPython |
3331561 | <filename>src/ui/views/paragraphs.py
import Tkinter as tk
from . import ViewBase, ViewWithUpdate
from decorators import register_view
from functools import partial
from lxml import etree
@register_view('paragraph', 'interpParagraph')
class ParagraphView(ViewBase, ViewWithUpdate):
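    # Builds Tk entry/text widgets for the paragraph's label, target, marker, optional title
    # and content, and keeps them synchronised with the underlying lxml element through the
    # update_view*/update_model* helpers provided by the base classes (not shown here).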
def __init__(self, *args, **kwargs):
ViewBase.__init__(self, *args, **kwargs)
row = 0
label = tk.Label(self, text='Label')
self.label_var = tk.StringVar(self)
self.label = tk.Entry(self, textvariable=self.label_var, width=40)
label.grid(row=row, column=0)
self.label.grid(row=row, column=1, sticky='e')
self.update_view_attribute('label', self.label_var, str)
self.label_var.trace('w', partial(self.update_model_attribute, 'label', self.label_var))
row += 1
if self.model.tag == 'interpParagraph':
label = tk.Label(self, text='Target')
self.target_var = tk.StringVar(self)
self.target = tk.Entry(self, textvariable=self.target_var, width=40)
label.grid(row=row, column=0)
self.target.grid(row=row, column=1, sticky='e')
self.update_view_attribute('target', self.target_var, str)
self.target_var.trace('w', partial(self.update_model_attribute, 'target', self.target_var))
row += 1
label = tk.Label(self, text='Marker')
self.marker_var = tk.StringVar(self)
self.marker = tk.Entry(self, textvariable=self.marker_var, width=10)
label.grid(row=row, column=0)
self.marker.grid(row=row, column=1, sticky='e')
self.update_view_attribute('marker', self.marker_var, str)
self.marker_var.trace('w', partial(self.update_model_attribute, 'marker', self.marker_var))
row += 1
if self.model.find('title') is not None:
label = tk.Label(self, text='Title')
self.title_var = tk.StringVar(self)
self.title = tk.Entry(self, textvariable=self.title_var, width=80)
label.grid(row=row, column=0)
self.title.grid(row=row, column=1, sticky='e')
self.update_view('title', self.title_var, str)
self.title_var.trace('w', partial(self.update_model, 'title', self.title_var))
row += 1
label = tk.Label(self, text='Content', borderwidth=1)
self.content = tk.Text(self, wrap=tk.WORD, height=35)
label.grid(row=row, column=0)
self.content.grid(row=row, column=1, sticky='e')
self.content.bind('<KeyRelease>', self.update_content)
if self.model.find('content') is None:
content = etree.SubElement(self.model, 'content')
content.text = ''
self.content.insert(1.0, self.model.find('content').text.strip())
row += 1
def update_content(self, *args):
content_text = self.content.get(1.0, tk.END).strip()
#print 'setting text to:', content_text
self.model.find('content').text = content_text
| StarcoderdataPython |
131437 | <reponame>Ramossvitor/PYTHON
import math
num1 = float(input('Digite um numero: '))
print('O numero {} tem a parte inteira {:.0f}'.format(num1, math.trunc(num1))) | StarcoderdataPython |
1749353 | from neupy import layers
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class InputTestCase(BaseTestCase):
def test_input_exceptions(self):
layer = layers.Input(10)
error_message = "Input layer got unexpected input"
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layer.get_output_shape((10, 15))
def test_output_shape_merging(self):
layer = layers.Input(10)
self.assertShapesEqual(layer.get_output_shape((None, 10)), (None, 10))
self.assertShapesEqual(layer.get_output_shape((5, 10)), (5, 10))
layer = layers.Input((None, None, 3))
self.assertShapesEqual(
layer.get_output_shape((None, 28, 28, 3)),
(None, 28, 28, 3),
)
self.assertShapesEqual(
layer.get_output_shape((None, None, 28, 3)),
(None, None, 28, 3),
)
self.assertShapesEqual(
layer.get_output_shape((10, 28, 28, None)),
(10, 28, 28, 3),
)
def test_merged_inputs(self):
network = layers.join(
layers.Input((10, 2)),
layers.Input((None, 2)),
)
self.assertShapesEqual(network.input_shape, (None, 10, 2))
self.assertShapesEqual(network.output_shape, (None, 10, 2))
def test_input_layers_connected(self):
network = layers.join(layers.Input(1), layers.Input(1))
self.assertShapesEqual(network.input_shape, (None, 1))
self.assertShapesEqual(network.output_shape, (None, 1))
def test_input_repr(self):
self.assertEqual(
str(layers.Input(10)),
"Input(10, name='input-1')",
)
self.assertEqual(
str(layers.Input((10, 3))),
"Input((10, 3), name='input-2')",
)
self.assertEqual(
str(layers.Input((None, None, 3))),
"Input((None, None, 3), name='input-3')",
)
self.assertEqual(
str(layers.Input(None)),
"Input(None, name='input-4')",
)
def test_input_with_tensor_shape(self):
network = layers.join(
layers.Input(10),
layers.Relu(5),
)
network_2 = layers.join(
layers.Input(network.output_shape[1:]),
layers.Relu(3),
)
self.assertEqual(network_2.layers[0].shape, (5,))
self.assertShapesEqual(network_2.input_shape, (None, 5))
self.assertShapesEqual(network_2.output_shape, (None, 3))
| StarcoderdataPython |
52346 | import _init_paths
import tensorflow as tf
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os, sys, cv2
import argparse
from networks.factory import get_network
CLASSES = ('__background__', # always index 0
'Acura', 'Alpha-Romeo', 'Aston-Martin', 'Audi', 'Bentley', 'Benz', 'BMW', 'Bugatti', 'Buick', 'nike', 'adidas', 'vans', 'converse', 'puma', 'nb', 'anta', 'lining', 'pessi', 'yili', 'uniquo', 'coca', 'Haier', 'Huawei', 'Apple', 'Lenovo', 'McDonalds', 'Amazon')
def vis_detections(im, class_name, dets, ax, image_name, fc7, brands, thresh=0.0):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
print(len(inds))
for i in inds:
print(i)
param = fc7[i]
param = np.array(param)
np.save('/home/CarLogo/features/'+image_name[:-4]+'.npy', param)
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
brands.append(class_name)
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.savefig('/home/CarLogo/detect/'+class_name+'_'+image_name)
plt.axis('off')
plt.tight_layout()
plt.draw()
class extractor():
def __init__(self):
cfg.TEST.HAS_RPN = True # Use RPN for proposals
# init session
gpu_options = tf.GPUOptions(allow_growth=True)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# load network
self.net = get_network('VGGnet_test')
# load model
self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
# model = '/home/CarLogo/Faster_RCNN_TF/output/default/car_logo_train_list_27/VGGnet_fast_rcnn_iter_70000_test.ckpt'
model = '/home/CarLogo/Faster_RCNN_TF/output/default/car_logo_train_list_all/VGGnet_fast_rcnn_iter_70000.ckpt'
self.saver.restore(self.sess, model)
#sess.run(tf.initialize_all_variables())
print '\n\nLoaded network {:s}'.format(model)
im = 128 * np.ones((300, 300, 3), dtype=np.uint8)
for i in xrange(2):
_, _, _= im_detect(self.sess, self.net, im)
def get_feature(self,image_name):
#im_file = os.path.join(cfg.DATA_DIR, sys.argv[1], image_name)
im_file = os.path.join(cfg.DATA_DIR,'demo', image_name)
im = cv2.imread(im_file)
timer = Timer()
timer.tic()
scores, boxes,fc7 = im_detect(self.sess, self.net, im)
print(fc7.shape)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
CONF_THRESH = 0.8
NMS_THRESH = 0.3
brands=[]
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
# brands.append(cls)
vis_detections(im, cls, dets, ax, image_name, fc7,brands, thresh=CONF_THRESH)
return brands
if __name__ == '__main__':
e = extractor()
filename= os.path.join(cfg.DATA_DIR,sys.argv[1])
print('loading files from {}'.format(filename))
im_names=[]
for root, dirs, files in os.walk(filename):
print(files)
im_names = files
im_names = ['0024.jpg', '0075.jpg', '0084.jpg',
'0093.jpg']
for image_name in im_names:
brands = e.get_feature(image_name)
print(brands)
| StarcoderdataPython |
1728194 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from collections import deque, namedtuple
import warnings
import random
from utils.helpers import Experience
def sample_batch_indexes(low, high, size):
if high - low >= size:
# We have enough data. Draw without replacement, that is each index is unique in the
# batch. We cannot use `np.random.choice` here because it is horribly inefficient as
# the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion.
# `random.sample` does the same thing (drawing without replacement) and is way faster.
try:
r = xrange(low, high)
except NameError:
r = range(low, high)
batch_idxs = random.sample(r, size)
else:
# Not enough data. Help ourselves with sampling from the range, but the same index
# can occur multiple times. This is not good and should be avoided by picking a
# large enough warm-up phase.
warnings.warn('Not enough entries to sample without replacement. Consider increasing your warm-up phase to avoid oversampling!')
batch_idxs = np.random.random_integers(low, high - 1, size=size)
assert len(batch_idxs) == size
return batch_idxs
def zeroed_observation(observation):
if hasattr(observation, 'shape'):
return np.zeros(observation.shape)
elif hasattr(observation, '__iter__'):
out = []
for x in observation:
out.append(zeroed_observation(x))
return out
else:
return 0.
class RingBuffer(object):
def __init__(self, maxlen):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = [None for _ in range(maxlen)]
def __len__(self):
return self.length
def __getitem__(self, idx):
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
def append(self, v):
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + 1) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
class Memory(object):
def __init__(self, window_length, ignore_episode_boundaries=False):
self.window_length = window_length
self.ignore_episode_boundaries = ignore_episode_boundaries
self.recent_observations = deque(maxlen=window_length)
self.recent_terminals = deque(maxlen=window_length)
def sample(self, batch_size, batch_idxs=None):
raise NotImplementedError()
def append(self, observation, action, reward, terminal, training=True):
self.recent_observations.append(observation)
self.recent_terminals.append(terminal)
def get_recent_state(self, current_observation):
# This code is slightly complicated by the fact that subsequent observations might be
# from different episodes. We ensure that an experience never spans multiple episodes.
# This is probably not that important in practice but it seems cleaner.
state = [current_observation]
idx = len(self.recent_observations) - 1
for offset in range(0, self.window_length - 1):
current_idx = idx - offset
current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False
if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
# The previously handled observation was terminal, don't add the current one.
# Otherwise we would leak into a different episode.
break
state.insert(0, self.recent_observations[current_idx])
while len(state) < self.window_length:
state.insert(0, zeroed_observation(state[0]))
return state
def get_config(self):
config = {
'window_length': self.window_length,
'ignore_episode_boundaries': self.ignore_episode_boundaries,
}
return config
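# Usage sketch (illustrative, not part of the original module): it relies only on
# the classes defined above. RingBuffer overwrites its oldest entries once maxlen
# is reached, and Memory stacks the most recent observations into a state.
if __name__ == '__main__':
    buf = RingBuffer(maxlen=3)
    for v in range(5):
        buf.append(v)
    print([buf[i] for i in range(len(buf))])  # [2, 3, 4]: 0 and 1 were overwritten

    mem = Memory(window_length=2)
    mem.append(observation=np.zeros(4), action=0, reward=0.0, terminal=False)
    state = mem.get_recent_state(np.ones(4))
    print(len(state))  # window_length observations, zero-padded when history is short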
| StarcoderdataPython |
3349809 | from __future__ import print_function
#%matplotlib inline
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from beginner_source.dcgan_faces_tutorial import (
dataset,
dataloader,
device,
real_batch,
fixed_noise,
netG,
)
PATH = "DCGAN-trained-8x8-full.pickle"
img_list = []
netG.load_state_dict(torch.load(PATH))
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
# Plot the real images
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0)))
plt.savefig("real-images")
# Plot the fake images from the last epoch
plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1],(1,2,0)))
plt.savefig("fake-images")
| StarcoderdataPython |
192869 | <reponame>franklinzhanggis/model-interoperable-engine
from .utils import HttpHelper
class Service:
def __init__(self, ip, port):
self.ip = ip
self.port = port
def getBaseURL(self):
return "http://" + self.ip + ":" + str(self.port) + "/"
def connect(self):
strData = HttpHelper.Request_get_str_sync(self.ip, self.port, "/ping")
if strData == "OK":
return True
else:
return False
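# Usage sketch (illustrative; the host and port below are placeholders and a
# running model service is assumed): build the base URL and ping the service.
if __name__ == '__main__':
    service = Service('127.0.0.1', 8061)
    print(service.getBaseURL())  # http://127.0.0.1:8061/
    print(service.connect())     # True when the /ping endpoint answers "OK"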
| StarcoderdataPython |
3276863 | <filename>8-recursion-and-dynamic-programming/2-robot-in-grid/dp_solution.py
"""
Problem:
8.2 Robot in a Grid: Imagine a robot sitting on the upper left corner of grid with r rows and c columns.
The robot can only move in two directions, right and down, but certain cells are "off limits" such that
the robot cannot step on them. Design an algorithm to find a path for the robot from the top left to
the bottom right.
Hints: #331, #360, #388
--
Questions:
- Is there always a valid solution? That is, the robot can always get to the bottom right?
No. Take care of this case.
- Is it safe to assume that it cannot walk through the walls of the grid?
Yes.
- Can we return any path? Does it make sense to try to find the shortest one?
I am going to assume any path is valid, since any path will have the same number of steps.
--
Algorithm:
[r,0,0,0,0]
[0,0,x,0,0]
[0,x,0,0,0]<
[0,0,0,0,0]
^
To reach the bottom right cell (r, c), the robot needs to be able to reach the cells
to the left (r, c-1) and move right or the cell above it (r-1, c) and move down.
That means that we can break this into subproblems. By removing a row or column,
we try to find if the robot can reach the new point (r, c). So, we recursively
try to see if a robot in the origin can reach this new point.
-- Optimizing it --
Notice that we are going through some cells more than one time.
Ex: (r-1, c-1) is reached when we go to (r-1,c) and (r, c-1). So, we could
mark it as reachable or not to prevent duplicate work.
"""
def robot_in_grid_points(grid):
path = []
height = len(grid)
width = len(grid[0])
def find_path(grid, row, col, path):
if row == 0 and col == 0:
path.append((0, 0))
return True
if not is_path(grid, row, col):
return False
if find_path(grid, row, col - 1, path) or find_path(grid, row - 1, col, path):
path.append((row, col))
return True
return False
def is_path(grid, row, col):
if row < 0 or col < 0 or grid[row][col] == "x":
return False
return True
find_path(grid, height - 1, width - 1, path)
return path
def robot_in_grid_points_memo(grid):
path = []
height = len(grid)
width = len(grid[0])
memo = dict()
def find_path(grid, row, col, path, memo):
if row == 0 and col == 0:
path.append((0, 0))
return True
if (row, col) in memo:
return memo[(row, col)]
if not is_path(grid, row, col):
memo[(row, col)] = False
return False
if find_path(grid, row, col - 1, path, memo) or find_path(
grid, row - 1, col, path, memo
):
memo[(row, col)] = True
path.append((row, col))
return True
memo[(row, col)] = False
return False
def is_path(grid, row, col):
if row < 0 or col < 0 or grid[row][col] == "x":
return False
return True
find_path(grid, height - 1, width - 1, path, memo)
return path
def test(grid, expected_answer):
answer = robot_in_grid_points_memo(grid)
if answer != expected_answer:
raise Exception(
f"Answer {answer} is wrong. Expected answer is {expected_answer}"
)
if __name__ == "__main__":
test([["r"]], [(0, 0)])
test(
[
["r", "0", "0"],
["0", "0", "0"],
],
[(0, 0), (1, 0), (1, 1), (1, 2)],
),
test(
[
["r", "x", "0"],
["x", "0", "0"],
],
[],
)
test(
[
["r", "x", "0"],
["0", "x", "0"],
["0", "0", "0"],
],
[(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)],
)
test(
[
["r", "x", "0"],
["0", "0", "0"],
["x", "0", "0"],
],
[(0, 0), (1, 0), (1, 1), (2, 1), (2, 2)],
),
test(
[
["r", "0", "0", "0", "0", "0", "0"],
["0", "x", "x", "x", "x", "x", "x"],
["0", "0", "0", "0", "0", "0", "0"],
["0", "x", "x", "x", "x", "x", "x"],
["0", "0", "0", "0", "0", "0", "0"],
["0", "x", "x", "x", "x", "x", "x"],
["0", "0", "0", "0", "0", "0", "x"],
["0", "0", "0", "0", "0", "0", "0"],
],
[
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(4, 0),
(5, 0),
(6, 0),
(7, 0),
(7, 1),
(7, 2),
(7, 3),
(7, 4),
(7, 5),
(7, 6),
],
)
test(
[
["r", "0", "0", "x", "0"],
["0", "0", "0", "x", "x"],
["x", "x", "0", "0", "0"],
["x", "x", "x", "x", "0"],
],
[(0, 0), (1, 0), (1, 1), (1, 2), (2, 2), (2, 3), (2, 4), (3, 4)],
)
print("All tests passed!")
| StarcoderdataPython |
3356866 | """
Functions for saving experiment data
"""
from os.path import join
import torch
from torch.nn import Module
from src.metrics.metric_recollector import MetricRecollector
DATA_PROCESSED_FOLDER = 'data/processed'
MODELS_FOLDER = 'models'
def save_experiment_data(
experiment: str,
model: Module,
metrics: MetricRecollector) -> None:
"""Function to save experiment data"""
experiment_folder = join(DATA_PROCESSED_FOLDER, experiment.lower())
metrics.save_metrics(experiment_folder)
torch.save(model.state_dict(),
join(MODELS_FOLDER, f'{experiment.lower()}.pt'))
| StarcoderdataPython |
1669442 | from collections import deque
class Solution:
def findOrder(self, numCourses: int, prerq: list) -> list:
# build an adjcaent list
graph = {}
for e in prerq:
if e[1] in graph:
graph[e[1]].append(e[0])
else:
graph[e[1]] = [e[0]]
indegree = [0 for _ in range(numCourses)]
for course in range(numCourses):
if course in graph:
for neighbor in graph[course]:
indegree[neighbor] += 1
queue = deque()
res = []
# push all of nodes that has 0 in-degree
for course in range(numCourses):
if indegree[course] == 0:
queue.append(course)
while len(queue) > 0:
popped = queue.popleft()
res.append(popped)
if popped in graph:
for neighbor in graph[popped]:
indegree[neighbor] -= 1
if indegree[neighbor] == 0:
queue.append(neighbor)
if len(res) < numCourses:
return []
return res | StarcoderdataPython |
4818323 | #!/usr/bin/env python
from __future__ import print_function
import os
import ecflow
# When no arguments specified uses ECF_HOST and/or ECF_PORT,
# Explicitly set host and port using the same client
# For alternative argument list see ecflow.Client.set_host_port()
HOST = os.getenv("ECF_HOST", "localhost")
PORT = int(os.getenv("ECF_PORT", "%d" % (1500 + os.getuid())))
CLIENT = ecflow.Client(HOST + ":%d" % PORT)
try:
CLIENT.ping()
except RuntimeError as err:
print("ping failed: " + str(err))
try:
CLIENT.restart_server()
print("Server was restarted")
except RuntimeError as err:
print("Server could not be restarted")
try:
NAME = os.getenv("SUITE", "elearning")
CLIENT.begin_suite("/%s" % NAME)
print("Suite %s is now begun" % NAME)
except RuntimeError as err:
print("suite %s could not begin" % NAME)
| StarcoderdataPython |
79264 | <filename>src/loadbalancer/job.py
"""The job class represents a job on the cluster."""
from datetime import datetime
class Job:
def __init__(self, job_id, requested_queue, assigned_queue, owner, state, predecessors, submit_timestamp):
"""Constructor
Args:
job_id (int) - The job ID.
requested_queue (str) - The name of the queue this job is submitted on.
assigned_queue (str) - The name of the queue this job is running on, or None.
owner (str) - The owner of the job.
state (str) - The state of the running job.
predecessors ([int]) - List of job ids this job depends on.
submit_timestamp (int) - The unix timestamp at which this job was submitted.
"""
self.job_id = job_id
self.requested_queue = requested_queue
self.assigned_queue = assigned_queue
self.owner = owner
self.state = state
self.predecessors = predecessors
self.submit_timestamp = submit_timestamp
def running(self):
"""Is this job running or not."""
return self.assigned_queue is not None
def has_predecessors(self):
"""Does this job depend on other jobs?"""
return self.predecessors is not None and len(self.predecessors) > 0
def __str__(self):
submit_date = datetime.utcfromtimestamp(self.submit_timestamp).strftime('%Y-%m-%d %H:%M:%S')
return 'Job %d: %s on %s submitted by %s at %s' % (
self.job_id, self.state, self.requested_queue, self.owner, submit_date)
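# Usage sketch (illustrative values): a queued job with no assigned queue and no
# predecessors reports that it is not running and depends on nothing.
if __name__ == '__main__':
    job = Job(42, 'batch.q', None, 'alice', 'qw', [], 1500000000)
    print(job.running())           # False: no queue assigned yet
    print(job.has_predecessors())  # False: empty predecessor list
    print(job)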
| StarcoderdataPython |
3348434 | <filename>tests/api/test_webapplication.py<gh_stars>1-10
import pytest
from bromine import WebApplication
@pytest.fixture(name='app')
def app_fixture():
app = WebApplication('https://www.example.com', object())
return app
def test_base_url(app):
assert app.base_url == 'https://www.example.com'
def test_browser(app):
assert hasattr(app, 'browser')
def test_initial_current_page(app):
assert app.current_page is None
def test_current_page(app):
page = object()
app.current_page = page
assert app.current_page is page
| StarcoderdataPython |
3354931 | """ PyTest Configuration """
import mongoengine
def pytest_configure(config):
"""setup configuration"""
import sys
sys._called_from_test = True
from settings import settings
mongoengine.connect(host=settings.MONGODB_URI)
def pytest_unconfigure(config):
"""teardown configuration"""
conn = mongoengine.connection.get_connection()
conn.drop_database('test_badash')
import sys
del sys._called_from_test
| StarcoderdataPython |
139049 | <reponame>FarhanAliRaza/django-sockpuppet
from sockpuppet.reflex import Reflex
class ExampleReflex(Reflex):
def increment(self, step=1):
self.session['count'] = int(self.element.dataset['count']) + step
class DecrementReflex(Reflex):
def decrement(self, step=1):
self.session['otherCount'] = int(self.element.dataset['count']) - step
class ParamReflex(Reflex):
def change_word(self):
self.word = 'space'
self.success = True
class FormReflex(Reflex):
def submit(self):
self.text_output = self.request.POST['text-input']
class ErrorReflex(Reflex):
def increment(self, step=1):
raise Exception('error happened')
class UserReflex(Reflex):
def get_user(self):
context = self.get_context_data()
self.user_reveal = context['object']
| StarcoderdataPython |
1629440 | <gh_stars>0
#!/usr/bin/env python
from collections import deque
import numpy as np
from numpy.random import choice, randint
import matplotlib.pyplot as plt
def maze(cells=(25, 25), start=(0, 0), exit=(-1, -1)):
"""Generates a binary numpy array that represents a maze.
Values of 1 are colored and are representing walls, while
values of 0 are not colored and represent path.
"""
if cells[0] <= 0 or cells[1] <= 0:
# Just quit if invalid parameters are passed.
raise ValueError('Invalid maze dimensions')
# --- Lookup Table Data ---
# Table of direction vectors mapped to wallflags.
vdirtable = {
1: np.array([0, 1]),
2: np.array([1, 0]),
4: np.array([0, -1]),
8: np.array([-1, 0]),
}
# Table of opposing directions mapped to each other.
odirtable = {1: 4, 2: 8, 4: 1, 8: 2}
# Table of direction tuples mapped to wallflag sums.
walltable = {i: tuple(j for j in (8, 4, 2, 1) if i & j) for i in range(16)}
# --- Maze grid initialization ---
# Build maze using an odd shape value to make walls their own cells.
mgrid = np.ones((2*cells[0] + 1, 2*cells[1] + 1, 2), dtype=int)
# Create the seed used to generate the maze from.
mseed = np.fromiter((2*randint(0, size-1) + 1 for size in cells), int)
# Initialize wallflag matrix.
if cells == (1, 1):
mgrid[1, 1, 1] = 0
elif cells[0] == 1:
mgrid[1, 1, 1] = 1
mgrid[1, 3:-2:2, 1] = 5
mgrid[1, -2, 1] = 4
elif cells[1] == 1:
mgrid[1, 1, 1] = 2
mgrid[3:-2:2, 1, 1] = 10
mgrid[-2, 1, 1] = 8
else:
# Corners
mgrid[1, 1, 1] = 3
mgrid[-2, 1, 1] = 9
mgrid[-2, -2, 1] = 12
mgrid[1, -2, 1] = 6
# Edges
mgrid[1, 3:-2:2, 1] = 7
mgrid[3:-2:2, 1, 1] = 11
mgrid[-2, 3:-2:2, 1] = 13
mgrid[3:-2:2, -2, 1] = 14
# Centre
mgrid[3:-2:2, 3:-2:2, 1] = 15
# The number of maze nodes specified.
nsize = cells[0] * cells[1]
def open_wall(npos):
"""Opens the wall of a maze to allow entry and exit."""
gpos = np.fromiter(map(lambda x: 2*x + int(x >= 0), npos), int)
try:
wallflag = walltable[mgrid[gpos[0], gpos[1], 1]]
except IndexError:
raise IndexError('Entrypoint cell outside of maze')
for d in (i for i in (4, 1, 8, 2) if i not in wallflag):
wpos = gpos + vdirtable[d]
if mgrid[wpos[0], wpos[1], 0]:
mgrid[wpos[0], wpos[1], 0] = False
break
else:
raise ValueError('Entrypoint cell not on maze border')
# Open the designated entrance and exit.
open_wall(start)
open_wall(exit)
del open_wall
# Current path stack.
mpath = deque([mseed])
# Open the starting cell.
mgrid[mseed[0], mseed[1], 0] = False
# Debug counter.
itercnt = 0
# Counts the number of node cells opened in the maze.
# When the number of open cells equals the original passed mazesize,
# The algorithm has filled up the maze and may stop.
cellcnt = 1
# If all of the original nodes have been visited, the maze is done.
while cellcnt != nsize:
itercnt += 1
# Obtain current cell.
thispos = mpath[-1]
thiscel = mgrid[thispos[0], thispos[1]]
# print(mgrid[:,:,0])
while thiscel[1]:
cdir = choice(walltable[thiscel[1]])
vdir = vdirtable[cdir]
# Remove chosen direction from both templist and matrix.
thiscel[1] -= cdir
# Get the position of the next cell.
nextpos = thispos + 2 * vdir
nextcel = mgrid[nextpos[0], nextpos[1]]
odir = odirtable[cdir]
if nextcel[1] & odir:
nextcel[1] -= odir
if nextcel[0]:
cellcnt += 1
# Get the position of the wall to open.
wallpos = thispos + vdir
# Add the next cell to the current path.
if nextcel[1]:
mpath.append(nextpos)
# Open the path to the next cell.
mgrid[nextpos[0], nextpos[1], 0] = False
mgrid[wallpos[0], wallpos[1], 0] = False
break
else:
# There are no more unexplored directions, backtrack.
mpath.pop()
print("Performed", itercnt, "iterations for maze of size", nsize)
# Return only the binary array of walls and paths.
return mgrid[:,:,0]
if __name__ == '__main__':
newmaze = maze((100, 100), (50, 0), (50, -1))
plt.imsave('maze.png', newmaze, cmap=plt.cm.binary)
plt.figure(figsize=(9, 8))
plt.imshow(newmaze, cmap=plt.cm.binary, interpolation='nearest')
plt.xticks([])
plt.yticks([])
plt.show()
| StarcoderdataPython |
67856 | """ Miscellaneous utility functions """
import logging
import os
import pandas as pd
logger = logging.getLogger(__name__)
def load_df_from_dataset(file_name: str) -> pd.DataFrame:
""" Loads cleaned dataframe from csv
Fields with extra records get logged and dropped.
"""
df = pd.read_csv(file_name)
bad_column = None
bad_column_name = None
for column in df.columns:
if not column.startswith('Unnamed:'):
# Only care about unnamed columns
continue
column_id = column.split(':')[-1].strip()
if column_id == '0':
# Ignore unnamed column if it is the very first one (eg index)
continue
bad_column_name = column
bad_column = df[column]
logger.warning('Found suspicious column "%s"' % column)
if bad_column is not None:
# Drop records that have entries in trailing unnamed column
bad_records = df[bad_column.notna()]
for record in bad_records.iterrows():
logger.warning('Skipping record: #%s' % record[0])
df = df.drop(bad_records.index)
df = df.drop(columns=[bad_column_name])
return df
def get_default_dataset_filename() -> str:
""" Returns default dataset file name """
return os.environ.get('DATASET_NAME', 'data/movie_metadata.csv')
def nan_to_none(value, default=None):
""" Casts numpy/pandas nan values to none, or some other default """
if pd.isna(value):
return default
else:
return value
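# Usage sketch (illustrative): nan_to_none maps numpy/pandas NaN values to a
# default while passing ordinary values through unchanged.
#     nan_to_none(float('nan'))       # -> None
#     nan_to_none(float('nan'), 0)    # -> 0
#     nan_to_none('Inception')        # -> 'Inception'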
| StarcoderdataPython |
1658355 | <filename>pytype/tests/test_recovery.py<gh_stars>10-100
"""Tests for recovering after errors."""
from pytype.tests import test_inference
class RecoveryTests(test_inference.InferenceTest):
"""Tests for recovering after errors.
The type inferencer can warn about bad code, but it should never blow up.
These tests check that we don't faceplant when we encounter difficult code.
"""
def testBadSubtract(self):
ty = self.Infer("""
def f():
t = 0.0
return t - ("bla" - t)
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> ?
""")
def testBadCall(self):
ty = self.Infer("""
def f():
return "%s" % chr("foo")
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> str
""")
def testBadFunction(self):
ty = self.Infer("""
import time
def f():
return time.unknown_function(3)
def g():
return '%s' % f()
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
time = ... # type: module
def f() -> ?
def g() -> str
""")
def testInheritFromInstance(self):
ty = self.Infer("""
class Foo(3):
pass
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
class Foo(?):
pass
""")
def testNameError(self):
ty = self.Infer("""
x = foobar
class A(x):
pass
pow(A(), 2)
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
x = ... # type: ?
class A(?):
pass
""")
def testObjectAttr(self):
self.assertNoCrash("""
object.bla(int)
""")
def testAttrError(self):
ty = self.Infer("""
class A:
pass
x = A.x
class B:
pass
y = "foo".foo()
object.bar(int)
class C:
pass
""", deep=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
class A:
pass
x = ... # type: ?
class B:
pass
y = ... # type: ?
class C:
pass
""")
def testNoSelf(self):
ty = self.Infer("""
class Foo(object):
def foo():
pass
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
class Foo(object):
def foo(): ...
""")
def testWrongCall(self):
ty = self.Infer("""
def f():
pass
f("foo")
x = 3
""", deep=True, solve_unknowns=True, report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> None: ...
x = ... # type: int
""")
def testDuplicateIdentifier(self):
ty = self.Infer("""
class A(object):
def __init__(self):
self.foo = 3
def foo(self):
pass
""", deep=True)
self.assertTypesMatchPytd(ty, """
from typing import Any
class A(object):
foo = ... # type: Any
""")
if __name__ == "__main__":
test_inference.main()
| StarcoderdataPython |
1782630 | # Generated by Django 2.2.1 on 2019-07-26 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0019_auto_20190617_0141'),
]
operations = [
migrations.CreateModel(
name='ClassTemp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('semester', models.IntegerField(choices=[(1, 'First'), (2, 'Second')])),
('name', models.CharField(max_length=50)),
('code', models.CharField(max_length=20)),
('professor', models.CharField(max_length=50)),
('place', models.CharField(max_length=50)),
('size', models.IntegerField()),
('grade', models.IntegerField(choices=[(0, 'Zero'), (1, 'One'), (2, 'Two'), (3, 'Three'), (4, 'Four')])),
('start1', models.TimeField(blank=True, null=True)),
('end1', models.TimeField(blank=True, null=True)),
('week1', models.CharField(blank=True, choices=[('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday')], max_length=20, null=True)),
('start2', models.TimeField(blank=True, null=True)),
('end2', models.TimeField(blank=True, null=True)),
('week2', models.CharField(blank=True, choices=[('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday')], max_length=20, null=True)),
('start3', models.TimeField(blank=True, null=True)),
('end3', models.TimeField(blank=True, null=True)),
('week3', models.CharField(blank=True, choices=[('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday')], max_length=20, null=True)),
('start4', models.TimeField(blank=True, null=True)),
('end4', models.TimeField(blank=True, null=True)),
('week4', models.CharField(blank=True, choices=[('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday')], max_length=20, null=True)),
('start5', models.TimeField(blank=True, null=True)),
('end5', models.TimeField(blank=True, null=True)),
('week5', models.CharField(blank=True, choices=[('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday')], max_length=20, null=True)),
],
),
]
| StarcoderdataPython |
53163 | <filename>Util.py
# -*- coding:utf-8 -*-
import random
import string
import base64
import hmac
import hashlib
import logging
import sys
import time
import os
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
ISO8601 = '%Y%m%dT%H%M%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
class InitSSHRemoteHost:
def __init__(self, ip, username, password, tools_path, tool_number):
self.localIP = ip
self.username = username
self.password = password
self.tools_path = tools_path
self.tool_number = tool_number
def random_string_create(string_length):
if isinstance(string_length, int):
return ''.join(
[random.choice(string.ascii_letters + string.digits + string.punctuation.translate(None, '!,%&<>\'\\^`')) for n in
range(string_length)])
else:
print 'input error'
def generate_image_format(image_format):
"""
Generate the image transcoding format.
:param image_format:
:return:
"""
if str(image_format).find(',') != -1:
format_array = image_format.split(',')
return format_array[random.randint(0, len(format_array) - 1)]
return image_format
def generate_a_size(data_size_str):
"""
Return the object size and whether it is a fixed value (ifFixed = True), so repeated requests can be avoided for fixed sizes.
:param data_size_str:
:return:
"""
if str(data_size_str).find('~') != -1 and str(data_size_str).find(',') != -1:
size_array = data_size_str.split(',')
size_chosen = size_array[random.randint(0, len(size_array) - 1)]
start_size = int(size_chosen.split('~')[0])
end_size = int(size_chosen.split('~')[1])
return random.randint(start_size, end_size), False
elif str(data_size_str).find('~') != -1:
start_size = int(data_size_str.split('~')[0])
end_size = int(data_size_str.split('~')[1])
return random.randint(start_size, end_size), False
elif str(data_size_str).find(',') != -1:
size_array = data_size_str.split(',')
return int(size_array[random.randint(0, len(size_array) - 1)]), False
else:
return int(data_size_str), True
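# Usage sketch (illustrative): generate_a_size accepts a fixed value, a
# "low~high" range, a comma-separated list, or a mix of ranges and values.
#     generate_a_size('1024')          # -> (1024, True), fixed size
#     generate_a_size('512~2048')      # -> (random int in [512, 2048], False)
#     generate_a_size('128,256,512')   # -> (random choice, False)
#     generate_a_size('1~10,100~200')  # -> (random pick, then random in range, False)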
def get_utf8_value(value):
if not value:
return ''
if isinstance(value, str):
return value
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
def compare_version(v1, v2):
v1 = v1.split('.')
v2 = v2.split('.')
try:
for i in range(0, len(v1)):
if len(v2) < i + 1:
return 1
elif int(v1[i]) < int(v2[i]):
return -1
elif int(v1[i]) > int(v2[i]):
return 1
except:
return -1
if len(v2) > len(v1):
return -1
return 0
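# Usage sketch (illustrative): compare_version performs a segment-wise numeric
# comparison and returns -1, 0 or 1; non-numeric segments compare as -1.
#     compare_version('1.2.3', '1.2')  # -> 1
#     compare_version('1.2', '1.10')   # -> -1
#     compare_version('2.0', '2.0')    # -> 0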
def generate_config_file(config_file):
"""
generate specific configuration file
:param config_file:
:return: config generated
"""
config = {}
try:
f = open(config_file, 'r')
lines = f.readlines()
for line in lines:
line = line.strip()
if line and line[0] != '#':
config[line[:line.find('=')].strip()] = line[line.find('=') + 1:].strip()
else:
continue
f.close()
except Exception, e:
print '[ERROR] Read config file %s error: %s' % (config_file, e)
sys.exit()
return config
def read_distribute_config(config_file='distribute_config.dat'):
"""
read given distribute file configuration
:param config_file:
:return:
"""
config = generate_config_file(config_file)
config['Slaves'] = config['Slaves'].replace(' ', '').replace(',,', ',')
config['Usernames'] = config['Usernames'].replace(' ', '').replace(',,', ',')
config['Passwords'] = config['Passwords'].replace(' ', '').replace(',,', ',')
config['Toolpaths'] = config['Toolpaths'].replace(' ', '').replace(',,', ',')
config['ToolNumberPerServer'] = config['ToolNumberPerServer'].replace(' ', '').replace(',,', ',')
if config['Master'] is not None and config['Master'] and \
config['Slaves'] is not None and config['Slaves'] and \
config['Usernames'] is not None and config['Usernames'] and \
config['Passwords'] is not None and config['Passwords'] and \
config['Toolpaths'] is not None and config['Toolpaths'] and \
config['ToolNumberPerServer'] is not None and config['ToolNumberPerServer'] and \
config['RunTime'] is not None and config['RunTime']:
pass
else:
raise Exception('Some config(s) are missing')
return config
def generate_slave_servers(config):
"""
initialize slave servers
:param config: distribute configuration
:return: generated slave servers
"""
slaves = []
slave_ips = config['Slaves'].split(',')
slave_usernames = config['Usernames'].split(',')
slave_passwords = config['Passwords'].split(',')
slave_tool_paths = config['Toolpaths'].split(',')
slave_tool_numbers = config['ToolNumberPerServer'].split(',')
k = 0
for i in xrange(len(slave_ips)):
for j in xrange(int(slave_tool_numbers[i])):
ip = slave_ips[i]
username = slave_usernames[i] if len(slave_usernames) > 1 else slave_usernames[0]
password = slave_passwords[i] if len(slave_passwords) > 1 else slave_passwords[0]
tool_path = slave_tool_paths[k]
k += 1
tool_number = "1"
slaves.append(InitSSHRemoteHost(ip, username, password, tool_path, tool_number))
return slaves
def generate_connections(servers):
"""
generate provided servers' connections
:param servers:
:return:
"""
from long_ssh_connection import LongSSHConnection
connects = []
for server in servers:
connect = LongSSHConnection(server)
# build the connection to provided server
logging.debug("Build connection to server[%s]" % server.localIP)
r = connect.execute_cmd('ssh %s@%s' % (server.username, server.localIP), timeout=10)
if r.endswith('?'):
connect.execute_cmd('yes', expect_end=':')
connect.execute_cmd(server.password, expect_end='#')
logging.debug("Successfully built the connection to server[%s]" % server.localIP)
# go to provided tool path
logging.debug("Go to provided tool path[%s] of server[%s]" % (server.tools_path, server.localIP))
connect.execute_cmd('cd %s' % server.tools_path, timeout=5)
connects.append(connect)
return connects
def get_brief_file_name(connect):
"""
get brief file name
:param connect:
:return:
"""
logging.warn("try to get brief file from server: %s" % connect.ip)
get_slave_brief_file_name_result = connect.execute_cmd(r"ls -t result/*_brief.txt | head -1")
tmp = get_slave_brief_file_name_result.split('\r\n')[0]
return tmp.split('/')[1]
def start_tool(connect, test_case, run_time):
"""
start tool in server
:param connect:
:param test_case:
:param run_time:
:return:
"""
print "Start at %s, send run signal to slave[%s]" % (time.strftime('%X %x %Z'), connect.ip)
logging.warn("send run signal to server %s" % connect.ip)
connect.execute_cmd('python run.py %s' % test_case, timeout=10)
def convert_time_format_str(time_sec):
if time_sec < 0:
return '--\'--\'--'
if time_sec >= 8553600:
return '>99 days'
elif time_sec >= 86400:
return '%2.2d Days %2.2d\'%2.2d\'%2.2d' % (
time_sec / (3600 * 24), time_sec % (3600 * 24) / 3600, (time_sec % 3600 / 60), (time_sec % 60))
else:
ms = time_sec - int('%2.2d' % (time_sec % 60))
return '%2.2d\'%2.2d\'%2.2d.%d' % (time_sec / 3600, (time_sec % 3600 / 60), (time_sec % 60), ms * 1000)
def generate_response(response):
"""
response of server always contains "\r\n", need to remove it
:param response: response of server
:return:
"""
if response is not None:
resp = response.split('\r\n')
resp = resp[0]
return resp
else:
raise Exception("response of server is none, please confirm it.")
def convert_to_size_str(size_bt):
kb = 2 ** 10
mb = 2 ** 20
gb = 2 ** 30
tb = 2 ** 40
pb = 2 ** 50
if size_bt >= 100 * pb:
return '>100 PB'
elif size_bt >= pb:
return "%.2f PB" % (size_bt / (pb * 1.0))
elif size_bt >= tb:
return "%.2f TB" % (size_bt / (tb * 1.0))
elif size_bt >= gb:
return "%.2f GB" % (size_bt / (gb * 1.0))
elif size_bt >= mb:
return "%.2f MB" % (size_bt / (mb * 1.0))
elif size_bt >= kb:
return "%.2f KB" % (size_bt / (kb * 1.0))
else:
return "%.2f B" % size_bt
| StarcoderdataPython |
1782083 | <filename>src/curt/curt/modules/vision/image_classification.py
"""
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
import tvm
from tvm.contrib import graph_runtime
import numpy as np
import time
from scipy.special import expit, logit
import cv2
import math
import os
import logging
from curt.modules.vision.utils import decode_image_byte, image_labels
from curt.modules.vision.tvm_processing import TVMProcessing
class ImageClassification(TVMProcessing):
def __init__(self):
super().__init__( "cpu",
"tuned32_efficientnet_lite.json",
"tuned32_efficientnet_lite_lib.tar",
"tuned32_efficientnet_lite_param.params",
"images",
1)
self.input_width = 224
self.input_height = 224
self.friendly_name = "image_classification_pi"
def preprocess_input(self, params):
img = params[0]
if img is None:
logging.warning("Image Classification: " + "imgae is None")
return None
if isinstance(img, str):
img = decode_image_byte(img)
img = cv2.resize(img, (self.input_width, self.input_height), interpolation=cv2.INTER_NEAREST)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype("float32")
img = cv2.normalize(img, None, -0.992188, 1, cv2.NORM_MINMAX)
img = img[np.newaxis, :]
return img
def process_data(self, preprocessed_data):
return self.tvm_process(preprocessed_data)
def postprocess_result(self, data):
inference_outputs = data[0][0].squeeze()
top5 = (-inference_outputs).argsort()[:5]
classified_results = []
for idx in top5:
label = image_labels[idx]
probability = inference_outputs[idx]
if label.find(",") != -1:
label = label[0:label.find(",")]
classified_results.append([label, probability.astype(float)])
return classified_results
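# Usage sketch (illustrative; 'example.jpg' is a placeholder and the tuned TVM
# artifacts referenced in __init__ must be available on the device): run the
# preprocess -> inference -> postprocess pipeline on a single frame.
if __name__ == '__main__':
    classifier = ImageClassification()
    frame = cv2.imread('example.jpg')
    batch = classifier.preprocess_input([frame])
    results = classifier.postprocess_result(classifier.process_data(batch))
    print(results)  # top-5 [label, probability] pairs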
| StarcoderdataPython |
67653 | <gh_stars>1000+
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Subprocess utilities.
"""
from subprocess import PIPE, STDOUT, CalledProcessError, Popen
from eliot import Message, start_action
from pyrsistent import PClass, field
class _CalledProcessError(CalledProcessError):
"""
Just like ``CalledProcessError`` except output is included in the string
representation.
"""
def __str__(self):
base = super(_CalledProcessError, self).__str__()
lines = "\n".join(" |" + line for line in self.output.splitlines())
return base + " and output:\n" + lines
class _ProcessResult(PClass):
"""
The return type for ``run_process`` representing the outcome of the process
that was run.
"""
command = field(type=list, mandatory=True)
output = field(type=bytes, mandatory=True)
status = field(type=int, mandatory=True)
def run_process(command, *args, **kwargs):
"""
Run a child process, capturing its stdout and stderr.
:param list command: An argument list to use to launch the child process.
:raise CalledProcessError: If the child process has a non-zero exit status.
:return: A ``_ProcessResult`` instance describing the result of the child
process.
"""
kwargs["stdout"] = PIPE
kwargs["stderr"] = STDOUT
action = start_action(
action_type="run_process", command=command, args=args, kwargs=kwargs)
with action:
process = Popen(command, *args, **kwargs)
output = process.stdout.read()
status = process.wait()
result = _ProcessResult(command=command, output=output, status=status)
# TODO: We should be using a specific logging type for this.
Message.new(
command=result.command,
output=result.output,
status=result.status,
).write()
if result.status:
raise _CalledProcessError(
returncode=status, cmd=command, output=output,
)
return result
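# Usage sketch (illustrative, not part of the original module): a successful
# command returns a _ProcessResult; a non-zero exit status raises
# _CalledProcessError, whose message includes the captured output.
if __name__ == '__main__':
    result = run_process(["echo", "hello"])
    print(result.status)  # 0
    print(result.output)  # captured stdout (stderr is merged in)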
| StarcoderdataPython |
111823 | #!/bin/env python
import os
import itk
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
from FemurSegmentation.IOManager import ImageReader
from FemurSegmentation.IOManager import VolumeWriter
from FemurSegmentation.filters import execute_pipeline
from FemurSegmentation.filters import adjust_physical_space
from FemurSegmentation.metrics import itk_label_overlapping_measures
from FemurSegmentation.metrics import itk_hausdorff_distance
from FemurSegmentation.metrics import itk_hausdorff_distance_map
# %%
def parse_args():
description = 'Automated CT Femur Segmentation'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--source',
dest='source',
required=True,
type=str,
action='store',
help='Source Image Filename')
parser.add_argument('--target',
dest='target',
required=True,
type=str,
action='store',
help='Target Image Filename')
parser.add_argument('--output',
dest='output',
required=True,
type=str,
action='store',
help='output csv in which save the results')
parser.add_argument('--distance_map',
dest='distance_map',
required=False,
type=str,
action='store',
help='output filename for the distance map between source and target',
default=None)
args = parser.parse_args()
return args
def main(source_path, target_path, compute_distance_map=False):
ImageType = itk.Image[itk.SS, 3]
reader = ImageReader()
name = os.path.basename(source_path)
source = reader(source_path, ImageType)
target = reader(target_path, ImageType)
source = adjust_physical_space(source, target, ImageType)
measures = itk_label_overlapping_measures(source, target)
_ = measures.Update()
hd = itk_hausdorff_distance(source, target)
_ = hd.Update()
distance_map = None
if compute_distance_map:
distance_map = itk_hausdorff_distance_map(source, target)
dict = {'Patient Name' : [name],
'Dice Coefficient' : [measures.GetDiceCoefficient()],
'Jaccard Coefficient' : [measures.GetJaccardCoefficient()],
'Volume Similarity' : [measures.GetVolumeSimilarity()],
'Hausdorff Distance' : [hd.GetHausdorffDistance()],
'Average Hausdorff Distance' : [hd.GetAverageHausdorffDistance()]}
df = pd.DataFrame.from_dict(dict)
print('Processed Image: {}'.format(name), flush=True)
print('Computed Metrics:', flush=True)
print(df)
return [df, distance_map]
if __name__ == '__main__':
args = parse_args()
print('Source Image: {}'.format(args.source), flush=True)
print('Target Image: {}'.format(args.target), flush=True)
compute_distance_map=False
if args.distance_map is not None:
compute_distance_map=True
df, distance_map = main(args.source, args.target, compute_distance_map=compute_distance_map)
print('Writing the results to {}'.format(args.output), flush=True)
df.to_csv(args.output, sep=',', index=False)
if compute_distance_map:
print('Writing the distance map to {}'.format(args.distance_map))
writer = VolumeWriter()
_ = writer(args.distance_map, distance_map)
print('[DONE]', flush=True)
| StarcoderdataPython |
4810125 | <gh_stars>1-10
# Generated by Django 3.1.5 on 2021-05-02 10:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hospital', '0015_remove_admin_status'),
]
operations = [
migrations.RemoveField(
model_name='charges',
name='unitprice',
),
migrations.AlterField(
model_name='charges',
name='commodity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='AdmitDetails', to='hospital.medicines'),
),
]
| StarcoderdataPython |
53418 | """Load the Tatoeba dataset."""
import sys
import os
import csv
import subprocess
import time
from multiprocessing import Pool, Lock, cpu_count
from tqdm import tqdm
from scipy.io import wavfile
from python.params import MIN_EXAMPLE_LENGTH, MAX_EXAMPLE_LENGTH
from python.dataset.config import CACHE_DIR, CORPUS_DIR
from python.util.storage import delete_file_if_exists
from python.dataset import download
from python.dataset.txt_files import generate_txt
# Path to the Tatoeba dataset.
__URL = 'https://downloads.tatoeba.org/audio/tatoeba_audio_eng.zip'
__MD5 = 'd76252fd704734fc3d8bf5b44e029809'
__NAME = 'tatoeba'
__FOLDER_NAME = 'tatoeba_audio_eng'
__SOURCE_PATH = os.path.join(CACHE_DIR, __FOLDER_NAME)
__TARGET_PATH = os.path.realpath(os.path.join(CORPUS_DIR, __FOLDER_NAME))
def tatoeba_loader(keep_archive):
"""Download, extract and build the output strings that can be written to the desired TXT files.
Args:
keep_archive (bool): Keep or delete the downloaded archive afterwards.
Returns:
str: String containing the output string that can be written to TXT files.
"""
# Download and extract the dataset if necessary.
download.maybe_download(__URL, md5=__MD5, cache_archive=keep_archive)
if not os.path.isdir(__SOURCE_PATH):
raise ValueError('"{}" is not a directory.'.format(__SOURCE_PATH))
# Download user ratings CSV file.
csv_path = os.path.join(__SOURCE_PATH, 'users_sentences.csv')
download.download_with_progress('http://downloads.tatoeba.org/exports/users_sentences.csv',
csv_path)
assert os.path.exists(csv_path)
target = 'train'
# Generate the WAV and a string for the `<target>.txt` file.
output = __tatoeba_loader(target)
# Generate the `<target>.txt` file.
txt_path = generate_txt(__NAME, target, output)
# Cleanup extracted folder.
download.cleanup_cache(__FOLDER_NAME)
return txt_path
def __tatoeba_loader(target):
"""Build the output string that can be written to the desired TXT file.
Args:
target (str): Only 'train' is supported for the Tatoeba dataset.
Returns:
str: List containing the output string that can be written to TXT file.
"""
if not os.path.isdir(__SOURCE_PATH):
raise ValueError('"{}" is not a directory.'.format(__SOURCE_PATH))
if target != 'train':
raise ValueError('Invalid target. Tatoeba only has a train dataset.')
validated_samples = set() # Set of all sample IDs that have been validated.
# Parse dataset meta data information to filter out low ranked samples.
with open(os.path.join(__SOURCE_PATH, 'users_sentences.csv'), 'r') as csv_handle:
csv_reader = csv.reader(csv_handle, delimiter='\t')
csv_lines = list(csv_reader)
# print('csv_header: username\tsentence_id\trating\tdate_added\tdate_modified')
for username, _id, rating, _, _ in csv_lines:
rating = int(rating)
if rating >= 1:
path = os.path.join(__SOURCE_PATH, 'audio', username, _id)
validated_samples.add(path)
samples = [] # List of dictionaries of all files and labels and in the dataset.
# Parse dataset meta data information to filter out low ranked samples.
with open(os.path.join(__SOURCE_PATH, 'sentences_with_audio.csv'), 'r') as csv_handle:
csv_reader = csv.reader(csv_handle, delimiter='\t')
csv_lines = list(csv_reader)
csv_lines = csv_lines[1:] # Remove CSV header.
# print('csv_header: sentence_id\tusername\ttext')
for _id, username, text in tqdm(csv_lines,
desc='Loading Tatoeba CSV', total=len(csv_lines),
file=sys.stdout, unit='entries', dynamic_ncols=True):
path = os.path.join(__SOURCE_PATH, 'audio', username, _id)
if path in validated_samples:
samples.append({'path': path, 'text': text})
# Create target folder structure.
for sample in samples:
dir_path = os.path.join(__TARGET_PATH, os.path.relpath(sample['path'], __SOURCE_PATH))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
lock = Lock()
buffer = []
missing_mp3_counter = 0
with Pool(processes=cpu_count()) as pool:
for result in tqdm(pool.imap_unordered(__tatoeba_loader_helper, samples, chunksize=1),
desc='Converting Tatoeba MP3 to WAV', total=len(samples),
file=sys.stdout, unit='files', dynamic_ncols=True):
lock.acquire()
if result is None:
missing_mp3_counter += 1
else:
buffer.append(result)
lock.release()
print('WARN: {} MP3 files listed in the CSV could not be found.'
.format(missing_mp3_counter))
return buffer
def __tatoeba_loader_helper(sample):
path = sample['path']
text = sample['text']
mp3_path = '{}.mp3'.format(path)
wav_path = '{}.wav'.format(path)
wav_path = os.path.join(__TARGET_PATH, os.path.relpath(wav_path, __SOURCE_PATH))
# Check if audio file MP3 exists.
if not os.path.isfile(mp3_path):
# print('WARN: Audio file missing: {}'.format(mp3_path))
return None
# Check if file isn't empty.
try:
if os.path.getsize(mp3_path) <= 4048:
return None
except OSError:
return None
delete_file_if_exists(wav_path)
# Convert MP3 file into WAV file, reduce volume to 0.95, downsample to 16kHz mono sound.
ret = subprocess.call(['sox', '-v', '0.95', mp3_path, '-r', '16k', wav_path, 'remix', '1'])
if not os.path.isfile(wav_path):
raise RuntimeError('Failed to create WAV file with error code={}: {}'.format(ret, wav_path))
# Validate that the example length is within boundaries.
for i in range(5):
try:
(sr, y) = wavfile.read(wav_path)
length_sec = len(y) / sr
if not MIN_EXAMPLE_LENGTH <= length_sec <= MAX_EXAMPLE_LENGTH:
return None
break
except ValueError:
print('WARN: Could not load ({}/5) wavfile: {}'.format(i, wav_path))
if i == 4:
raise
time.sleep(1)
# TODO: Copy used files to corpus dir
wav_path = os.path.relpath(wav_path, CORPUS_DIR)
return '{} {}\n'.format(wav_path, text.strip())
# Test download script.
if __name__ == '__main__':
print('Tatoeba txt_paths: ', tatoeba_loader(True))
print('\nDone.')
| StarcoderdataPython |
3247250 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 10:48:58 2018
@author: <NAME>
"""
import numpy as np
import pandas as pd
from HelperFuncs import ppLFER, vant_conv, arr_conv, make_ppLFER
from FugModel import FugModel
class ppLFERMUM(FugModel):
""" ppLFER based Multimedia Urban Model fugacity model object. Implementation of the model by
Diamond et al (2001) as updated by Rodgers et al. (2018)
Attributes:
----------
pplfer_system (df): (optional) ppLFER system parameters, with
columns as systems(Kij or dUij) and the row index as l,s,a,b,v,c
e.g pp = pd.DataFrame(index = ['l','s','a','b','v','c']) by default
the system will define the ppLFER system as per Rodgers et al. (2018)
ic input_calc (df): Dataframe describing the system up to the point
of matrix solution.
"""
def __init__(self,locsumm,chemsumm,params,num_compartments = 7,name = None,pplfer_system = None):
FugModel. __init__(self,locsumm,chemsumm,params,num_compartments,name)
self.pp = pplfer_system
self.ic = self.input_calc(self.locsumm,self.chemsumm,self.params,self.pp)
def input_calc(self,locsumm,chemsumm,params,pp):
""" Perform the initial calulations to set up the fugacity matrix. A steady state
ppLFERMUM is an n compartment fugacity model solved at steady
state using the compartment parameters from locsumm and the chemical
parameters from chemsumm, other parameters from params, and a ppLFER system
from pp, use pp = None to use the defaults.
"""
#pdb.set_trace()
#Initialize used inputs dataframe with input properties
ic_inp = pd.DataFrame.copy(chemsumm,deep=True)
#Declare constants and calculate non-chemical dependent parameters
#Should I make if statements here too? Many of the params.Value items could be here instead.
R = 8.314 #Ideal gas constant, J/mol/K
locsumm.loc[:,'V']= locsumm.Area*locsumm.Depth #Add volumes m³
params.loc['TempK','Value'] = params.Value['Temp'] +273.15 #°C to K
#Calculate air density kg/m^3
locsumm.loc[['Lower_Air','Upper_Air'],'Density'] = 0.029 * 101325 / (R * params.Value.TempK)
Y4 = locsumm.Depth.Soil/2 #Soil diffusion path length (m)
Y5 = locsumm.Depth.Sediment/2 #Sediment diffusion path length (m)
#Boundary layer depth - leaves & film (m) Nobel (1991)
delta_blv = 0.004 * ((0.07 / params.Value.WindSpeed) ** 0.5)
delta_blf = 0.006 * ((0.07 / params.Value.WindSpeed) ** 0.5)
#Film to water MTC (m/h)
kfw = params.Value.FilmThickness * params.Value.W
#Dry deposition interception fraction (Diamond, Premiere, & Law 2001)
Ifd = 1 - np.exp(-2.8 * params.Value.Beta)
#Soil to groundwater leaching rate from Mackay & Paterson (1991)
Usg = 0.4 * params.Value.RainRate
#Fraction soil volume occupied by interstitial air and water
ic_inp.loc[:,'Bea'] = ic_inp.AirDiffCoeff*locsumm.VFAir.Soil**(10/3) \
/(locsumm.VFAir.Soil +locsumm.VFWat.Soil)**2
ic_inp.loc[:,'Bew'] = ic_inp.WatDiffCoeff*locsumm.VFWat.Soil**(10/3) \
/(locsumm.VFAir.Soil +locsumm.VFWat.Soil)**2
#Fraction sediment volume occupied by water
ic_inp.loc[:,'Bwx'] = ic_inp.WatDiffCoeff*locsumm.VFWat.Sediment**(4/3)
#Airside MTCs for veg and film (m/h)
ic_inp.loc[:,'k_av'] = ic_inp.AirDiffCoeff / delta_blv
ic_inp.loc[:,'k_af'] = ic_inp.AirDiffCoeff / delta_blf
#ppLFER system parameters - initialize defaults if not there already
if pp is None:
pp = pd.DataFrame(index = ['l','s','a','b','v','c'])
pp = make_ppLFER(pp)
#Check if partition coefficients & dU values have been provided, or only solute descriptors
#add based on ppLFER if not, then adjust partition coefficients for temperature of system
#Aerosol-Air (Kqa), use octanol-air enthalpy
if 'LogKqa' not in ic_inp.columns:
ic_inp.loc[:,'LogKqa'] = ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.logKqa.l,pp.logKqa.s,pp.logKqa.a,pp.logKqa.b,pp.logKqa.v,pp.logKqa.c)
if 'dUoa' not in ic_inp.columns: #!!!This might be broken - need to check units & sign!!!
ic_inp.loc[:,'dUoa'] = ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.dUoa.l,pp.dUoa.s,pp.dUoa.a,pp.dUoa.b,pp.dUoa.v,pp.dUoa.c)
ic_inp.loc[:,'Kqa'] = vant_conv(ic_inp.dUoa,params.Value.TempK,10**ic_inp.LogKqa,T1 = 288.15)
ic_inp.loc[:,'LogKqa'] = np.log10(ic_inp.Kqa)
#Organic carbon-water (KocW), use octanol-water enthalpy (dUow)
if 'LogKocW' not in ic_inp.columns:
ic_inp.loc[:,'LogKocW'] = ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.logKocW.l,pp.logKocW.s,pp.logKocW.a,pp.logKocW.b,pp.logKocW.v,pp.logKocW.c)
if 'dUow' not in ic_inp.columns: #!!!This might be broken - need to check units & sign!!!
ic_inp.loc[:,'dUow'] = 1000 * ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.dUow.l,pp.dUow.s,pp.dUow.a,pp.dUow.b,pp.dUow.v,pp.dUow.c)
ic_inp.loc[:,'KocW'] = vant_conv(ic_inp.dUow,params.Value.TempK,10**ic_inp.LogKocW)
ic_inp.loc[:,'LogKocW'] = np.log10(ic_inp.KocW)
#Storage Lipid Water (KslW), use ppLFER for dUslW (kJ/mol) convert to J/mol/K
if 'LogKslW' not in ic_inp.columns:
ic_inp.loc[:,'LogKslW'] = ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.logKslW.l,pp.logKslW.s,\
pp.logKslW.a,pp.logKslW.b,pp.logKslW.v,pp.logKslW.c)
if 'dUslW' not in ic_inp.columns:
ic_inp.loc[:,'dUslW'] = 1000 * ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.dUslW.l,pp.dUslW.s,pp.dUslW.a,pp.dUslW.b,pp.dUslW.v,pp.dUslW.c)
ic_inp.loc[:,'KslW'] = vant_conv(ic_inp.dUslW,params.Value.TempK,10**ic_inp.LogKslW,T1 = 310.15)
ic_inp.loc[:,'LogKslW'] = np.log10(ic_inp.KslW)
#Air-Water (Kaw) use dUaw
if 'LogKaw' not in ic_inp.columns:
ic_inp.loc[:,'LogKaw'] = ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.logKaw.l,pp.logKaw.s,\
pp.logKaw.a,pp.logKaw.b,pp.logKaw.v,pp.logKaw.c)
if 'dUaw' not in ic_inp.columns: #!!!This might be broken - need to check units & sign!!!
ic_inp.loc[:,'dUaw'] = 1000 * ppLFER(ic_inp.L,ic_inp.S,\
ic_inp.A,ic_inp.B,ic_inp.V,pp.dUaw.l,pp.dUaw.s,pp.dUaw.a,pp.dUaw.b,pp.dUaw.v,pp.dUaw.c)
ic_inp.loc[:,'Kaw'] = vant_conv(ic_inp.dUaw,params.Value.TempK,10**ic_inp.LogKaw)
ic_inp.loc[:,'LogKaw'] = np.log10(ic_inp.Kaw)
#Define storage lipid-air (KslA) and organic carbon-air (KocA) using the thermodynamic cycle
#No need to adjust these for temperature as they are defined based on temperature-adjusted values
ic_inp.loc[:,'KslA'] = ic_inp.KslW / ic_inp.Kaw
ic_inp.loc[:,'KocA'] = ic_inp.KocW / ic_inp.Kaw
#Calculate Henry's law constant (H, Pa m³/mol)
ic_inp.loc[:,'H'] = ic_inp.Kaw * R * params.Value.TempK
#Calculate temperature-corrected media reaction rates
#Air (air_rrxn /hr), 3600 converts from /s
ic_inp.loc[:,'air_rrxn'] = 3600 * \
arr_conv(params.Value.EaAir,params.Value.TempK,ic_inp.AirOHRateConst * params.Value.OHConc)
#Air Particles (airq_rrxn) 3600 converts from /s, use 10% of AirOHRateConst if not present
if 'AirQOHRateConst' not in ic_inp.columns:
ic_inp.loc[:,'airq_rrxn'] = 0.1 * ic_inp.air_rrxn
else:
ic_inp.loc[:,'airq_rrxn'] = 3600 * \
arr_conv(params.Value.EaAir,params.Value.TempK,ic_inp.AirQOHRateConst * params.Value.OHConc)
#Water (wat_rrxn) converted from half life (h)
ic_inp.loc[:,'wat_rrxn'] = \
arr_conv(params.Value.Ea,params.Value.TempK,np.log(2)/ic_inp.WatHL)
#Soil (soil_rrxn) converted from half life (h)
ic_inp.loc[:,'soil_rrxn'] = arr_conv(params.Value.Ea,params.Value.TempK,np.log(2)/ic_inp.SoilHL)
#Sediment (sed_rrxn) converted from half life
ic_inp.loc[:,'sed_rrxn'] = arr_conv(params.Value.Ea,params.Value.TempK,np.log(2)/ic_inp.SedHL)
#Vegetation is based off of air half life, this can be overridden if chemsumm contains a VegHL column
if 'VegHL' in ic_inp.columns:
ic_inp.loc[:,'veg_rrxn'] = arr_conv(params.Value.Ea,params.Value.TempK,np.log(2)/ic_inp.VegHL)
else:
ic_inp.loc[:,'veg_rrxn'] = 0.1*ic_inp.air_rrxn
#Same for film
if 'FilmHL' in ic_inp.columns:
ic_inp.loc[:,'film_rrxn'] = arr_conv(params.Value.Ea,params.Value.TempK,np.log(2)/ic_inp.FilmHL)
else:
ic_inp.loc[:,'film_rrxn'] = ic_inp.air_rrxn/0.75
#Convert back to half lives (h), good for error checking
ic_inp.loc[:,'AirHL'] = np.log(2)/(ic_inp.air_rrxn)
ic_inp.loc[:,'AirQHL'] = np.log(2)/(ic_inp.airq_rrxn)
ic_inp.loc[:,'WatHL'] = np.log(2)/(ic_inp.wat_rrxn)
#Calculate Z-values (mol/m³/Pa)
#Air lower and upper Zla and Zua, in case they are ever changed
ic_inp.loc[:,'Zla'] = 1/(R*params.Value.TempK)
ic_inp.loc[:,'Zua'] = 1/(R*params.Value.TempK)
#Dissolved water Zw
ic_inp.loc[:,'Zw'] = 1/(ic_inp.loc[:,'H'])
#Soil Solids Zs, index is 3 in the locsumm file
ic_inp.loc[:,'Zsoil'] = ic_inp.KocA*ic_inp.Zla*locsumm.Density.Soil*locsumm.FrnOC.Soil/1000
#Sediment Solids
ic_inp.loc[:,'Zsed'] = ic_inp.KocW*ic_inp.Zw*locsumm.Density.Sediment*locsumm.FrnOC.Sediment/1000
#Plant Storage
ic_inp.loc[:,'Zveg'] = ic_inp.KslA*ic_inp.Zla*locsumm.FrnOC.Vegetation
#Dissolved Film
ic_inp.loc[:,'Zfilm'] = ic_inp.KslA*ic_inp.Zla*locsumm.FrnOC.Film
#Film Aerosol - Kqa is whole particle not just organic fraction
ic_inp.loc[:,'Zqfilm'] = ic_inp.Kqa*ic_inp.Zla*locsumm.loc['Lower_Air','PartDensity']*1000
#Lower and Upper air Aerosol particles - composed of water and particle, with the water fraction defined
#by hygroscopic growth of the aerosol. Growth is defined as per the Berlin Spring aerosol from Arp et al. (2008)
if params.Value.RH > 100: #maximum RH = 100%
params.Value.RH = 100
#Hardcoded hygroscopic growth factor (GF) not ideal but ¯\_(ツ)_/¯
GF = np.interp(params.Value.RH/100,xp = [0.12,0.28,0.77,0.92],fp = [1.0,1.08,1.43,2.2],\
left = 1.0,right = params.Value.RH/100*5.13+2.2)
#Volume fraction of water in aerosol
VFQW_la = (GF - 1) * locsumm.Density.Water / ((GF - 1) * \
locsumm.Density.Water + locsumm.loc['Lower_Air','PartDensity'])
VFQW_ua = (GF - 1) * locsumm.Density.Water / ((GF - 1) * \
locsumm.Density.Water + locsumm.loc['Upper_Air','PartDensity'])
#Volume fraction of nucleus
VFQp_la = 1 - VFQW_la
VFQp_ua = 1 - VFQW_ua
#Calculate aerosol Z values
ic_inp.loc[:,'Zq_la'] = ic_inp.loc[:,'Zla']*ic_inp.loc[:,'Kqa']*locsumm.loc['Lower_Air','PartDensity']\
*1000*VFQp_la+ic_inp.Zw*VFQW_la
ic_inp.loc[:,'Zq_ua'] = ic_inp.loc[:,'Zua']*ic_inp.loc[:,'Kqa']*locsumm.loc['Upper_Air','PartDensity']\
*1000*VFQp_ua+ic_inp.Zw*VFQW_ua
#Suspended Sediment in the water compartment (Z_qw)
ic_inp.loc[:,'Z_qw'] = ic_inp.Zw*ic_inp.KocW*locsumm.PartFrnOC.Water * locsumm.PartDensity.Water/1000
#Bulk Z Value (Zb_j)
#Air - consists of Zq and Za
ic_inp.loc[:,'Zb_la'] = ic_inp.loc[:,'Zla'] + ic_inp.loc[:,'Zq_la'] * locsumm.VFPart.Lower_Air
ic_inp.loc[:,'Zb_ua'] = ic_inp.loc[:,'Zua'] + ic_inp.loc[:,'Zq_ua'] * locsumm.VFPart.Upper_Air
#Water
ic_inp.loc[:,'Zb_wat'] = ic_inp.loc[:,'Zw'] + ic_inp.loc[:,'Z_qw'] * locsumm.VFPart.Water
#Soil
ic_inp.loc[:,'Zb_soil'] = ic_inp.loc[:,'Zla'] * locsumm.VFAir.Soil+\
ic_inp.loc[:,'Zw'] * locsumm.VFWat.Soil + \
ic_inp.loc[:,'Zsoil']* (1-locsumm.VFAir.Soil -locsumm.VFWat.Soil)
#Sediment
ic_inp.loc[:,'Zb_sed'] = ic_inp.loc[:,'Zw'] * locsumm.VFWat.Sediment + \
ic_inp.loc[:,'Zsed']* (1-locsumm.VFWat.Sediment)
#Vegetation
ic_inp.loc[:,'Zb_veg'] = ic_inp.loc[:,'Zla'] * locsumm.VFAir.Vegetation+\
ic_inp.loc[:,'Zw'] * locsumm.VFWat.Vegetation + \
ic_inp.loc[:,'Zveg']* (1-locsumm.VFAir.Vegetation -locsumm.VFWat.Vegetation)
#Film
ic_inp.loc[:,'Zb_film'] = ic_inp.loc[:,'Zqfilm'] * locsumm.VFPart.Film + \
ic_inp.loc[:,'Zfilm'] * params.Value.VFOCFilm
#Partition dependent transport parameters
#veg & Film side MTCs (m/h)
ic_inp.loc[:,'k_vv'] = 10 ** (0.704 * ic_inp.LogKocW - 11.2 - ic_inp.LogKaw)
ic_inp.loc[:,'k_ff'] = 10 ** (0.704 * ic_inp.LogKocW - 11.2 - ic_inp.LogKaw)
#lower air particle fraction (phi)
ic_inp.loc[:,'phi'] = (ic_inp.Zq_la*locsumm.VFPart.Lower_Air)/ic_inp.Zb_la
#Calculate advective (G) inflows(mol/m³ * m³/h = mol/h)
if 'LairInflow' in ic_inp.columns:
ic_inp.loc[:,'Gcb_1'] = locsumm.AdvFlow.Lower_Air * ic_inp.LairInflow
else:
ic_inp.loc[:,'Gcb_1'] = 0
if 'UairInflow' in ic_inp.columns:
ic_inp.loc[:,'Gcb_2'] = locsumm.AdvFlow.Upper_Air * ic_inp.UairInflow
else:
ic_inp.loc[:,'Gcb_2'] = 0
if 'WatInflow' in ic_inp.columns:
ic_inp.loc[:,'Gcb_3'] = locsumm.AdvFlow.Water * ic_inp.WatInflow
else:
ic_inp.loc[:,'Gcb_3'] = 0
if 'SoilInflow' in ic_inp.columns: #add groundwater advective inflow
ic_inp.loc[:,'Gcb_4'] = locsumm.AdvFlow.Soil * ic_inp.SoilInflow
else:
ic_inp.loc[:,'Gcb_4'] = 0
#D Values
#Advection out from atmosphere and water
ic_inp.loc[:,'D_adv_la'] = locsumm.AdvFlow.Lower_Air * ic_inp.Zb_la
ic_inp.loc[:,'D_adv_ua'] = locsumm.AdvFlow.Upper_Air * ic_inp.Zb_la
ic_inp.loc[:,'D_adv_w'] = locsumm.AdvFlow.Water * ic_inp.Zb_wat
    #Reaction D values - the indexing here could be tidier, but it works
ic_inp.loc[:,'D_rxn_la'] = locsumm.V.Lower_Air * ((1 - locsumm.VFPart.Lower_Air)\
* ic_inp.Zla * ic_inp.air_rrxn + locsumm.VFPart.Lower_Air * ic_inp.Zq_la * ic_inp.airq_rrxn)
ic_inp.loc[:,'D_rxn_ua'] = locsumm.V.Upper_Air * ((1 - locsumm.VFPart.Upper_Air)\
* ic_inp.Zua * ic_inp.air_rrxn + locsumm.VFPart.Upper_Air * ic_inp.Zq_ua * ic_inp.airq_rrxn)
ic_inp.loc[:,'D_rxn_wat'] = locsumm.V.Water * ic_inp.loc[:,'Zb_wat']*ic_inp.wat_rrxn
ic_inp.loc[:,'D_rxn_soil'] = locsumm.V.Soil * ic_inp.loc[:,'Zb_soil']*ic_inp.soil_rrxn
ic_inp.loc[:,'D_rxn_sed'] = locsumm.V.Sediment * ic_inp.loc[:,'Zb_sed']*ic_inp.sed_rrxn
ic_inp.loc[:,'D_rxn_veg'] = locsumm.V.Vegetation * ic_inp.loc[:,'Zb_veg']*ic_inp.veg_rrxn
#For film particles use a rxn rate 20x lower than the organic phase
ic_inp.loc[:,'D_rxn_film'] = locsumm.V.Film * ((params.Value.VFOCFilm* ic_inp.Zfilm)\
* ic_inp.film_rrxn + locsumm.VFPart.Film * ic_inp.Zqfilm * ic_inp.film_rrxn/20)
#Inter-compartmental Transport, matrix values are D_ij others are as noted
#Lower and Upper Air
ic_inp.loc[:,'D_12'] = params.Value.Ua * locsumm.Area.Lower_Air * ic_inp.Zb_la #Lower to Upper
ic_inp.loc[:,'D_21'] = params.Value.Ua * locsumm.Area.Upper_Air * ic_inp.Zb_ua #Upper to lower
ic_inp.loc[:,'D_st'] = params.Value.Ust * locsumm.Area.Upper_Air * ic_inp.Zb_ua #Upper to stratosphere
#Lower Air to Water #Do we want to separate out the particle fraction here too? (1-ic_inp.phi) *
ic_inp.loc[:,'D_vw'] = 1 / (1 / (params.Value.kma * locsumm.Area.Water \
* ic_inp.Zla) + 1 / (params.Value.kmw * locsumm.Area.Water * ic_inp.Zw)) #Dry dep of gas
ic_inp.loc[:,'D_rw'] = locsumm.Area.Water * ic_inp.Zw * params.Value.RainRate * (1-ic_inp.phi) #Wet dep of gas
ic_inp.loc[:,'D_qw'] = locsumm.Area.Water * params.Value.RainRate * params.Value.Q \
*locsumm.VFPart.Lower_Air * ic_inp.Zq_la * ic_inp.phi #Wet dep of aerosol
ic_inp.loc[:,'D_dw'] = locsumm.Area.Water * params.Value.Up * locsumm.VFPart.Lower_Air\
* ic_inp.Zq_la #dry dep of aerosol
ic_inp.loc[:,'D_13'] = ic_inp.D_vw + ic_inp.D_rw + ic_inp.D_qw + ic_inp.D_dw #Air to water
ic_inp.loc[:,'D_31'] = ic_inp.D_vw #Water to air
#Lair and Soil
ic_inp.loc[:,'D_vs'] = 1/(1/(params.Value.ksa*locsumm.Area.Soil \
*ic_inp.Zla)+Y4/(locsumm.Area.Soil*ic_inp.Bea*ic_inp.Zla+locsumm.Area.Soil*ic_inp.Bew*ic_inp.Zw)) #Dry dep of gas
ic_inp.loc[:,'D_rs'] = locsumm.Area.Soil * ic_inp.Zw * params.Value.RainRate \
* (1-params.Value.Ifw) * (1-ic_inp.phi) #Wet dep of gas
ic_inp.loc[:,'D_qs'] = locsumm.Area.Soil * ic_inp.Zq_la * params.Value.RainRate \
* locsumm.VFPart.Lower_Air * params.Value.Q * (1-params.Value.Ifw) * ic_inp.phi #Wet dep of aerosol
ic_inp.loc[:,'D_ds'] = locsumm.Area.Soil * ic_inp.Zq_la * params.Value.Up \
* locsumm.VFPart.Lower_Air * (1-Ifd) #dry dep of aerosol
ic_inp.loc[:,'D_14'] = ic_inp.D_vs + ic_inp.D_rs + ic_inp.D_qs + ic_inp.D_ds #Air to soil
ic_inp.loc[:,'D_41'] = ic_inp.D_vs #soil to air
#Lair and Veg
ic_inp.loc[:,'D_vv'] = 1/(1/(ic_inp.k_av*locsumm.Area.Vegetation\
*ic_inp.Zla)+1/(locsumm.Area.Vegetation*ic_inp.k_vv*ic_inp.Zveg)) #Dry dep of gas
ic_inp.loc[:,'D_rv'] = locsumm.Area.Vegetation * ic_inp.Zw * params.Value.RainRate \
* params.Value.Ifw * (1-ic_inp.phi) #Wet dep of gas
ic_inp.loc[:,'D_qv'] = locsumm.Area.Vegetation * ic_inp.Zq_la * params.Value.RainRate \
* params.Value.Q * params.Value.Ifw * locsumm.VFPart.Lower_Air * ic_inp.phi #Wet dep of aerosol
ic_inp.loc[:,'D_dv'] = locsumm.Area.Vegetation * ic_inp.Zq_la * locsumm.VFPart.Lower_Air \
*params.Value.Up *Ifd #dry dep of aerosol
ic_inp.loc[:,'D_16'] = ic_inp.D_vv + ic_inp.D_rv + ic_inp.D_qv + ic_inp.D_dv #Air to veg
ic_inp.loc[:,'D_61'] = ic_inp.D_vv #veg to air
#Lair and film
ic_inp.loc[:,'D_vf'] = 1/(1/(ic_inp.k_af*locsumm.Area.Film\
*ic_inp.Zla)+1/(locsumm.Area.Film*ic_inp.k_ff*ic_inp.Zfilm)) #Dry dep of gas
ic_inp.loc[:,'D_rf'] = locsumm.Area.Film*ic_inp.Zw*params.Value.RainRate*(1-ic_inp.phi) #Wet dep of gas
ic_inp.loc[:,'D_qf'] = locsumm.Area.Film * ic_inp.Zq_la * params.Value.RainRate \
* params.Value.Q* locsumm.VFPart.Lower_Air*ic_inp.phi #Wet dep of aerosol
ic_inp.loc[:,'D_df'] = locsumm.Area.Film * ic_inp.Zq_la * locsumm.VFPart.Lower_Air\
* params.Value.Up #dry dep of aerosol
ic_inp.loc[:,'D_17'] = ic_inp.D_vf + ic_inp.D_rf + ic_inp.D_qf + ic_inp.D_df #Air to film
ic_inp.loc[:,'D_71'] = ic_inp.D_vf #film to air
#Zrain based on D values & DRain (total), used just for assessing rain concentrations
ic_inp.loc[:,'DRain'] = ic_inp.D_rw + ic_inp.D_qw + ic_inp.D_rs + ic_inp.D_qs \
+ ic_inp.D_rv + ic_inp.D_qv + ic_inp.D_rf + ic_inp.D_qf
ic_inp.loc[:,'ZRain'] = ic_inp.DRain / (locsumm.Area.Lower_Air * params.Value.RainRate)
#Water and Soil
ic_inp.loc[:,'D_sw'] = locsumm.Area.Soil * ic_inp.Zsoil * params.Value.Usw #Solid run off to water
ic_inp.loc[:,'D_ww'] = locsumm.Area.Soil * ic_inp.Zw * params.Value.Uww #Water run off to water
ic_inp.loc[:,'D_43'] = ic_inp.D_sw + ic_inp.D_ww #Soil to water
ic_inp.loc[:,'D_34'] = 0 #Water to soil
ic_inp.loc[:,'D_sg'] = locsumm.Area.Soil * ic_inp.Zw * Usg #Soil to groundwater
#Water and Sediment (x)
ic_inp.loc[:,'D_tx'] = 1/(1/(params.Value.kxw*locsumm.Area.Sediment\
*ic_inp.Zw)+Y5/(locsumm.Area.Sediment*ic_inp.Bwx*ic_inp.Zw)) #Uptake by sediment
ic_inp.loc[:,'D_dx'] = locsumm.Area.Sediment * ic_inp.Z_qw * params.Value.Udx #Sediment deposition - should we have VFpart.Water?
ic_inp.loc[:,'D_rx'] = locsumm.Area.Sediment * ic_inp.Zsed * params.Value.Urx #Sediment resuspension
ic_inp.loc[:,'D_35'] = ic_inp.D_tx + ic_inp.D_dx #Water to Sed
ic_inp.loc[:,'D_53'] = ic_inp.D_tx + ic_inp.D_rx #Sed to Water
ic_inp.loc[:,'D_bx'] = locsumm.Area.Sediment * ic_inp.Zsed * params.Value.Ubx #Sed burial
#Water and Film
    ic_inp.loc[:,'D_73'] = locsumm.Area.Film * kfw * ic_inp.Zb_film #Film to water
ic_inp.loc[:,'D_37'] = 0 #Water to film
#Soil and Veg
ic_inp.loc[:,'D_cd'] = locsumm.Area.Vegetation * params.Value.RainRate \
*(params.Value.Ifw - params.Value.Ilw)*params.Value.lamb * ic_inp.Zq_la #Canopy drip
ic_inp.loc[:,'D_we'] = locsumm.Area.Vegetation * params.Value.kwe * ic_inp.Zveg #Wax erosion
ic_inp.loc[:,'D_lf'] = locsumm.V.Vegetation * ic_inp.Zb_veg * params.Value.Rlf #litterfall
ic_inp.loc[:,'D_46'] = locsumm.V.Soil * params.Value.Rs * ic_inp.Zb_soil #Soil to veg
ic_inp.loc[:,'D_64'] = ic_inp.D_cd + ic_inp.D_we + ic_inp.D_lf #Veg to soil
#Total D-Values
ic_inp.loc[:,'DT1'] = ic_inp.D_12 + ic_inp.D_13 + ic_inp.D_14 + ic_inp.D_16 + ic_inp.D_17 + ic_inp.D_adv_la + ic_inp.D_rxn_la #Lair
ic_inp.loc[:,'DT2'] = ic_inp.D_21 + ic_inp.D_st + ic_inp.D_adv_ua + ic_inp.D_rxn_ua #Uair
ic_inp.loc[:,'DT3'] = ic_inp.D_31 + ic_inp.D_35 + ic_inp.D_adv_w + ic_inp.D_rxn_wat #Water
    ic_inp.loc[:,'DT4'] = ic_inp.D_41 + ic_inp.D_43 + ic_inp.D_46 + ic_inp.D_rxn_soil + ic_inp.D_sg #Soil
ic_inp.loc[:,'DT5'] = ic_inp.D_53 + ic_inp.D_rxn_sed + ic_inp.D_bx #Sediment
ic_inp.loc[:,'DT6'] = ic_inp.D_61 + ic_inp.D_64 + ic_inp.D_rxn_veg #Vegetation
ic_inp.loc[:,'DT7'] = ic_inp.D_71 + ic_inp.D_73 + ic_inp.D_rxn_film #Film
#Define total inputs (RHS of matrix) for each compartment
#Note that if you run backwards calcs to calculate the inputs for a cell these are NOT overwritten, so these
#should not be referenced except with that in mind.
if 'LairEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_1'] = ic_inp.loc[:,'Gcb_1'] + ic_inp.loc[:,'LairEmiss']
else:
ic_inp.loc[:,'inp_1'] = ic_inp.loc[:,'Gcb_1']
if 'UairEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_2'] = ic_inp.loc[:,'Gcb_2'] + ic_inp.loc[:,'UairEmiss']
else:
ic_inp.loc[:,'inp_2'] = ic_inp.loc[:,'Gcb_2']
if 'WatEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_3'] = ic_inp.loc[:,'Gcb_3'] + ic_inp.loc[:,'WatEmiss']
else:
ic_inp.loc[:,'inp_3'] = ic_inp.loc[:,'Gcb_3']
if 'SoilEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_4'] = ic_inp.loc[:,'Gcb_4'] + ic_inp.loc[:,'SoilEmiss']
else:
ic_inp.loc[:,'inp_4'] = ic_inp.loc[:,'Gcb_4']
if 'SedEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_5'] = ic_inp.loc[:,'SedEmiss']
else:
ic_inp.loc[:,'inp_5'] = 0
if 'VegEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_6'] = ic_inp.loc[:,'VegEmiss']
else:
ic_inp.loc[:,'inp_6'] = 0
if 'FilmEmiss' in ic_inp.columns:
ic_inp.loc[:,'inp_7'] = ic_inp.loc[:,'FilmEmiss']
else:
ic_inp.loc[:,'inp_7'] = 0
#Define target fugacity in mol/m³/Pa. Note that the target should be a fugacity!!
if 'LAirConc' in ic_inp.columns:
ic_inp.loc[:,'targ_1'] = ic_inp.loc[:,'LAirConc']/ic_inp.MolMass/ic_inp.Zb_la
if 'UAirConc' in ic_inp.columns:
ic_inp.loc[:,'targ_2'] = ic_inp.loc[:,'UAirConc']/ic_inp.MolMass/ic_inp.Zb_ua
if 'WatConc' in ic_inp.columns:
ic_inp.loc[:,'targ_3'] = ic_inp.loc[:,'WatConc']/ic_inp.MolMass/ic_inp.Zb_wat
if 'SoilConc' in ic_inp.columns:
ic_inp.loc[:,'targ_4'] = ic_inp.loc[:,'SoilConc']/ic_inp.MolMass/ic_inp.Zb_soil
if 'SedConc' in ic_inp.columns:
ic_inp.loc[:,'targ_5'] = ic_inp.loc[:,'SedConc']/ic_inp.MolMass/ic_inp.Zb_sed
if 'VegConc' in ic_inp.columns:
ic_inp.loc[:,'targ_6'] = ic_inp.loc[:,'VegConc']/ic_inp.MolMass/ic_inp.Zb_veg
if 'FilmConc' in ic_inp.columns:
ic_inp.loc[:,'targ_7'] = ic_inp.loc[:,'FilmConc']/ic_inp.MolMass/ic_inp.Zb_film
return ic_inp
"""Model: A python model of RFC 5545.
=====================================
"""
__author__ = 'Jason'
import datetime
from icalendar import Calendar
from icalendar import Event
ARG_TYPE_INCORRECT = 'Argument should be of type {0}'
REQ_PROP_MISSING = 'Required property {0} is missing'
class CalendarModel():
"""RFC 5545 - Section 3.4. (page 50).::
Code::
icalstream = 1*icalobject
icalobject = "BEGIN" ":" "VCALENDAR" CRLF
icalbody
"END" ":" "VCALENDAR" CRLF
icalbody = calprops component
calprops = *(
prodid
version
calscale?
method?
x-prop*
iana-prop*
)
component = 1*(
eventc /
todoc /
journalc /
freebusyc /
timezonec /
iana-comp /
x-comp
)
iana-comp = "BEGIN" ":" iana-token CRLF
1*contentline
"END" ":" iana-token CRLF
x-comp = "BEGIN" ":" x-name CRLF
1*contentline
"END" ":" x-name CRLF
"""
def __init__(self, calendar):
"""
:param calendar:
:return: None
"""
if not isinstance(calendar, Calendar):
raise AssertionError(ARG_TYPE_INCORRECT.format(Calendar))
        if 'PRODID' not in calendar.keys():
            raise AssertionError(REQ_PROP_MISSING.format('PRODID'))
        if 'VERSION' not in calendar.keys():
            raise AssertionError(REQ_PROP_MISSING.format('VERSION'))
class EventModel():
def __init__(self, event):
assert isinstance(event, Event),\
'event is not of type {0}'.format(Event)
self._uid = None
self._time_stamp = None
self._end = None
self._duration = None
self._from_event(event)
    def _from_event(self, event):
        assert 'UID' in event.keys(), REQ_PROP_MISSING.format('uid')
        self._uid = str(event.get('UID'))
        self._time_stamp = event.get('DTSTAMP').dt if event.get('DTSTAMP') else None  # DTSTAMP decodes to a datetime via .dt
@property
def uid(self):
"""
:return:
"""
        assert self._uid is not None
        return self._uid
@property
def time_stamp(self):
"""dtstamp
:return:
"""
        assert type(self._time_stamp) is datetime.datetime
        return self._time_stamp
@property
def start(self):
"""start
:return: datetime
"""
pass
@property
def event_class(self):
"""
:return:
"""
pass
@property
def created(self):
"""
:return:
"""
pass
@property
def description(self):
"""
:return:
"""
pass
@property
def geo(self):
"""
:return:
"""
pass
@property
def last_mod(self):
"""
:return:
"""
pass
@property
def location(self):
"""
:return:
"""
pass
@property
def organizer(self):
"""
:return:
"""
pass
@property
def priority(self):
"""
:return:
"""
pass
@property
def seq(self):
"""
:return:
"""
pass
@property
def status(self):
"""
:return:
"""
pass
@property
def summary(self):
"""
:return:
"""
pass
@property
def transp(self):
"""
:return:
"""
pass
@property
def url(self):
"""
:return:
"""
pass
@property
def recur_id(self):
"""
:return:
"""
pass
@property
def recurrence_rule(self):
"""rrule
:return: <TODO>
"""
pass
@property
def end(self):
"""dtend
:return: <TODO>
"""
        assert self._end is None or self._duration is None
        return self._end
@property
def duration(self):
"""duration
:return: <TODO>
"""
        assert self._end is None or self._duration is None
        return self._duration
@property
def attach(self):
"""rrule
:return: <TODO>
"""
return []
@property
def attendee(self):
"""rrule
:return: <TODO>
"""
return []
@property
def categories(self):
"""rrule
:return: <TODO>
"""
return []
@property
def comment(self):
"""rrule
:return: <TODO>
"""
return []
@property
def contact(self):
"""rrule
:return: <TODO>
"""
return []
@property
def exclude_dates(self):
"""rrule
:return: <TODO>
"""
return []
@property
def rs_status(self):
"""rrule
:return: <TODO>
"""
return []
@property
def related(self):
"""rrule
:return: <TODO>
"""
return []
@property
def resources(self):
"""rrule
:return: <TODO>
"""
return []
@property
def r_date(self):
"""rrule
:return: <TODO>
"""
return []
@property
def x_prop(self):
"""rrule
:return: <TODO>
"""
return []
@property
def iana_prop(self):
"""rrule
:return: <TODO>
"""
        return []
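# Minimal usage sketch (not part of the original module; assumes a local
# 'example.ics' file with PRODID/VERSION and VEVENT components):
if __name__ == '__main__':
    with open('example.ics', 'rb') as fp:
        cal = Calendar.from_ical(fp.read())
    CalendarModel(cal)
    for component in cal.walk('VEVENT'):
        EventModel(component)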
"""
Two-column dashboard: we visualize live stock market data and also fetch live news
1. live market data (stock list)
2. live news data
"""
#### IMPORTING REQUIRED MODULES
from dash_bootstrap_components.themes import YETI
try:
#data analysis modules
import pandas as pd
import numpy as np
#dash modules
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Output, Input
import plotly.graph_objs as go
#plotly modules
import plotly.express as px
#some other libraries that will be used
from yahoo_fin.stock_info import *
from pandas_datareader import data as web
import yfinance as yf
except Exception as e:
print(e)
##SETTING THE BOOSTRAP THEME AND TITLE OF WEBSITE
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.DARKLY])
app.title = "Live market: S&P live market data"
### NASDAQ TICKER SYMBOLS (list of strings from yahoo_fin)
ticker = tickers_nasdaq()
#df.rename(columns={'oldName1': 'newName1', 'oldName2': 'newName2'}, inplace=True)
SIDEBAR_STYLE = {
"position": "fixed",
"top": 0,
"left": 0,
"bottom": 0,
"width": "16rem",
"padding": "2rem 1rem",
"background-color":"#22252B",
}
# the styles for the main content position it to the right of the sidebar and
# add some padding.
CONTENT_STYLE = {
"margin-left": "18rem",
"margin-right": "2rem",
"padding": "2rem 1rem",
}
sidebar = html.Div(
[
html.H2("Live market", className="display-3 title"),
html.Hr(),
html.P(
"We visualize live market data with dash", className="lead"
),
],
style=SIDEBAR_STYLE,
)
content = html.Div([
dbc.Row([
dbc.Col([
html.H4('Live market data', className='top-title-text'),
]),
dbc.Col([
html.Div(
dcc.Dropdown(
id="ticker-dropdown",
options=[
{"label": Ticker, "value": Ticker}
for Ticker in ticker
],
value="AACG",
clearable=False,
searchable=False,
),className='dropdown'
),
]),
]),
dbc.Row(
html.Div(id='dd-output-container'),
),
],style=CONTENT_STYLE)
app.layout = html.Div([sidebar, content])
## we intialize app layout
@app.callback(
dash.dependencies.Output('dd-output-container', 'children'),
[dash.dependencies.Input('ticker-dropdown', 'value')])
def update_graph(value):
data = yf.download(tickers='{}'.format(value), period='1d', interval='1m')
hovertext=[]
for i in range(len(data['Open'])):
hovertext.append('Open: '+str(data['Open'][i])+'<br>Close: '+str(data['Close'][i]))
fig = go.Figure(data=go.Ohlc(x=data.index,
open=data['Open'],
high=data['High'],
low=data['Low'],
close=data['Close']))
fig.update(layout_xaxis_rangeslider_visible=False)
return dbc.Card(
[
dbc.CardBody(
[
dcc.Graph(figure=fig)
]
),
],
style={"width": "63rem", "height": "31rem"},
)
### WE INITIALIZE THE APP
if __name__ == '__main__':
    app.run_server(debug=True)
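# Rough dependency list for this app (package names only; versions are not
# pinned by the original script): dash, dash-bootstrap-components,
# dash-core-components, dash-html-components, plotly, pandas, numpy,
# yahoo_fin, yfinance, pandas-datareader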
# -*- coding: utf-8 -*-
# author : ysoftman
# title : beautifulsoup test
# python version : 2.x
from __future__ import print_function
import sys
# http://docs.python-requests.org/en/master/
import requests
# https://www.crummy.com/software/BeautifulSoup/
# pip install beautifulsoup4
from bs4 import BeautifulSoup
def parse_html():
print(parse_html.func_name)
html = '''<html>
<head>
<title>
ysoftman
</title>
</head>
<body>
<p class="title">
<b>
ysfotman
</b>
</p>
<p class="myclass1">
<a class="abc" href="http://ysoftman.com/abc" id="link1">
test abc
</a>
</p>
</body>
</html>
'''
# print ("html =", html)
result = BeautifulSoup(html, 'html.parser')
    # print the parsing results
print(result.title)
print(result.title.string)
print(result.a)
def parse_url(url):
print(parse_url.func_name)
print("url =", url)
resp = requests.get(url)
    # parse the relevant parts from the response content
    result = BeautifulSoup(resp.content, 'html.parser')
    # print the parsing results
    print(result.title)
    print(result.title.string)
    # find elements with a CSS selector:
    # only the <a href="xxx"> tags under <div class="navbar">
    selector = ".navbar a"
    result2 = result.select(selector)
    # print the text of the first of the matched <a> tags
print(result2[0].text)
def parse_url2(url):
print(parse_url2.func_name)
print("url =", url)
resp = requests.get(url)
    # parse the relevant parts from the response content
    result = BeautifulSoup(resp.content, 'html.parser')
    # print the parsing results
    print(result.title)
    print(result.title.string)
    # find the element with a CSS selector
# copy selector string using chrome dev tool
# #mArticle > div.search_cont > div.card_word.\23 word.\23 eng > div.search_box.\23 box > div > ul > li:nth-child(1) > span.txt_search
selector = ".search_cont .card_word div ul span.txt_search"
result2 = result.select(selector)
print(result2[0].text)
if __name__ == '__main__':
print("beautifulsoup test")
parse_html()
print("\n")
parse_url("https://www.w3.org/TR/WD-html40-970917/htmlweb.html")
print("\n")
parse_url2("http://dic.daum.net/search.do?q=love")
# File: scripts/mark_orphaned_guids.py
"""A number of GUIDs with invalid or missing referents
were found during the mongo -> postgres migration.
These GUIDS were parsed from the migration logs and written to scripts/orphaned_guids.json.
This script adds a field, `is_orphaned` to these GUIDS and sets it to True so that they
can be skipped during the mongo -> postgres migration.
"""
import json
import sys
import os
import logging
from scripts import utils as script_utils
from framework.mongo import database
logger = logging.getLogger(__name__)
HERE = os.path.dirname(os.path.abspath(__file__))
def main(dry=True):
with open(os.path.join(HERE, 'orphaned_guids.json'), 'r') as fp:
orphaned_guids = json.load(fp)
for collection_name, guids in orphaned_guids.iteritems():
logger.info('Updating {} GUIDs that point to the collection: {}'.format(
len(guids), collection_name
))
if not dry:
database.guid.update(
{'_id': {'$in': guids}},
{'$set': {'is_orphaned': True}},
multi=True
)
if __name__ == '__main__':
dry = '--dry' in sys.argv
handler = logging.StreamHandler()
formatter = logging.Formatter(
'[%(name)s] %(levelname)s: %(message)s',
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
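# Typical invocation (module path is an assumption based on the filename):
# python -m scripts.mark_orphaned_guids --dry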
# Repo: sagar30051991/helpdesk
from __future__ import unicode_literals
import json
import frappe
hierarchy = {
1: {
"time":2,
"role": "Administrator",
"is_dept_escalation": 0
},
2: {
"time":2,
"role": "Department Head",
"is_dept_escalation": 0
}
}
roles_priority = ["Administrator","Department Head"]
# subject = []
def after_install():
check_hod_role()
create_escalation_settings_doc()
setup_role_priority_settings()
def check_hod_role():
if not frappe.db.get_value("Role", "Department Head", "name"):
doc = frappe.new_doc("Role")
doc.role_name = "Department Head"
doc.save(ignore_permissions=True)
def create_escalation_settings_doc():
if frappe.db.get_value("Ticket Escalation Settings","Default"):
return
def append_rows(doc):
"""Append Escalation Hierarchy"""
doc.set("escalation_hierarchy",[])
for idx, ch_rec in hierarchy.items():
ch = doc.append("escalation_hierarchy", {})
ch.time = ch_rec.get("time")
ch.role = ch_rec.get("role")
ch.is_dept_escalation = ch_rec.get("is_dept_escalation")
if not frappe.db.get_value("Priority","Default","name"):
frappe.get_doc({
"doctype": "Priority",
"priority": "Default"
}).insert(ignore_permissions=True)
esc = frappe.new_doc("Ticket Escalation Settings")
esc.priority = "Default"
esc.is_default = 1
append_rows(esc)
esc.save(ignore_permissions=True)
def setup_role_priority_settings():
doc = frappe.get_doc("Role Priority Settings", "Role Priority Settings")
if not doc.roles_priority:
for i, role in enumerate(roles_priority):
rl = doc.append('roles_priority', {})
rl.role = role
rl.priority = len(roles_priority) - i
doc.save(ignore_permissions=True)
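# These helpers are intended to run once via the app's `after_install` hook
# (wired up in the Frappe app's hooks.py, which is an assumption about the
# surrounding app); re-running after_install() is safe because each step checks
# for existing records before creating anything.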
# File: Package/CONFIG.py
import sys
import ops
import iopc
TARBALL_FILE="samba-4.8.4.tar.gz"
TARBALL_DIR="samba-4.8.4"
INSTALL_DIR="samba-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
dst_usr_local_lib_dir = ""
def set_global(args):
global pkg_path
global output_dir
global tarball_pkg
global install_dir
global install_tmp_dir
global tarball_dir
global cc_host
global tmp_include_dir
global dst_include_dir
global dst_lib_dir
global dst_usr_local_lib_dir
global dst_usr_local_libexec_dir
global dst_usr_local_share_dir
global dst_usr_local_dir
global src_pkgconfig_dir
global dst_pkgconfig_dir
global dst_bin_dir
global dst_etc_dir
global install_test_utils
pkg_path = args["pkg_path"]
output_dir = args["output_path"]
tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
install_dir = ops.path_join(output_dir, INSTALL_DIR)
install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
cc_host_str = ops.getEnv("CROSS_COMPILE")
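    # CROSS_COMPILE conventionally ends with a trailing '-' (e.g. "arm-linux-gnueabihf-");
    # strip the last character to get the bare toolchain prefix.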
cc_host = cc_host_str[:len(cc_host_str) - 1]
tmp_include_dir = ops.path_join(output_dir, ops.path_join("include",args["pkg_name"]))
dst_include_dir = ops.path_join("include",args["pkg_name"])
dst_lib_dir = ops.path_join(install_dir, "lib")
dst_bin_dir = ops.path_join(install_dir, "bin")
dst_etc_dir = ops.path_join(install_dir, "etc")
dst_usr_local_lib_dir = ops.path_join(install_dir, "usr/local/lib")
dst_usr_local_dir = ops.path_join(install_dir, "usr/local")
dst_usr_local_libexec_dir = ops.path_join(install_dir, "usr/local/libexec")
dst_usr_local_share_dir = ops.path_join(install_dir, "usr/local/share")
src_pkgconfig_dir = ops.path_join(pkg_path, "pkgconfig")
dst_pkgconfig_dir = ops.path_join(install_dir, "pkgconfig")
if ops.getEnv("INSTALL_TEST_UTILS") == 'y':
install_test_utils = True
else:
install_test_utils = False
def MAIN_ENV(args):
set_global(args)
ops.exportEnv(ops.setEnv("CC", ops.getEnv("CROSS_COMPILE") + "gcc"))
'''
ops.exportEnv(ops.setEnv("CXX", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("CPP", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("AR", ops.getEnv("CROSS_COMPILE") + "ar"))
ops.exportEnv(ops.setEnv("RANLIB", ops.getEnv("CROSS_COMPILE") + "ranlib"))
ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
'''
ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
return False
def MAIN_EXTRACT(args):
set_global(args)
ops.unTarGz(tarball_pkg, output_dir)
return True
def MAIN_PATCH(args, patch_group_name):
set_global(args)
for patch in iopc.get_patch_list(pkg_path, patch_group_name):
if iopc.apply_patch(tarball_dir, patch):
continue
else:
sys.exit(1)
return True
def MAIN_CONFIGURE(args):
set_global(args)
job_count = ops.getEnv("BUILD_JOBS_COUNT")
extra_conf = []
'''
#extra_conf.append("--cross-compile")
#extra_conf.append("-C -V")
#extra_conf.append("--cross-answers=cc.txt")
#extra_conf.append("--hostcc=" + cc_host)
extra_conf.append("--abi-check-disable")
extra_conf.append("--disable-rpath")
extra_conf.append("--bundled-libraries=NONE")
#extra_conf.append("--cross-execute='qemu-arm-static -L /usr/arm-linux-gnu'")
extra_conf.append("--jobs=" + job_count)
extra_conf.append("--disable-gnutls")
#extra_conf.append("--private-libraries=NONE")
extra_conf.append("--without-gettext")
extra_conf.append("--without-systemd")
extra_conf.append("--without-ad-dc")
extra_conf.append("--without-ads")
extra_conf.append("--without-winbind")
extra_conf.append("--without-ldap")
extra_conf.append("--without-pam")
extra_conf.append("--without-pie")
extra_conf.append("--without-fam")
extra_conf.append("--without-dmapi")
extra_conf.append("--without-automount")
extra_conf.append("--without-utmp")
extra_conf.append("--without-dnsupdate")
extra_conf.append("--without-acl-support")
extra_conf.append("--without-quotas")
extra_conf.append("--without-cluster-support")
extra_conf.append("--disable-glusterfs")
extra_conf.append("--without-profiling-data")
extra_conf.append("--without-libarchive")
extra_conf.append("--without-regedit")
extra_conf.append("--without-ntvfs-fileserver")
extra_conf.append("--disable-python")
extra_conf.append("--disable-cups")
extra_conf.append("--disable-iprint")
extra_conf.append("--disable-avahi")
'''
extra_conf.append("--disable-python")
extra_conf.append("--without-ad-dc")
extra_conf.append("--without-acl-support")
extra_conf.append("--without-ldap")
extra_conf.append("--without-ads")
extra_conf.append("--without-pam")
extra_conf.append("--without-gettext")
extra_conf.append("--jobs=" + job_count)
extra_conf.append("--without-systemd")
extra_conf.append("--without-regedit")
extra_conf.append("--without-cluster-support")
extra_conf.append("--without-ntvfs-fileserver")
extra_conf.append("--without-winbind")
extra_conf.append("--disable-glusterfs")
extra_conf.append("--disable-cups")
extra_conf.append("--disable-iprint")
extra_conf.append("--disable-avahi")
extra_conf.append("--without-automount")
extra_conf.append("--without-dnsupdate")
extra_conf.append("--without-fam")
extra_conf.append("--without-dmapi")
extra_conf.append("--without-quotas")
extra_conf.append("--without-profiling-data")
extra_conf.append("--without-utmp")
extra_conf.append("--without-libarchive")
#extra_conf.append("--enable-developer")
print extra_conf
#iopc.waf(tarball_dir, extra_conf)
iopc.configure(tarball_dir, extra_conf)
return True
def MAIN_BUILD(args):
set_global(args)
ops.mkdir(install_dir)
ops.mkdir(install_tmp_dir)
iopc.make(tarball_dir)
iopc.make_install(tarball_dir)
ops.mkdir(install_dir)
ops.mkdir(dst_lib_dir)
ops.mkdir(dst_bin_dir)
ops.mkdir(dst_usr_local_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/sbin/nmbd"), dst_bin_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/sbin/smbd"), dst_bin_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc-binding.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so.0")
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc-samr.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so.0")
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so.0")
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-krb5pac.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so.0.0")
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so.0")
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-nbt.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so.0.0")
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so.0")
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr.so.0.1.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so.0.1")
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so.0")
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-standard.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so.0.0")
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so.0")
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnetapi.so.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnetapi.so.0", "libnetapi.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnss_winbind.so.2"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnss_winbind.so.2", "libnss_winbind.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnss_wins.so.2"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnss_wins.so.2", "libnss_wins.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-credentials.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so.0.0")
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so.0")
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-errors.so.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-errors.so.1", "libsamba-errors.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-hostconfig.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so.0.0")
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so.0")
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-passdb.so.0.27.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so.0.27")
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so.0")
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-util.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so.0.0")
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so.0")
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamdb.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so.0.0")
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so.0")
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsmbclient.so.0.3.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so.0.3")
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so.0")
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsmbconf.so.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsmbconf.so.0", "libsmbconf.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libtevent-util.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so.0.0")
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so.0")
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libwbclient.so.0.14"), dst_lib_dir)
ops.ln(dst_lib_dir, "libwbclient.so.0.14", "libwbclient.so.0")
ops.ln(dst_lib_dir, "libwbclient.so.0.14", "libwbclient.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/winbind_krb5_locator.so"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/private/."), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/auth"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/idmap"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/ldb"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/nss_info"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/vfs"), dst_lib_dir)
ops.ln(dst_usr_local_dir, "/tmp/samba", "samba")
return True
def MAIN_INSTALL(args):
set_global(args)
iopc.installBin(args["pkg_name"], ops.path_join(dst_lib_dir, "."), "lib")
iopc.installBin(args["pkg_name"], ops.path_join(dst_bin_dir, "."), "usr/sbin")
iopc.installBin(args["pkg_name"], ops.path_join(dst_usr_local_dir, "."), "usr/local")
#iopc.installBin(args["pkg_name"], ops.path_join(tmp_include_dir, "."), dst_include_dir)
#iopc.installBin(args["pkg_name"], ops.path_join(dst_pkgconfig_dir, '.'), "pkgconfig")
return False
def MAIN_SDKENV(args):
set_global(args)
return False
def MAIN_CLEAN_BUILD(args):
set_global(args)
return False
def MAIN(args):
set_global(args)
# File: ownblock/ownblock/apps/storage/models.py
import uuid
import os
import mimetypes
from django.conf import settings
from django.db import models
from sorl.thumbnail import get_thumbnail, delete
from ..buildings.models import Building
class Place(models.Model):
name = models.CharField(max_length=60)
location = models.TextField(blank=True)
building = models.ForeignKey(Building)
def __str__(self):
return self.name
def has_permission(self, user, perm):
return (user.role == 'manager' and
user.site_id == self.building.site_id)
def _upload_image_to(instance, filename):
_, ext = os.path.splitext(filename)
filename = uuid.uuid4().hex + ext
return "storage/{}/{}".format(instance.place_id, filename)
class Item(models.Model):
description = models.CharField(max_length=100)
serial_no = models.CharField(max_length=30, blank=True)
resident = models.ForeignKey(settings.AUTH_USER_MODEL)
place = models.ForeignKey(Place)
photo = models.ImageField(
upload_to=_upload_image_to, null=True, blank=True)
def __str__(self):
return self.description
def has_permission(self, user, perm):
if user.role == 'resident':
return self.resident == user
if user.role == 'manager':
return self.place.building.site_id == user.site_id
return False
def get_photo_content_type(self):
if not self.photo:
return None
return mimetypes.guess_type(self.photo.name)[0]
@models.permalink
def get_photo_url(self):
return ('item-photo', [self.pk])
@models.permalink
def get_thumbnail_url(self):
return ('item-thumbnail', [self.pk])
def get_thumbnail(self):
if not self.photo:
return None
return get_thumbnail(self.photo.file,
'150x150',
crop='center',
quality=99)
def delete_photo(self):
delete(self.photo)
self.photo = ""
self.save()
"""
Betty365 - Unofficial Bet365 WebSocket Stream Data Processor
Author: @ElJaviLuki
"""
# DEPENDENCIES
from random import random
import websockets
from readit import StandardProtocolConstants, ReaditMessage
# Generate URIs
def generate_uid():
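    # Digits of a random float with the leading "0." stripped, mimicking the uid
    # query parameter the bet365 web client appends to its WebSocket URLs.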
return str(random())[2:]
def generate_premws_uri():
return 'wss://premws-pt3.365lpodds.com/zap/?uid=' + generate_uid()
def generate_pshudws_uri():
return 'wss://pshudws.365lpodds.com/zap/?uid=' + generate_uid()
# WebSocket stuff
class WebSocketWrapper:
def __init__(self, socket, name):
self.socket = socket
self.name = name
async def send(self, message):
print(f"{self.name} <- {message}")
await self.socket.send(message)
async def recv(self):
message = await self.socket.recv()
print(f"{self.name} -> {message}")
return message
class SubscriptionStreamDataProcessor:
"""
Own implementation of Bet365's WebSocket Stream Data Processor.
Note that other parts are directly ported from the original JS code.
"""
def __init__(self, session_id, d_token, *, user_agent):
self.session_id = session_id
self.d_token = d_token
self.user_agent = user_agent
async def handshake(self):
"""
Perform a handshake to both 'premws' and 'pshudws' websockets and return their respective responses.
:return: Handshake response in the format [<premws Handshake Response>, <pshudws Handshake Response>]
"""
def get_handshake_data(session_id, d_token):
handshake_data = ""
handshake_data += chr(SubscriptionStreamDataProcessor.HANDSHAKE_PROTOCOL)
handshake_data += chr(SubscriptionStreamDataProcessor.HANDSHAKE_VERSION)
handshake_data += chr(SubscriptionStreamDataProcessor.HANDSHAKE_CONNECTION_TYPE)
handshake_data += chr(SubscriptionStreamDataProcessor.HANDSHAKE_CAPABILITIES_FLAG)
# Connection Details - Default Topic '__time'
handshake_data += "__time" + ","
handshake_data += "S_" + session_id
if d_token:
handshake_data += ",D_" + d_token
handshake_data += chr(0)
return handshake_data
handshake_data = get_handshake_data(self.session_id, self.d_token)
await self._premws.send(handshake_data)
await self._pshudws.send(handshake_data)
return [await self._premws.recv(), await self._pshudws.recv()]
async def subscribe(self, batches):
"""
:param batches: Array of topics to subscribe to.
:return:
"""
if batches not in [[], None]:
data = ""
data += chr(StandardProtocolConstants.CLIENT_SUBSCRIBE)
data += chr(StandardProtocolConstants.NONE_ENCODING)
data += ','.join(batches)
data += StandardProtocolConstants.RECORD_DELIM
return await self._premws.send(data)
async def forward_nst(self):
"""
Read and decrypt NST from 'pshudws' and send it to 'premws'.
Without this your session will be too short.
:return:
"""
def decrypt_nst(data):
char_map = [
["A", "d"],
["B", "e"],
["C", "f"],
["D", "g"],
["E", "h"],
["F", "i"],
["G", "j"],
["H", "k"],
["I", "l"],
["J", "m"],
["K", "n"],
["L", "o"],
["M", "p"],
["N", "q"],
["O", "r"],
["P", "s"],
["Q", "t"],
["R", "u"],
["S", "v"],
["T", "w"],
["U", "x"],
["V", "y"],
["W", "z"],
["X", "a"],
["Y", "b"],
["Z", "c"],
["a", "Q"],
["b", "R"],
["c", "S"],
["d", "T"],
["e", "U"],
["f", "V"],
["g", "W"],
["h", "X"],
["i", "Y"],
["j", "Z"],
["k", "A"],
["l", "B"],
["m", "C"],
["n", "D"],
["o", "E"],
["p", "F"],
["q", "0"],
["r", "1"],
["s", "2"],
["t", "3"],
["u", "4"],
["v", "5"],
["w", "6"],
["x", "7"],
["y", "8"],
["z", "9"],
["0", "G"],
["1", "H"],
["2", "I"],
["3", "J"],
["4", "K"],
["5", "L"],
["6", "M"],
["7", "N"],
["8", "O"],
["9", "P"],
["\n", ":|~"],
["\r", ""]
]
decrypted = ""
for index in range(0, len(data)):
new_char = data[index]
for char_unit in char_map:
if ":" == new_char and ":|~" == data[index:index + 3]:
new_char = "\n"
index += 2
break
if new_char == char_unit[1]:
new_char = char_unit[0]
break
decrypted += new_char
return decrypted
def to_readit_msg(data) -> list[ReaditMessage]:
rd_msgs = []
if data:
messages = data.split(StandardProtocolConstants.MESSAGE_DELIM)
for message in messages:
type = ord(message[0])
if type in [StandardProtocolConstants.INITIAL_TOPIC_LOAD,
StandardProtocolConstants.DELTA]:
records = message.split(StandardProtocolConstants.RECORD_DELIM)
fields = records[0].split(StandardProtocolConstants.FIELD_DELIM)
topic = fields[0][1:]
payload = ''.join(records[1:])
user_headers = fields[1:]
rd_msgs.append(ReaditMessage(str(type), topic, payload, user_headers))
                    elif type in [StandardProtocolConstants.CLIENT_ABORT,
                                  StandardProtocolConstants.CLIENT_CLOSE]:
                        raise ConnectionError(
                            "Connection close/abort message type sent from publisher. "
                            "Message type: " + str(type))
                    else:
                        raise ValueError("Unrecognised message type sent from publisher: " + str(type))
return rd_msgs
async def read_encrypted_nst(socket: WebSocketWrapper):
while True:
rd_msgs = to_readit_msg(await socket.recv())
for rd_msg in rd_msgs:
if rd_msg.topic[rd_msg.topic.rfind('_') + 1:] == "D23":
return rd_msg.message
def make_nst_message(decrypted_nst):
data = ""
data += chr(StandardProtocolConstants.CLIENT_SEND)
data += chr(StandardProtocolConstants.NONE_ENCODING)
data += "command"
data += StandardProtocolConstants.RECORD_DELIM
data += "nst" + StandardProtocolConstants.RECORD_DELIM + decrypted_nst + StandardProtocolConstants.FIELD_DELIM + "SPTBK"
return data
encrypted_nst = await read_encrypted_nst(self._pshudws)
decrypted_nst = decrypt_nst(encrypted_nst)
nst_message = make_nst_message(decrypted_nst)
await self._premws.send(nst_message)
async def coroutine(self):
self._premws = WebSocketWrapper(await websockets.connect(uri=generate_premws_uri(),
subprotocols=['zap-protocol-v1'],
extra_headers={'User-Agent': self.user_agent}), 'PREMWS')
self._pshudws = WebSocketWrapper(await websockets.connect(uri=generate_pshudws_uri(),
subprotocols=['zap-protocol-v1'],
extra_headers={'User-Agent': self.user_agent}), 'PSHUDWS')
# zap-protocol-v1 Handshake
await self.handshake()
# Subscribe to topic(s)
await self.subscribe(
[
'CONFIG_3_0',
'OVInPlay_3_0'
]
)
# Decrypt 'pshudws' NST token and forward it to 'premws'
await self.forward_nst()
while True:
recv = await self._premws.recv()
#TODO Handle data
TRAILING = "/zap/"
CONNECTION_TIMEOUT_LIMIT = 15e3
HANDSHAKE_TIMEOUT_LIMIT = 15e3
HANDSHAKE_PROTOCOL = 35
HANDSHAKE_VERSION = 3
HANDSHAKE_CONNECTION_TYPE = 80
HANDSHAKE_CAPABILITIES_FLAG = 1
HANDSHAKE_STATUS_CONNECTED = "100"
HANDSHAKE_STATUS_REJECTED = "111"
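# Minimal usage sketch (illustrative only): session_id and d_token must come
# from an authenticated bet365 web session, and the asyncio wiring below is an
# assumption, not part of the original module.
#
#     import asyncio
#     processor = SubscriptionStreamDataProcessor('SESSION_ID', 'D_TOKEN',
#                                                 user_agent='Mozilla/5.0')
#     asyncio.get_event_loop().run_until_complete(processor.coroutine())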
from collections import namedtuple
from itertools import chain
from os import makedirs, rename, scandir, listdir
from os.path import (join as p, exists, relpath, isdir, isfile,
expanduser, expandvars, realpath)
from struct import pack
import errno
import hashlib
import json
import logging
import re
import shutil
from rdflib import plugin
from rdflib.parser import Parser, create_input_source
from rdflib.term import URIRef
import six
from textwrap import dedent
import transaction
import yaml
from .. import OWMETA_PROFILE_DIR, connect
from ..context import (DEFAULT_CONTEXT_KEY, IMPORTS_CONTEXT_KEY,
CLASS_REGISTRY_CONTEXT_KEY, Context)
from ..mapper import Mapper
from ..context_common import CONTEXT_IMPORTS
from ..data import Data
from ..file_match import match_files
from ..file_lock import lock_file
from ..file_utils import hash_file
from ..graph_serialization import write_canonical_to_file
from ..rdf_utils import transitive_lookup, BatchAddGraph
from ..utils import FCN, aslist
from .archive import Unarchiver
from .common import (find_bundle_directory, fmt_bundle_directory, BUNDLE_MANIFEST_FILE_NAME,
BUNDLE_INDEXED_DB_NAME, validate_manifest, BUNDLE_MANIFEST_VERSION)
from .exceptions import (NotADescriptor, BundleNotFound, NoRemoteAvailable, NoBundleLoader,
NotABundlePath, MalformedBundle, NoAcceptableUploaders,
FetchTargetIsNotEmpty, TargetIsNotEmpty, UncoveredImports)
from .loaders import LOADER_CLASSES, UPLOADER_CLASSES, load_entry_point_loaders
from urllib.parse import quote as urlquote, unquote as urlunquote
L = logging.getLogger(__name__)
DEFAULT_BUNDLES_DIRECTORY = p(OWMETA_PROFILE_DIR, 'bundles')
'''
Default directory for the bundle cache
'''
DEFAULT_REMOTES_DIRECTORY = p(OWMETA_PROFILE_DIR, 'remotes')
'''
Default directory for descriptors of user-level remotes as opposed to project-specific
remotes
'''
class Remote(object):
'''
A place where bundles come from and go to
'''
def __init__(self, name, accessor_configs=()):
'''
Parameters
----------
name : str
The name of the remote
accessor_configs : iterable of AccessorConfig
Configs for how you access the remote
'''
self.name = name
''' Name of the remote '''
self.accessor_configs = list(accessor_configs)
'''
Configs for how you access the remote.
One might configure mirrors or replicas for a given bundle repository as multiple
accessor configs
'''
self.file_name = None
'''
If read from a file, the remote should have this attribute set to its source
file's path
'''
def add_config(self, accessor_config):
'''
Add the given accessor config to this remote
Parameters
----------
accessor_config : AccessorConfig
The config to add
Returns
-------
bool
`True` if the accessor config was added (meaning there's no equivalent one
already set for this remote). Otherwise, `False`.
'''
if accessor_config in self.accessor_configs:
return False
self.accessor_configs.append(accessor_config)
return True
def generate_loaders(self):
'''
Generate the bundle loaders for this remote.
        Loaders are generated from `accessor_configs` and `LOADER_CLASSES` according to
        which type of `.Loader` can load each type of accessor
'''
for ac in self.accessor_configs:
for lc in LOADER_CLASSES:
if lc.can_load_from(ac):
loader = lc(ac)
yield loader
def generate_uploaders(self):
'''
Generate the bundle uploaders for this remote
'''
for ac in self.accessor_configs:
for uc in UPLOADER_CLASSES:
if uc.can_upload_to(ac):
loader = uc(ac)
yield loader
def write(self, out):
'''
Serialize the `Remote` and write to `out`
Parameters
----------
out : :term:`file object`
Target for writing the remote
'''
yaml.dump(self, out)
@classmethod
def read(cls, inp):
'''
Read a serialized `Remote`
Parameters
----------
inp : :term:`file object`
File-like object containing the serialized `Remote`
'''
res = yaml.unsafe_load(inp)
assert isinstance(res, cls)
return res
def __eq__(self, other):
return (self.name == other.name and
self.accessor_configs == other.accessor_configs)
def __hash__(self):
return hash((self.name, self.accessor_configs))
def __str__(self):
if self.accessor_configs:
accessors = '\n' + '\n'.join(' ' + '\n '.join(str(acc).split('\n')) for acc in self.accessor_configs)
else:
accessors = ' <none>'
return dedent('''\
{name}
Accessors:{accessors}''').format(name=self.name,
accessors=accessors)
def __repr__(self):
return f'{FCN(type(self))}({repr(self.name)}, {repr(self.accessor_configs)})'
class DependencyDescriptor(namedtuple('_DependencyDescriptor',
('id', 'version', 'excludes'))):
__slots__ = ()
def __new__(cls, id, version=None, excludes=()):
return super(DependencyDescriptor, cls).__new__(cls, id, version, excludes)
class AccessorConfig(object):
'''
    Configuration for accessing a `Remote`. `Loaders <Loader>` are added to a remote according to
    which accessors are available
'''
def __eq__(self, other):
raise NotImplementedError()
def __hash__(self):
raise NotImplementedError()
class _DepList(list):
def add(self, dd):
self.append(dd)
class URLConfig(AccessorConfig):
'''
Configuration for accessing a remote with just a URL.
Note that URLConfigs should be pickle-able since they are written to a YAML file as
part of the `.Remote` they're apart of.
'''
def __init__(self, url):
self.url = url
def __eq__(self, other):
return isinstance(other, URLConfig) and self.url == other.url
def __hash__(self):
return hash(self.url)
def __str__(self):
return '{}(url={})'.format(FCN(type(self)), repr(self.url))
@classmethod
def register(cls, scheme):
URL_CONFIG_MAP[scheme] = cls
__repr__ = __str__
URL_CONFIG_MAP = {}
'''
`URLConfigs <URLConfig>` by scheme. Can be populated by pkg_resources entry points
'''
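# Illustrative sketch (not part of the original module): a URLConfig subclass
# for a hypothetical scheme registers itself so remotes using that scheme can
# be matched to it.
#
#     class ExampleURLConfig(URLConfig):
#         pass
#
#     ExampleURLConfig.register('example')
#     assert URL_CONFIG_MAP['example'] is ExampleURLConfig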
class Descriptor(object):
'''
Descriptor for a bundle.
The descriptor is sufficient to build a distributable bundle directory tree from a
`~rdflib.graph.ConjunctiveGraph` and a set of files (see `Installer`).
'''
def __init__(self, ident, **kwargs):
self.id = ident
self._set(kwargs)
@classmethod
def make(cls, obj):
'''
Makes a descriptor from the given object.
Parameters
----------
obj : a `dict-like object <dict>`
An object with parameters for the Descriptor. Typically a dict
Returns
-------
Descriptor
The created descriptor
'''
res = cls(ident=obj['id'])
res._set(obj)
return res
@classmethod
def load(cls, descriptor_source):
'''
Load a descriptor from a YAML record
Parameters
----------
descriptor_source : str or :term:`file object`
The descriptor source. Handled by `yaml.safe_load
<https://pyyaml.org/wiki/PyYAMLDocumentation#the-yaml-package>`_
Raises
------
.NotADescriptor
Thrown when the object loaded from `descriptor_source` isn't a `dict`
'''
dat = yaml.safe_load(descriptor_source)
if isinstance(dat, dict):
return cls.make(dat)
else:
raise NotADescriptor()
def _set(self, obj):
self.name = obj.get('name', self.id)
self.version = obj.get('version', 1)
self.description = obj.get('description', None)
self.patterns = set(make_pattern(x) for x in obj.get('patterns', ()))
self.includes = set(make_include_func(x) for x in obj.get('includes', ()))
self.empties = {uri for uri, options in (inc.popitem()
for inc in obj.get('includes', ())
if isinstance(inc, dict))
if options.get('empty', False) is True}
deps_set = set()
deps = _DepList()
for x in obj.get('dependencies', ()):
if isinstance(x, six.string_types):
dd = DependencyDescriptor(x)
elif isinstance(x, dict):
dd = DependencyDescriptor(**x)
else:
dd = DependencyDescriptor(*x)
if dd not in deps_set:
deps.append(dd)
deps_set.add(dd)
self.dependencies = deps
self.files = FilesDescriptor.make(obj.get('files', None))
def __str__(self):
return (FCN(type(self)) + '(ident={},'
'name={},version={},description={},'
'patterns={},includes={},'
'files={},dependencies={})').format(
repr(self.id),
repr(self.name),
repr(self.version),
repr(self.description),
repr(self.patterns),
repr(self.includes),
repr(self.files),
repr(self.dependencies))
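# Example of the YAML a Descriptor can be loaded from (illustrative only:
# identifiers and values below are made up, and only keys handled by
# Descriptor._set are shown):
#
#     id: example/aBundle
#     version: 1
#     description: An example bundle
#     includes:
#         - http://example.org/some_context
#     dependencies:
#         - id: example/dependency
#           version: 2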
class Bundle(object):
'''
Main entry point for using bundles
Typical usage is something like this::
>>> with Bundle('example/bundleId', version=42) as bnd:
... for aDataObject in bnd(DataObject)().load():
... # Do something with `aDataObject`
... print(aDataObject)
DataObject(<http://example.org/entities#aDataObject>)
'''
def __init__(self, ident, bundles_directory=DEFAULT_BUNDLES_DIRECTORY, version=None,
conf=None, remotes=None, remotes_directory=DEFAULT_REMOTES_DIRECTORY):
'''
.. note::
Paths, `bundles_directory` and `remotes_directory`, will have symbolic links,
environment variables, and "~" (for the current user's home directory)
expanded when the `Bundle` is initialized. To reflect changes to symbolic
links or home directories, the `bundles_directory` or `remotes_directory`
attributes must be updated directly or a new instance must be created.
Parameters
----------
ident : str
Bundle ID
bundles_directory : str, optional
Path to the bundles directory. Defaults to `.DEFAULT_BUNDLES_DIRECTORY`
version : int, optional
Bundle version to access. By default, the latest version will be used.
conf : .Configuration or dict, optional
Configuration to add to the one created for the bundle automatically. Values
for the default imports context (`.IMPORTS_CONTEXT_KEY`), the default context
(`.DEFAULT_CONTEXT_KEY`) and store (``'rdf.store'``, ``'rdf.source'``, and,
``'rdf.store_conf'``) will be ignored and overwritten.
remotes : iterable of Remote or str, optional
A subset of remotes and additional remotes to fetch from. See `Fetcher.fetch`
remotes_directory : str, optional
The directory to load `Remotes <Remote>` from in case a bundle is not in the
bundle cache. Defaults to `.DEFAULT_REMOTES_DIRECTORY`
'''
if not ident or not isinstance(ident, str):
raise ValueError('ident must be a non-empty string')
self.ident = ident
if not bundles_directory:
bundles_directory = DEFAULT_BUNDLES_DIRECTORY
self.bundles_directory = realpath(expandvars(expanduser(bundles_directory)))
if not conf:
conf = {}
conf.update({'rdf.source': 'default'})
self.version = version
self.remotes = remotes
# XXX: Look at how we bring in projects remotes directory
if not remotes_directory:
remotes_directory = DEFAULT_REMOTES_DIRECTORY
self.remotes_directory = realpath(expandvars(expanduser(remotes_directory)))
self._store_config_builder = \
BundleDependentStoreConfigBuilder(
bundles_directory=bundles_directory,
remotes_directory=remotes_directory,
remotes=remotes)
self._bundle_dep_mgr = BundleDependencyManager(
bundles_directory=self.bundles_directory,
remotes=self.remotes,
remotes_directory=self.remotes_directory,
dependencies=self.dependencies)
self._given_conf = conf
self.conf = None
self._contexts = None
self.connection = None
''' The owmeta_core connection to the bundle's indexed database '''
self._bundle_context = None
self._loaded_dependencies = dict()
@property
def identifier(self):
return self.ident
def resolve(self):
try:
bundle_directory = self._get_bundle_directory()
except BundleNotFound:
bundle_directory = self._fetch_bundle(self.ident, self.version)
return bundle_directory
@property
def manifest_data(self):
bundle_directory = self.resolve()
with open(p(bundle_directory, BUNDLE_MANIFEST_FILE_NAME)) as mf:
return json.load(mf)
def _get_bundle_directory(self):
# - look up the bundle in the bundle cache
        # - generate a config based on the current config and load it
        # - make a database from the graphs, if necessary (similar to `owm regendb`);
        #   delete the existing database if it doesn't match the store config
return find_bundle_directory(self.bundles_directory, self.ident, self.version)
def initdb(self):
'''
Initialize the bundle's `conf` `~owmeta_core.data.Data` instance
'''
if self.conf is None:
bundle_directory = self.resolve()
self.conf = Data().copy(self._given_conf)
with open(p(bundle_directory, BUNDLE_MANIFEST_FILE_NAME)) as mf:
manifest_data = json.load(mf)
self.conf[DEFAULT_CONTEXT_KEY] = manifest_data.get(DEFAULT_CONTEXT_KEY)
self.conf[IMPORTS_CONTEXT_KEY] = manifest_data.get(IMPORTS_CONTEXT_KEY)
self.conf[CLASS_REGISTRY_CONTEXT_KEY] = manifest_data.get(CLASS_REGISTRY_CONTEXT_KEY)
indexed_db_path = p(bundle_directory, BUNDLE_INDEXED_DB_NAME)
store_name, store_conf = self._store_config_builder.build(
indexed_db_path,
manifest_data.get('dependencies', ()))
self.conf['rdf.store'] = store_name
self.conf['rdf.store_conf'] = store_conf
self.connection = connect(conf=self.conf)
def _fetch_bundle(self, bundle_ident, version):
remotes_list = list(retrieve_remotes(self.remotes_directory))
f = Fetcher(self.bundles_directory, remotes_list)
return f.fetch(bundle_ident, version, self.remotes)
@property
def contexts(self):
'''
`List <list>` of `str`. Context IDs in this bundle
'''
# Since bundles are meant to be immutable, we won't need to add
if self._contexts is not None:
return self._contexts
bundle_directory = self.resolve()
contexts = list()
graphs_directory = p(bundle_directory, 'graphs')
idx_fname = p(graphs_directory, 'index')
if not exists(idx_fname):
raise Exception('Cannot find an index at {}'.format(repr(idx_fname)))
with open(idx_fname, 'rb') as index_file:
for l in index_file:
l = l.strip()
if not l:
continue
ctx, _ = l.split(b'\x00')
contexts.append(ctx.decode('UTF-8'))
self._contexts = frozenset(contexts)
return self._contexts
@property
def rdf(self):
self.initdb()
return self.conf['rdf.graph']
def __str__(self):
return f'Bundle({self.ident}' + (')' if self.version is None else f', {self.version})')
def __enter__(self):
self.initdb()
return self
def __exit__(self, exc_type, exc_value, traceback):
# Close the database connection
self.connection.disconnect()
self.connection = None
self.conf = None
def dependencies(self):
return self.manifest_data.get('dependencies', ())
def load_dependencies_transitive(self):
'''
Load dependencies from this bundle transitively
Yields
------
Bundle
A direct or indirect dependency of this bundle
'''
return self._bundle_dep_mgr.load_dependencies_transitive()
def load_dependencies(self):
'''
Load direct dependencies of this bundle
Yields
------
Bundle
A direct dependency of this bundle
'''
return self._bundle_dep_mgr._load_dependencies()
def _lookup_context_bundle(self, context_id):
owner = self._bundle_dep_mgr.lookup_context_bundle(
self.contexts,
context_id)
        if owner is self._bundle_dep_mgr:
            return self
        return owner
def _load_dependency(self, dependencies_item):
try:
return self._bundle_dep_mgr._load_dependency(dependencies_item)
except BundleDependencyConfigIsMalformed as e:
bundle_directory = self.resolve()
raise MalformedBundle(bundle_directory, str(e)) from e
def __call__(self, target):
if not target or not hasattr(target, 'contextualize'):
return target
self.initdb()
if self._bundle_context is None:
self._bundle_context = _BundleContext(
None, conf=self.conf, bundle=self).stored
return target.contextualize(self._bundle_context)
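# A minimal usage sketch (not part of the original module): opening a bundle as a
# context manager and reading from its indexed database. The bundle id
# 'example/bundle' and version 1 are hypothetical placeholders.
def _example_use_bundle():
    with Bundle('example/bundle', version=1) as bnd:
        # `contexts` is a frozenset of context IDs in the bundle
        print('contexts:', len(bnd.contexts))
        # `rdf` is the rdflib graph backed by the bundle's indexed database
        print('triples:', len(bnd.rdf))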
class BundleDependencyManager(object):
'''
Finds the bundle in which a context is defined.
    For a given bundle graph, there is *one* Bundle that "owns" a given context.
    Although multiple bundles may provide that context, the one closest to the root of the
    graph which provides some statements in that context is called the owner. Note that
    this does not mean that bundles on which the owner depends cannot also be queried;
    however, the exact behavior is up to the component that uses this one.
'''
def __init__(self, dependencies, **common_bundle_arguments):
self._loaded_dependencies = dict()
self._common_bundle_arguments = common_bundle_arguments
self.dependencies = dependencies
def load_dependencies_transitive(self):
'''
Load dependencies from this bundle transitively
Yields
------
Bundle
A direct or indirect dependency of this bundle
'''
border = {None: self}
seen = set()
while border:
new_border = {}
for bnd in border.values():
for d_bnd in bnd.load_dependencies():
key = (d_bnd.ident, d_bnd.version)
if key in seen:
continue
seen.add(key)
new_border[key] = d_bnd
yield d_bnd
border = new_border
def lookup_context_bundle(self, contexts, context_id):
if context_id is None or str(context_id) in contexts:
return self
for d in self.dependencies():
d_excludes = frozenset(d.get('excludes', ()))
if context_id in d_excludes:
continue
d_bnd = self._load_dependency(d)
match = d_bnd._lookup_context_bundle(context_id)
if match:
return match
return None
def _load_dependencies(self):
for d in self.dependencies():
yield self._load_dependency(d)
load_dependencies = _load_dependencies
def _load_dependency(self, dependencies_item):
d_id = dependencies_item.get('id')
if not d_id:
raise BundleDependencyConfigIsMalformed('Dependency entry is missing an identifier')
d_version = dependencies_item.get('version')
if not d_version:
raise BundleDependencyConfigIsMalformed(f'Dependency entry for {d_id} is'
' missing a version number')
bundle = self._loaded_dependencies.get((d_id, d_version))
if not bundle:
bundle = Bundle(d_id, version=d_version,
**self._common_bundle_arguments)
self._loaded_dependencies[(d_id, d_version)] = bundle
return bundle
class BundleDependencyConfigIsMalformed(Exception):
pass
class BundleDependentStoreConfigBuilder(object):
'''
Builds an RDFLib store configuration that depends on bundles.
    The process of building the store configuration requires traversing the graph of
    dependencies so that duplicate dependencies in the graph can be omitted. To support
    this process, this builder will fetch bundles as needed to resolve transitive
    dependencies.
'''
def __init__(self, bundles_directory=None, remotes_directory=None, remotes=None,
read_only=True):
if not bundles_directory:
bundles_directory = DEFAULT_BUNDLES_DIRECTORY
self.bundles_directory = realpath(expandvars(expanduser(bundles_directory)))
if not remotes_directory:
remotes_directory = DEFAULT_REMOTES_DIRECTORY
self.remotes_directory = realpath(expandvars(expanduser(remotes_directory)))
self.remotes = remotes
self.read_only = read_only
def build(self, indexed_db_path, dependencies, bundle_directory=None):
'''
Builds the store configuration
Parameters
----------
indexed_db_path : str
Path to the indexed database of the store that depends on the listed
            dependencies
        dependencies : list of dict
            List of dependency info, at least including keys for 'id' and 'version'
bundle_directory : str, optional
Path to the bundle directory for the dependent store, if the dependent store
is a bundle. Used for information in an exceptional path, but not otherwise
used
Returns
-------
str
The type of the store. This is the name used to look up the RDFLib store plugin
object
The configuration for the store. This is the object that will be passed to
`rdflib.store.Store.open` to configure the store.
'''
return 'agg', self._construct_store_config(indexed_db_path, dependencies,
read_only=self.read_only)
__call__ = build
def _construct_store_config(self, indexed_db_path, dependencies,
current_path=None, paths=None, bundle_directory=None,
read_only=True):
if paths is None:
paths = set()
if current_path is None:
current_path = _BDTD()
dependency_configs = self._gather_dependency_configs(dependencies, current_path, paths, bundle_directory)
fs_store_config = dict(url=indexed_db_path, read_only=read_only)
return [
('FileStorageZODB', fs_store_config)
] + dependency_configs
@aslist
def _gather_dependency_configs(self, dependencies, current_path, paths, bundle_directory=None):
for dd in dependencies:
dep_path = current_path.merge_excludes(dd.get('excludes', ()))
dep_ident = dd.get('id')
dep_version = dd.get('version')
if not dep_ident:
if bundle_directory:
raise MalformedBundle(bundle_directory, 'bundle dependency descriptor is lacking an identifier')
else:
raise ValueError('bundle dependency descriptor is lacking an identifier')
if (dep_path, (dep_ident, dep_version)) in paths:
return
paths.add((dep_path, (dep_ident, dep_version)))
tries = 0
while tries < 2:
try:
bundle_directory = find_bundle_directory(self.bundles_directory, dep_ident, dep_version)
with open(p(bundle_directory, BUNDLE_MANIFEST_FILE_NAME)) as mf:
manifest_data = json.load(mf)
break
except (BundleNotFound, FileNotFoundError):
bundle_directory = self._fetch_bundle(dep_ident, dep_version)
tries += 1
# We don't want to include items in the configuration that aren't specified by
# the dependency descriptor. Also, all of the optionals have defaults that
# BundleDependencyStore handles itself, so we don't want to impose them here.
addl_dep_confs = {k: v for k, v in dd.items()
if k in ('excludes',) and v}
yield ('owmeta_core_bds', dict(type='agg',
conf=self._construct_store_config(
p(bundle_directory, BUNDLE_INDEXED_DB_NAME),
manifest_data.get('dependencies', ()),
dep_path, paths, bundle_directory),
**addl_dep_confs))
def _fetch_bundle(self, bundle_ident, version):
remotes_list = list(retrieve_remotes(self.remotes_directory))
f = Fetcher(self.bundles_directory, remotes_list)
return f.fetch(bundle_ident, version, self.remotes)
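# A minimal sketch (not part of the original module) of how the builder above can be
# used directly. The indexed database path is a hypothetical placeholder; with no
# dependencies, the resulting configuration only contains the FileStorageZODB entry.
def _example_store_config():
    builder = BundleDependentStoreConfigBuilder(bundles_directory='/tmp/bundles',
                                                remotes_directory='/tmp/remotes')
    store_type, store_conf = builder.build('/tmp/bundles/example/1/owm.db', [])
    # store_type is 'agg'; store_conf is a list of (store plugin name, options) pairs
    return store_type, store_conf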
class _BDTD(namedtuple('_BDTD', ('excludes',))):
'''
Bundle Dependency Traversal Data (BDTD)
Holds data we use in traversing bundle dependencies. Looks a lot like a dependency
descriptor, but without an ID and version
'''
__slots__ = ()
def __new__(cls, *args, excludes=(), **kwargs):
return super(_BDTD, cls).__new__(cls, *args, excludes=excludes, **kwargs)
def merge_excludes(self, excludes):
return self._replace(excludes=self.excludes +
tuple(e for e in excludes if e not in self.excludes))
class _BundleContext(Context):
'''
`Context` for a bundle.
'''
def __init__(self, *args, bundle, **kwargs):
super().__init__(*args, **kwargs)
self.bundle = bundle
self._mapper = None
@property
def mapper(self):
if self._mapper is None:
self._mapper = _BundleMapper(bundle=self.bundle)
return self._mapper
class _BundleMapper(Mapper):
def __init__(self, bundle):
try:
bundle_conf = bundle.conf
except AttributeError:
raise Exception('Bundle connection has not been established.'
' Call `initdb` or use the bundle in a context manager')
super().__init__(name=f'{bundle.ident}' +
(f'@{bundle.version}' if bundle.version else ''),
conf=bundle_conf)
self.bundle = bundle
self._resolved_classes = dict()
def resolve_class(self, rdf_type, context):
prev_resolved_class = self._resolved_classes.get((rdf_type, context.identifier))
if prev_resolved_class:
return prev_resolved_class
own_resolved_class = super().resolve_class(rdf_type, context)
if own_resolved_class:
self._resolved_classes[(rdf_type, context.identifier)] = own_resolved_class
return own_resolved_class
target_id = context.identifier
target_bundle = self.bundle._lookup_context_bundle(target_id)
deps = target_bundle.load_dependencies_transitive()
for bnd in deps:
crctx_id = bnd.manifest_data.get(CLASS_REGISTRY_CONTEXT_KEY, None)
if not crctx_id:
continue
with bnd:
resolved_class = bnd.connection.mapper.resolve_class(rdf_type, context)
if resolved_class:
self._resolved_classes[(rdf_type, context.identifier)] = resolved_class
return resolved_class
return None
class _RemoteHandlerMixin(object):
'''
Utility mixin for handling remotes
The mixed-in class must have a `remotes` attribute which is a list of `Remote`
'''
def __init__(self, load_entry_points=True, **kwargs):
'''
Parameters
----------
load_entry_points : bool, optional
If `False`, then entry points will not be loaded
'''
super(_RemoteHandlerMixin, self).__init__(**kwargs)
self.load_entry_points = load_entry_points
def _get_remotes(self, remotes):
        '''
        Get remotes
        Parameters
        ----------
        remotes : iterable of Remote or str
            Names of configured remotes to act on, plus any additional `Remote` objects
            to act on
'''
if self.load_entry_points:
load_entry_point_loaders()
instance_remotes = []
additional_remotes = []
if remotes:
configured_remotes = {r.name: r for r in self.remotes}
for r in remotes:
if isinstance(r, six.text_type):
instance_remotes.append(configured_remotes.get(r))
elif isinstance(r, Remote):
additional_remotes.append(r)
else:
instance_remotes = self.remotes
has_remote = False
for rem in chain(additional_remotes, instance_remotes):
has_remote = True
yield rem
if not has_remote:
raise NoRemoteAvailable()
class Fetcher(_RemoteHandlerMixin):
'''
Fetches bundles from `Remotes <Remote>`
A fetcher takes a list of remotes, a bundle ID, and, optionally, a version number and
downloads the bundle to a local directory. `Deployer` is, functionally, the dual of
this class.
'''
def __init__(self, bundles_root, remotes, **kwargs):
'''
Parameters
----------
bundles_root : str
The root directory of the bundle cache
remotes : list of Remote or str
List of pre-configured remotes used in calls to `fetch`
'''
super(Fetcher, self).__init__(**kwargs)
self.bundles_root = bundles_root
self.remotes = remotes
def __call__(self, *args, **kwargs):
'''
Calls `fetch` with the given arguments
'''
return self.fetch(*args, **kwargs)
def fetch(self, bundle_id, bundle_version=None, remotes=None, progress_reporter=None,
triples_progress_reporter=None):
'''
Retrieve a bundle by name from a remote and put it in the local bundle cache.
The first remote that can retrieve the bundle will be tried. Each remote will be
tried in succession until one downloads the bundle.
Parameters
----------
bundle_id : str
The id of the bundle to retrieve
bundle_version : int
The version of the bundle to retrieve. optional
remotes : iterable of Remote or str
A subset of remotes and additional remotes to fetch from. If an entry in the
iterable is a string, then it will be looked for amongst the remotes passed in
initially.
progress_reporter : `tqdm.tqdm <https://tqdm.github.io/>`_-like object, optional
Receives updates of progress in fetching and installing locally
triples_progress_reporter : `tqdm.tqdm <https://tqdm.github.io/>`_-like object, optional
Receives updates of progress for adding triples for an individual graph
Returns
-------
str
returns the directory where the bundle has been placed
Raises
------
.exceptions.NoBundleLoader
Thrown when none of the loaders are able to download the bundle
.FetchTargetIsNotEmpty
Thrown when the requested bundle is already in the cache
'''
if remotes:
remotes = list(remotes)
given_bundle_version = bundle_version
loaders = self._get_bundle_loaders(bundle_id, given_bundle_version, remotes)
loaders_list = list(loaders)
if bundle_version is None:
bundle_version = self._find_latest_remote_bundle_versions(bundle_id, loaders_list)
bdir = fmt_bundle_directory(self.bundles_root, bundle_id, bundle_version)
self._assert_target_is_empty(bdir)
for loader in loaders_list:
try:
loader.base_directory = bdir
loader(bundle_id, bundle_version)
with open(p(bdir, BUNDLE_MANIFEST_FILE_NAME)) as mf:
manifest_data = json.load(mf)
for dd in manifest_data.get('dependencies', ()):
try:
find_bundle_directory(self.bundles_root, dd['id'], dd.get('version'))
except BundleNotFound:
self.fetch(dd['id'], dd.get('version'), remotes=remotes)
dat = self._post_fetch_dest_conf(bdir)
build_indexed_database(dat['rdf.graph'], bdir, progress_reporter,
triples_progress_reporter)
dat.close()
return bdir
except Exception:
L.warning('Failed to load bundle %s with %s', bundle_id, loader, exc_info=True)
shutil.rmtree(bdir)
else: # no break
raise NoBundleLoader(bundle_id, given_bundle_version)
def _post_fetch_dest_conf(self, bundle_directory):
res = Data().copy({
'rdf.source': 'default',
'rdf.store': 'FileStorageZODB',
'rdf.store_conf': p(bundle_directory, BUNDLE_INDEXED_DB_NAME)
})
res.init()
if not exists(res['rdf.store_conf']):
raise Exception('Could not create the database file at ' + res['rdf.store_conf'])
return res
def _find_latest_remote_bundle_versions(self, bundle_id, loaders_list):
latest_bundle_version = 0
for loader in loaders_list:
versions = loader.bundle_versions(bundle_id)
if not versions:
L.warning('Loader %s does not have any versions of the bundle %s', loader, bundle_id)
continue
loader_latest_version = max(versions)
if loader_latest_version > latest_bundle_version:
latest_bundle_version = loader_latest_version
if latest_bundle_version <= 0:
raise BundleNotFound(bundle_id, 'No versions of the requested bundle found from any remotes')
return latest_bundle_version
def _assert_target_is_empty(self, bdir):
target_empty = True
try:
for _ in scandir(bdir):
target_empty = False
break
except FileNotFoundError:
return
if not target_empty:
raise FetchTargetIsNotEmpty(bdir)
def _get_bundle_loaders(self, bundle_id, bundle_version, remotes):
for rem in self._get_remotes(remotes):
for loader in rem.generate_loaders():
if loader.can_load(bundle_id, bundle_version):
yield loader
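# A minimal usage sketch (not part of the original module): fetching a bundle into a
# local cache. The directories and bundle id are hypothetical placeholders;
# `retrieve_remotes` is the helper defined later in this module.
def _example_fetch_bundle():
    remotes = list(retrieve_remotes('/tmp/remotes'))
    fetcher = Fetcher('/tmp/bundles', remotes)
    return fetcher.fetch('example/bundle', bundle_version=1)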
class Deployer(_RemoteHandlerMixin):
'''
Deploys bundles to `Remotes <Remote>`.
A deployer takes a bundle directory tree or bundle archive and uploads it to a remote.
`Fetcher` is, functionally, the dual of this class.
Deployer is responsible for selecting remotes and corresponding uploaders among a set
of options. `Uploaders <Uploader>` are responsible for actually doing the upload.
'''
def __init__(self, remotes=(), **kwargs):
super(Deployer, self).__init__(**kwargs)
self.remotes = remotes
def __call__(self, *args, **kwargs):
return self.deploy(*args, **kwargs)
def deploy(self, bundle_path, remotes=None):
'''
Deploy a bundle
Parameters
----------
bundle_path : str
Path to a bundle directory tree or archive
remotes : iterable of Remote or str
A subset of remotes to deploy to and additional remotes to deploy to
Raises
------
.NoAcceptableUploaders
Thrown when none of the selected uploaders could upload the bundle
'''
if not exists(bundle_path):
raise NotABundlePath(bundle_path, 'the file does not exist')
manifest_data = self._extract_manifest_data_from_bundle_path(bundle_path)
validate_manifest(bundle_path, manifest_data)
uploaded = False
for uploader in self._get_bundle_uploaders(bundle_path, remotes=remotes):
uploader(bundle_path)
uploaded = True
if not uploaded:
raise NoAcceptableUploaders(bundle_path)
def _extract_manifest_data_from_bundle_path(self, bundle_path):
if isdir(bundle_path):
return self._get_directory_manifest_data(bundle_path)
elif isfile(bundle_path):
return self._get_archive_manifest_data(bundle_path)
else:
raise NotABundlePath(bundle_path, 'path does not point to a file or directory')
def _get_bundle_uploaders(self, bundle_directory, remotes=None):
for rem in self._get_remotes(remotes):
for uploader in rem.generate_uploaders():
if uploader.can_upload(bundle_directory):
yield uploader
def _get_directory_manifest_data(self, bundle_path):
try:
with open(p(bundle_path, BUNDLE_MANIFEST_FILE_NAME)) as mf:
return json.load(mf)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT: # FileNotFound
raise MalformedBundle(bundle_path, 'no bundle manifest found')
if e.errno == errno.EISDIR: # IsADirectoryError
raise MalformedBundle(bundle_path, 'manifest is not a regular file')
raise
except json.decoder.JSONDecodeError:
raise MalformedBundle(bundle_path, 'manifest is malformed: expected a'
' JSON file')
def _get_archive_manifest_data(self, bundle_path):
with Unarchiver().to_tarfile(bundle_path) as tf:
try:
mf0 = tf.extractfile(BUNDLE_MANIFEST_FILE_NAME)
if mf0 is None:
raise MalformedBundle(bundle_path, 'manifest is not a regular file')
# Would like to pull the
with mf0 as mf:
return json.load(mf)
except KeyError:
raise MalformedBundle(bundle_path, 'no bundle manifest found')
except json.decoder.JSONDecodeError:
raise MalformedBundle(bundle_path, 'manifest is malformed: expected a'
' JSON file')
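# A minimal usage sketch (not part of the original module): deploying a built bundle
# archive to the configured remotes. The archive path is a hypothetical placeholder.
def _example_deploy_bundle(remotes):
    deployer = Deployer(remotes=remotes)
    deployer.deploy('/tmp/example-bundle.tar.xz')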
class Cache(object):
'''
Cache of bundles
'''
def __init__(self, bundles_directory):
'''
Parameters
----------
bundles_directory : str
            The directory where bundles are stored
'''
self.bundles_directory = bundles_directory
def list(self):
'''
Returns a generator of summary bundle info
'''
try:
bundle_directories = scandir(self.bundles_directory)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return
raise
for bundle_directory in bundle_directories:
if not bundle_directory.is_dir():
continue
# Ignore deletes out from under us
try:
version_directories = scandir(bundle_directory.path)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
continue
raise
def keyfunc(x):
try:
return int(x.name)
except ValueError:
return float('+inf')
for version_directory in sorted(version_directories, key=keyfunc, reverse=True):
if not version_directory.is_dir():
continue
try:
manifest_fname = p(version_directory.path, BUNDLE_MANIFEST_FILE_NAME)
with open(manifest_fname) as mf:
try:
manifest_data = json.load(mf)
bd_id = urlunquote(bundle_directory.name)
bd_version = int(version_directory.name)
if (bd_id != manifest_data.get('id') or
bd_version != manifest_data.get('version')):
L.warning('Bundle manifest at %s does not match bundle'
' directory', manifest_fname)
continue
yield manifest_data
except json.decoder.JSONDecodeError:
L.warning("Bundle manifest at %s is malformed",
manifest_fname)
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
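# A minimal usage sketch (not part of the original module): summarizing the bundles in
# the local cache directory. The cache path is a hypothetical placeholder.
def _example_list_cached_bundles():
    cache = Cache('/tmp/bundles')
    for manifest in cache.list():
        print(manifest.get('id'), manifest.get('version'))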
def retrieve_remote_by_name(remotes_dir, name, **kwargs):
for rem in retrieve_remotes(remotes_dir, **kwargs):
if rem.name == name:
return rem
def retrieve_remotes(remotes_dir, load_entry_points=True):
'''
Retrieve remotes from a project directory or user remotes directory
Parameters
----------
    remotes_dir : str
        path to the directory containing remote definitions
    load_entry_points : bool, optional
        if `True`, then the entry points for `~.loaders.Loader` and `~.loaders.Uploader`
        implementations that have been added as entry points will be loaded
'''
if not exists(remotes_dir):
return
if load_entry_points:
load_entry_point_loaders()
for r in listdir(remotes_dir):
if r.endswith('.remote'):
fname = p(remotes_dir, r)
with open(fname) as inp:
try:
rem = Remote.read(inp)
rem.file_name = fname
yield rem
except Exception:
L.warning('Unable to read remote %s', r, exc_info=True)
class Installer(object):
'''
Installs a bundle locally
'''
def __init__(self, source_directory, bundles_directory, graph,
imports_ctx=None, default_ctx=None, class_registry_ctx=None,
installer_id=None, remotes=(), remotes_directory=None):
'''
Parameters
----------
source_directory : str
Directory where files come from. All files for a bundle must be below this
directory
bundles_directory : str
Directory where the bundles files go. Usually this is the bundle cache
directory
graph : rdflib.graph.ConjunctiveGraph
The graph from which we source contexts for this bundle
default_ctx : str, optional
The ID of the default context -- the target of a query when not otherwise
specified.
imports_ctx : str, optional
The ID of the imports context this installer should use. Imports relationships
are selected from this graph according to the included contexts.
class_registry_ctx : str, optional
The ID of the class registry context this installer should use. Class registry
entries are retrieved from this graph.
        installer_id : str, optional
Name of this installer for purposes of mutual exclusion
remotes : iterable of Remote, optional
Remotes to be used for retrieving dependencies when needed during
installation. If not provided, the remotes will be collected from
`remotes_directory`
remotes_directory : str, optional
The directory to load `Remotes <Remote>` from in case a bundle is not in the
bundle cache. Defaults to `.DEFAULT_REMOTES_DIRECTORY`
'''
self.context_hash = hashlib.sha224
self.file_hash = hashlib.sha224
self.source_directory = source_directory
self.bundles_directory = bundles_directory
self.graph = graph
self.installer_id = installer_id
self.imports_ctx = imports_ctx
self.default_ctx = default_ctx
self.class_registry_ctx = class_registry_ctx
self.remotes = list(remotes)
self.remotes_directory = remotes_directory
def install(self, descriptor, progress_reporter=None):
'''
Given a descriptor, install a bundle
Parameters
----------
descriptor : Descriptor
The descriptor for the bundle
progress_reporter : `tqdm.tqdm <https://tqdm.github.io/>`_-like object
Used for reporting progress during installation. optional
Returns
-------
str
The directory where the bundle is installed
Raises
------
.TargetIsNotEmpty
Thrown when the target directory for installation is not empty.
'''
# Create the staging directory in the base directory to reduce the chance of
# moving across file systems
try:
staging_directory = fmt_bundle_directory(self.bundles_directory, descriptor.id,
descriptor.version)
makedirs(staging_directory)
except OSError:
pass
target_empty = True
for _ in scandir(staging_directory):
target_empty = False
break
if not target_empty:
raise TargetIsNotEmpty(staging_directory)
with lock_file(p(staging_directory, '.lock'), unique_key=self.installer_id):
try:
self._install(descriptor, staging_directory,
progress_reporter=progress_reporter)
return staging_directory
except Exception:
self._cleanup_failed_install(staging_directory)
raise
def _cleanup_failed_install(self, staging_directory):
shutil.rmtree(p(staging_directory, 'graphs'))
shutil.rmtree(p(staging_directory, 'files'))
def _install(self, descriptor, staging_directory, progress_reporter=None):
graphs_directory, files_directory = self._set_up_directories(staging_directory)
self._write_file_hashes(descriptor, files_directory)
self._write_context_data(descriptor, graphs_directory)
self._generate_bundle_class_registry_ctx(descriptor, graphs_directory)
self._generate_bundle_imports_ctx(descriptor, graphs_directory)
self._write_manifest(descriptor, staging_directory)
self._initdb(staging_directory)
self._build_indexed_database(staging_directory, progress_reporter)
def _set_up_directories(self, staging_directory):
graphs_directory = p(staging_directory, 'graphs')
files_directory = p(staging_directory, 'files')
try:
makedirs(graphs_directory)
makedirs(files_directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return graphs_directory, files_directory
def _write_file_hashes(self, descriptor, files_directory):
with open(p(files_directory, 'hashes'), 'wb') as hash_out:
for fname in _select_files(descriptor, self.source_directory):
hsh = self.file_hash()
source_fname = p(self.source_directory, fname)
hash_file(hsh, source_fname)
self._write_hash_line(hash_out, fname.encode('UTF-8'), hsh)
shutil.copy2(source_fname, p(files_directory, fname))
def _write_context_data(self, descriptor, graphs_directory):
contexts = _select_contexts(descriptor, self.graph)
imports_ctxg = None
if self.imports_ctx:
imports_ctxg = self.graph.get_context(self.imports_ctx)
included_context_ids = set()
for ctxid in self._write_graphs(graphs_directory, *contexts):
included_context_ids.add(ctxid)
# Compute imported contexts
imported_contexts = set()
for ctxid in included_context_ids:
if imports_ctxg is not None:
imported_contexts |= transitive_lookup(imports_ctxg,
ctxid,
CONTEXT_IMPORTS,
seen=imported_contexts)
uncovered_contexts = imported_contexts - included_context_ids
if self.class_registry_ctx:
uncovered_contexts.discard(URIRef(self.class_registry_ctx))
uncovered_contexts = self._cover_with_dependencies(uncovered_contexts, descriptor)
if uncovered_contexts:
raise UncoveredImports(uncovered_contexts)
def _write_manifest(self, descriptor, staging_directory):
manifest_data = {}
if self.default_ctx:
manifest_data[DEFAULT_CONTEXT_KEY] = self.default_ctx
if self.imports_ctx:
# If an imports context was specified, then we'll need to generate an
# imports context with the appropriate imports. We don't use the source
# imports context ID for the bundle's imports context because the bundle
# imports that we actually need are a subset of the total set of imports
manifest_data[IMPORTS_CONTEXT_KEY] = fmt_bundle_imports_ctx_id(descriptor.id,
descriptor.version)
if self.class_registry_ctx:
manifest_data[CLASS_REGISTRY_CONTEXT_KEY] = fmt_bundle_class_registry_ctx_id(descriptor.id,
descriptor.version)
manifest_data['id'] = descriptor.id
manifest_data['version'] = descriptor.version
manifest_data['manifest_version'] = BUNDLE_MANIFEST_VERSION
mf_deps = []
for dd in descriptor.dependencies:
bnd = self._dd_to_bundle(dd)
# Fetch the dependency if necessary and get the version of the latest from the
# bundle manifest. Usually, the bundle will already be on the system since it
# *should have* been used for testing.
#
# (It's probably possible to do something like just grabbing the bundle
# manifest data in the case there is not a local copy of the bundle, but that
# should be unusual enough that it's probably not justified considering the
# overhead of having an alternative to fetching that bundle loaders might be
# expected to support.)
dd_version = bnd.manifest_data['version']
mf_deps.append({'version': dd_version,
'id': dd.id,
'excludes': dd.excludes})
manifest_data['dependencies'] = mf_deps
self.manifest_data = manifest_data
with open(p(staging_directory, BUNDLE_MANIFEST_FILE_NAME), 'w') as mf:
json.dump(manifest_data, mf, separators=(',', ':'))
def _generate_bundle_imports_ctx(self, descriptor, graphs_directory):
if not self.imports_ctx:
return
imports_ctxg = self.graph.get_context(self.imports_ctx)
# select all of the imports for all of the contexts in the bundle and serialize
contexts = []
idx_fname = p(graphs_directory, 'index')
with open(idx_fname) as index_file:
for l in index_file:
ctx, _ = l.strip().split('\x00')
contexts.append(URIRef(ctx))
for c in descriptor.empties:
contexts.append(URIRef(c))
ctxgraph = imports_ctxg.triples_choices((contexts, CONTEXT_IMPORTS, None))
if self.class_registry_ctx:
cr_ctxid = URIRef(fmt_bundle_class_registry_ctx_id(descriptor.id, descriptor.version))
contexts.append(cr_ctxid)
old_ctxgraph = ctxgraph
def replace_cr_ctxid():
src_cr_ctxid = URIRef(self.class_registry_ctx)
for t in old_ctxgraph:
if t[0] == src_cr_ctxid:
yield (cr_ctxid, t[1], t[2])
elif t[2] == src_cr_ctxid:
yield (t[0], t[1], cr_ctxid)
else:
yield t
ctxgraph = replace_cr_ctxid()
ctxid = fmt_bundle_imports_ctx_id(descriptor.id, descriptor.version)
self._write_graph(graphs_directory, ctxid, ctxgraph)
def _generate_bundle_class_registry_ctx(self, descriptor, graphs_directory):
if not self.class_registry_ctx:
return
ctx_id = fmt_bundle_class_registry_ctx_id(descriptor.id, descriptor.version)
class_registry_ctxg = self.graph.get_context(self.class_registry_ctx)
self._write_graph(graphs_directory, ctx_id, class_registry_ctxg)
def _write_graph(self, graphs_directory, ctxid, ctxgraph):
for _ in self._write_graphs(graphs_directory, (ctxid, ctxgraph)):
pass
def _write_graphs(self, graphs_directory, *graphs_sequence):
with open(p(graphs_directory, 'hashes'), 'ab') as hash_out,\
open(p(graphs_directory, 'index'), 'ab') as index_out:
for ctxid, ctxgraph in graphs_sequence:
ctxidb = ctxid.encode('UTF-8')
gbname, hsh = self._write_graph_to_file(ctxgraph, graphs_directory)
self._write_hash_line(hash_out, ctxidb, hsh)
self._write_index_line(index_out, ctxidb, gbname)
yield ctxid
hash_out.flush()
index_out.flush()
def _write_graph_to_file(self, ctxgraph, graphs_directory):
hsh = self.context_hash()
temp_fname = p(graphs_directory, 'graph.tmp')
write_canonical_to_file(ctxgraph, temp_fname)
hash_file(hsh, temp_fname)
gbname = hsh.hexdigest() + '.nt'
ctx_file_name = p(graphs_directory, gbname)
rename(temp_fname, ctx_file_name)
return gbname, hsh
def _write_hash_line(self, hash_out, key, hsh):
hash_out.write(key + b'\x00' + pack('B', hsh.digest_size) + hsh.digest() + b'\n')
def _write_index_line(self, index_out, ctxidb, gbname):
index_out.write(ctxidb + b'\x00' + gbname.encode('UTF-8') + b'\n')
def _initdb(self, staging_directory):
self.conf = Data().copy({
'rdf.source': 'default',
'rdf.store': 'FileStorageZODB',
'rdf.store_conf': p(staging_directory, BUNDLE_INDEXED_DB_NAME)
})
# Create the database file and initialize some needed data structures
self.conf.init()
if not exists(self.conf['rdf.store_conf']):
raise Exception('Could not create the database file at ' + self.conf['rdf.store_conf'])
def _build_indexed_database(self, staging_directory, progress=None):
try:
dest = self.conf['rdf.graph']
build_indexed_database(dest, staging_directory, progress)
finally:
self.conf.close()
def _dd_to_bundle(self, dependency_descriptor):
return Bundle(dependency_descriptor.id,
version=dependency_descriptor.version,
bundles_directory=self.bundles_directory,
remotes=self.remotes,
remotes_directory=self.remotes_directory)
def _cover_with_dependencies(self, uncovered_contexts, descriptor):
# XXX: Will also need to check for the contexts having a given ID being consistent
# with each other across dependencies
dependencies = descriptor.dependencies
for d in dependencies:
bnd = self._dd_to_bundle(d)
for c in bnd.contexts:
uncovered_contexts.discard(URIRef(c))
if not uncovered_contexts:
break
for c in descriptor.empties:
uncovered_contexts.discard(URIRef(c))
if not uncovered_contexts:
break
return uncovered_contexts
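# A minimal usage sketch (not part of the original module): installing a bundle given a
# descriptor and a source graph. The descriptor is assumed to come from elsewhere in this
# package (for example, parsed from a YAML descriptor file); the paths are placeholders.
def _example_install_bundle(descriptor):
    from rdflib import ConjunctiveGraph
    graph = ConjunctiveGraph()
    installer = Installer(source_directory='.',
                          bundles_directory='/tmp/bundles',
                          graph=graph)
    return installer.install(descriptor)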
def fmt_bundle_imports_ctx_id(id, version):
return fmt_bundle_ctx_id('generated_imports_ctx', id, version)
def fmt_bundle_class_registry_ctx_id(id, version):
return fmt_bundle_ctx_id('generated_class_registry_ctx', id, version)
def fmt_bundle_class_registry_ctx_list_id(id, version):
return fmt_bundle_ctx_id('generated_class_registry_ctx_list', id, version)
def fmt_bundle_ctx_id(kind, id, version):
return f'http://data.openworm.org/bundle/{kind}?bundle_id={urlquote(id)}&bundle_version={version}'
class FilesDescriptor(object):
'''
Descriptor for files
'''
def __init__(self):
self.patterns = set()
self.includes = set()
@classmethod
def make(cls, obj):
if not obj:
return
res = cls()
res.patterns = set(obj.get('patterns', ()))
res.includes = set(obj.get('includes', ()))
return res
def make_pattern(s):
if s.startswith('rgx:'):
return RegexURIPattern(s[4:])
else:
return GlobURIPattern(s)
def make_include_func(s):
if isinstance(s, str):
return URIIncludeFunc(s)
elif isinstance(s, dict):
uri = None
for k in s.keys():
if uri is not None:
raise ValueError('Context "includes" entry must have one key--the URI of'
f' the context to include. Extra key is "{k}"')
uri = k
return URIIncludeFunc(uri)
else:
raise ValueError('Context "includes" entry must be a str or a dict')
class URIIncludeFunc(object):
def __init__(self, include):
self.include = URIRef(include.strip())
def __hash__(self):
return hash(self.include)
def __call__(self, uri):
return URIRef(uri.strip()) == self.include
def __str__(self):
return '{}({})'.format(FCN(type(self)), repr(self.include))
__repr__ = __str__
class URIPattern(object):
def __init__(self, pattern):
self._pattern = pattern
def __hash__(self):
return hash(self._pattern)
def __call__(self, uri):
return False
def __str__(self):
return '{}({})'.format(FCN(type(self)), self._pattern)
class RegexURIPattern(URIPattern):
def __init__(self, pattern):
super(RegexURIPattern, self).__init__(re.compile(pattern))
def __call__(self, uri):
# Cast the pattern match result to a boolean
return not not self._pattern.match(str(uri))
class GlobURIPattern(RegexURIPattern):
def __init__(self, pattern):
replacements = [
['*', '.*'],
['?', '.?'],
['[!', '[^']
]
for a, b in replacements:
pattern = pattern.replace(a, b)
super(GlobURIPattern, self).__init__(re.compile(pattern))
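# A minimal sketch (not part of the original module) showing how the include/pattern
# helpers above are expected to be used; the URIs are hypothetical placeholders.
def _example_uri_matching():
    regex_pattern = make_pattern('rgx:http://example.org/ctx/.*')
    glob_pattern = make_pattern('http://example.org/ctx/*')
    include = make_include_func('http://example.org/ctx/0')
    uri = 'http://example.org/ctx/0'
    return regex_pattern(uri), glob_pattern(uri), include(uri)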
def _select_files(descriptor, directory):
fdescr = descriptor.files
if not fdescr:
return
for f in fdescr.includes:
if not exists(p(directory, f)):
raise Exception('Included file in bundle does not exist', f)
yield f
for f in fdescr.patterns:
for match in match_files(directory, p(directory, f)):
yield relpath(match, directory)
def _select_contexts(descriptor, graph):
for context in graph.contexts():
ctx = context.identifier
for inc in descriptor.includes:
if inc(ctx):
yield ctx, context
break
for pat in descriptor.patterns:
if pat(ctx):
yield ctx, context
break
def build_indexed_database(dest, bundle_directory, progress=None, trip_prog=None):
'''
Build the indexed database from a bundle directory
'''
idx_fname = p(bundle_directory, 'graphs', 'index')
# This code was copied from OWM._load_all_graphs, but we don't have a specific
# reason for projects and bundles to have the same format, so keeping the logic
# separate
triples_read = 0
with open(idx_fname) as index_file:
cnt = 0
for l in index_file:
cnt += 1
index_file.seek(0)
if progress is not None:
progress.total = cnt
with transaction.manager:
bag = BatchAddGraph(dest, batchsize=10000)
for l in index_file:
ctx, fname = l.strip().split('\x00')
parser = plugin.get('nt', Parser)()
graph_fname = p(bundle_directory, 'graphs', fname)
with open(graph_fname, 'rb') as f, bag.get_context(ctx) as g:
parser.parse(create_input_source(f), g)
if progress is not None:
progress.update(1)
if trip_prog is not None:
trip_prog.update(bag.count - triples_read)
triples_read = g.count
if progress is not None:
progress.write('Finalizing writes to database...')
if progress is not None:
progress.write('Loaded {:,} triples'.format(triples_read))
| StarcoderdataPython |
1746339 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from novaclient import api_versions
from novaclient.tests.unit import utils
from novaclient.tests.unit.v2 import fakes
from novaclient.v2 import migrations
class MigrationsTest(utils.TestCase):
def setUp(self):
super(MigrationsTest, self).setUp()
self.cs = fakes.FakeClient(api_versions.APIVersion("2.1"))
def test_list_migrations(self):
ml = self.cs.migrations.list()
self.assert_request_id(ml, fakes.FAKE_REQUEST_ID_LIST)
self.cs.assert_called('GET', '/os-migrations')
for m in ml:
self.assertIsInstance(m, migrations.Migration)
self.assertRaises(AttributeError, getattr, m, "migration_type")
def test_list_migrations_v223(self):
cs = fakes.FakeClient(api_versions.APIVersion("2.23"))
ml = cs.migrations.list()
self.assert_request_id(ml, fakes.FAKE_REQUEST_ID_LIST)
cs.assert_called('GET', '/os-migrations')
for m in ml:
self.assertIsInstance(m, migrations.Migration)
self.assertEqual(m.migration_type, 'live-migration')
@mock.patch('novaclient.v2.migrations.warnings.warn')
def test_list_migrations_with_cell_name(self, mock_warn):
ml = self.cs.migrations.list(cell_name="abc")
self.assert_request_id(ml, fakes.FAKE_REQUEST_ID_LIST)
self.cs.assert_called('GET', '/os-migrations?cell_name=abc')
for m in ml:
self.assertIsInstance(m, migrations.Migration)
self.assertTrue(mock_warn.called)
def test_list_migrations_with_filters(self):
ml = self.cs.migrations.list('host1', 'finished', 'child1')
self.assert_request_id(ml, fakes.FAKE_REQUEST_ID_LIST)
self.cs.assert_called(
'GET',
'/os-migrations?cell_name=child1&host=host1&status=finished')
for m in ml:
self.assertIsInstance(m, migrations.Migration)
def test_list_migrations_with_instance_uuid_filter(self):
ml = self.cs.migrations.list('host1', 'finished', 'child1',
'instance_id_456')
self.assert_request_id(ml, fakes.FAKE_REQUEST_ID_LIST)
self.cs.assert_called(
'GET',
('/os-migrations?cell_name=child1&host=host1&'
'instance_uuid=instance_id_456&status=finished'))
self.assertEqual(1, len(ml))
self.assertEqual('instance_id_456', ml[0].instance_uuid)
| StarcoderdataPython |
3266869 | <gh_stars>0
from typing import Union, List, Tuple
import numpy as np
import pandas as pd
from scipy import optimize
from matplotlib import pyplot as plt
from pyquil.api import QuantumComputer
from pyquil.gates import RX, RZ, CZ, MEASURE
from pyquil.quil import Program
from pyquil.quilbase import Pragma
MILLISECOND = 1e-3 # A millisecond (ms) is an SI unit of time
MICROSECOND = 1e-6 # A microsecond (us) is an SI unit of time
NANOSECOND = 1e-9 # A nanosecond (ns) is an SI unit of time
# A Hertz (Hz) is a derived unit of frequency in SI Units; 1 Hz is defined as one cycle per second.
KHZ = 1e3 # kHz
MHZ = 1e6 # MHz
GHZ = 1e9 # GHz
# ==================================================================================================
# T1
# ==================================================================================================
def generate_single_t1_experiment(qubits: Union[int, List[int]],
time: float,
n_shots: int = 1000) -> Program:
"""
Return a t1 program in native Quil for a single time point.
:param qubits: Which qubits to measure.
:param time: The decay time before measurement.
:param n_shots: The number of shots to average over for the data point.
:return: A T1 Program.
"""
program = Program()
try:
len(qubits)
except TypeError:
qubits = [qubits]
ro = program.declare('ro', 'BIT', len(qubits))
for q in qubits:
program += RX(np.pi, q)
program += Pragma('DELAY', [q], str(time))
for i in range(len(qubits)):
program += MEASURE(qubits[i], ro[i])
program.wrap_in_numshots_loop(n_shots)
return program
def generate_t1_experiments(qubits: Union[int, List[int]],
stop_time: float,
n_shots: int = 1000,
n_points: int = 15) -> pd.DataFrame:
"""
    Return a DataFrame containing programs which, when run in sequence, constitute a t1
    experiment to measure the decay time from the excited state to the ground state.
    :param qubits: Which qubits to measure.
    :param stop_time: The maximum decay time to measure at.
    :param n_shots: The number of shots to average over for each data point.
    :param n_points: The number of points for each t1 curve.
    :return: A dataframe with columns: time, t1 program
"""
start_time = 0
time_and_programs = []
for t in np.linspace(start_time, stop_time, n_points):
t = round(t, 7) # try to keep time on 100ns boundaries
time_and_programs.append({
'Time': t,
'Program': generate_single_t1_experiment(qubits, t, n_shots)
})
return pd.DataFrame(time_and_programs)
def acquire_data_t1(qc: QuantumComputer,
t1_experiment: pd.DataFrame,
) -> pd.DataFrame:
"""
Execute experiments to measure the T1 decay time of 1 or more qubits.
:param qc: The QuantumComputer to run the experiment on
:param t1_experiment: A pandas DataFrame with columns: time, t1 program
:return: pandas DataFrame
"""
results = []
for index, row in t1_experiment.iterrows():
t = row['Time']
program = row['Program']
executable = qc.compiler.native_quil_to_executable(program)
bitstrings = qc.run(executable)
qubits = list(program.get_qubits())
for i in range(len(qubits)):
avg = np.mean(bitstrings[:, i])
results.append({
'Qubit': qubits[i],
'Time': t,
'Num_bitstrings': len(bitstrings),
'Average': float(avg),
'Program': program,
})
df = pd.DataFrame(results)
return df
def estimate_t1(df: pd.DataFrame):
"""
Estimate T1 from experimental data.
:param df: A pandas DataFrame of experimental T1 results to plot
:return: pandas DataFrame
"""
results = []
for q in df['Qubit'].unique():
df2 = df[df['Qubit'] == q].sort_values('Time')
x_data = df2['Time']
y_data = df2['Average']
try:
fit_params, fit_params_errs = fit_to_exponential_decay_curve(x_data, y_data)
results.append({
'Qubit': q,
'T1': fit_params[1] / MICROSECOND,
'Fit_params': fit_params,
'Fit_params_errs': fit_params_errs,
'Message': None,
})
except RuntimeError:
print(f"Could not fit to experimental data for qubit {q}")
results.append({
'Qubit': q,
'T1': None,
'Fit_params': None,
'Fit_params_errs': None,
'Message': 'Could not fit to experimental data for qubit' + str(q),
})
return results
def plot_t1_estimate_over_data(df: pd.DataFrame,
qubits: list = None,
filename: str = None) -> None:
"""
    Plot T1 experimental data and the estimated value of T1 as an exponential decay curve.
    :param df: A pandas DataFrame of experimental results to plot and fit an exponential decay curve to.
    :param qubits: A list of qubits that you actually want plotted. The default is all qubits.
    :param filename: If provided, the plot is saved to this file.
:return: None
"""
if qubits is None:
qubits = df['Qubit'].unique().tolist()
# check the user specified valid qubits
for qbx in qubits:
if qbx not in df['Qubit'].unique():
raise ValueError("The list of qubits does not match the ones you experimented on.")
for q in qubits:
df2 = df[df['Qubit'] == q].sort_values('Time')
x_data = df2['Time']
y_data = df2['Average']
plt.plot(x_data / MICROSECOND, y_data, 'o-', label=f"QC{q} T1 data")
try:
fit_params, fit_params_errs = fit_to_exponential_decay_curve(x_data, y_data)
except RuntimeError:
print(f"Could not fit to experimental data for qubit {q}")
else:
plt.plot(x_data / MICROSECOND, exponential_decay_curve(x_data, *fit_params),
label=f"QC{q} fit: T1={fit_params[1] / MICROSECOND:.2f}us")
plt.xlabel("Time [us]")
plt.ylabel("Pr(measuring 1)")
plt.title("T1 decay")
plt.legend(loc='best')
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
plt.show()
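# A minimal end-to-end sketch (not part of the original module): running the T1 routines
# above against a QVM. `get_qc` is assumed to be importable from pyquil, and the qubit
# indices and stop time are placeholders.
def _example_t1_workflow():
    from pyquil import get_qc
    qc = get_qc('2q-qvm')
    experiments = generate_t1_experiments(qubits=[0, 1], stop_time=30 * MICROSECOND)
    results = acquire_data_t1(qc, experiments)
    plot_t1_estimate_over_data(results)
    return estimate_t1(results)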
# ==================================================================================================
# T2 star and T2 echo functions
# ==================================================================================================
def generate_single_t2_star_experiment(qubits: Union[int, List[int]],
time: float,
detuning: float,
n_shots: int = 1000) -> Program:
"""
Return a T2 star program in native Quil for a single time point.
:param qubits: Which qubits to measure.
:param time: The decay time before measurement.
:param detuning: The additional detuning frequency about the z axis.
:param n_shots: The number of shots to average over for the data point.
:return: A T2 Program.
"""
program = Program()
try:
len(qubits)
except TypeError:
qubits = [qubits]
ro = program.declare('ro', 'BIT', len(qubits))
for q in qubits:
program += RX(np.pi / 2, q)
program += Pragma('DELAY', [q], str(time))
program += RZ(2 * np.pi * time * detuning, q)
program += RX(np.pi / 2, q)
for i in range(len(qubits)):
program += MEASURE(qubits[i], ro[i])
program.wrap_in_numshots_loop(n_shots)
return program
def generate_t2_star_experiments(qubits: Union[int, List[int]],
stop_time: float,
detuning: float = 5e6,
n_shots: int = 1000,
num_points: int = 15) -> pd.DataFrame:
"""
    Return a DataFrame containing programs which, when run in sequence, constitute a T2 star
    experiment to measure the T2 star coherence decay time.
:param qubits: Which qubits to measure.
:param stop_time: The maximum decay time to measure at.
:param detuning: The additional detuning frequency about the z axis.
:param n_shots: The number of shots to average over for each data point.
:param num_points: The number of points for each T2 curve.
:return: pandas DataFrame with columns: time, program, detuning
"""
start_time = 0
time_and_programs = []
for t in np.linspace(start_time, stop_time, num_points):
# TODO: avoid aliasing while being mindful of the 20ns resolution in the QCS stack
time_and_programs.append({
'Time': t,
'Program': generate_single_t2_star_experiment(qubits, t, detuning, n_shots=n_shots),
'Detuning': detuning,
})
return pd.DataFrame(time_and_programs)
def generate_single_t2_echo_experiment(qubits: Union[int, List[int]],
time: float,
detuning: float,
n_shots: int = 1000) -> Program:
"""
Return a T2 echo program in native Quil for a single time point.
:param qubits: Which qubits to measure.
:param time: The decay time before measurement.
:param detuning: The additional detuning frequency about the z axis.
:param n_shots: The number of shots to average over for the data point.
:return: A T2 Program.
"""
program = Program()
try:
len(qubits)
except TypeError:
qubits = [qubits]
ro = program.declare('ro', 'BIT', len(qubits))
for q in qubits:
# prepare plus state |+>
program += RX(np.pi / 2, q)
# wait half of the delay
program += Pragma('DELAY', [q], str(time / 2))
# apply an X gate compiled out of RX(90)
program += RX(np.pi / 2, q)
program += RX(np.pi / 2, q)
# wait the other half of the delay
program += Pragma('DELAY', [q], str(time / 2))
program += RZ(2 * np.pi * time * detuning, q)
program += RX(np.pi / 2, q)
for i in range(len(qubits)):
program += MEASURE(qubits[i], ro[i])
program.wrap_in_numshots_loop(n_shots)
return program
def generate_t2_echo_experiments(qubits: Union[int, List[int]],
stop_time: float,
detuning: float = 5e6,
n_shots: int = 1000,
num_points: int = 15) -> pd.DataFrame:
"""
    Return a DataFrame containing programs which, when run in sequence, constitute a T2 echo
    experiment to measure the T2 echo coherence decay time.
:param qubits: Which qubits to measure.
:param stop_time: The maximum decay time to measure at.
:param detuning: The additional detuning frequency about the z axis.
:param n_shots: The number of shots to average over for each data point.
:param num_points: The number of points for each T2 curve.
:return: pandas DataFrame with columns: time, program, detuning
"""
start_time = 0
time_and_programs = []
for t in np.linspace(start_time, stop_time, num_points):
# TODO: avoid aliasing while being mindful of the 20ns resolution in the QCS stack
time_and_programs.append({
'Time': t,
'Program': generate_single_t2_echo_experiment(qubits, t, detuning, n_shots=n_shots),
'Detuning': detuning,
})
return pd.DataFrame(time_and_programs)
def acquire_data_t2(qc: QuantumComputer,
t2_experiment: pd.DataFrame,
) -> pd.DataFrame:
"""
Execute experiments to measure the T2 star or T2 echo decay time of 1 or more qubits.
:param qc: The QuantumComputer to run the experiment on
    :param t2_experiment: A pandas DataFrame containing columns: time, T2 program, detuning
    :return: pandas DataFrame containing T2 results, and the detuning used in creating the
        experiments for those results.
"""
results = []
for index, row in t2_experiment.iterrows():
t = row['Time']
program = row['Program']
detuning = row['Detuning']
executable = qc.compiler.native_quil_to_executable(program)
bitstrings = qc.run(executable)
qubits = list(program.get_qubits())
for i in range(len(qubits)):
avg = np.mean(bitstrings[:, i])
results.append({
'Qubit': qubits[i],
'Time': t,
'Num_bitstrings': len(bitstrings),
'Average': float(avg),
'Detuning': float(detuning),
})
return pd.DataFrame(results)
def estimate_t2(df: pd.DataFrame) -> pd.DataFrame:
"""
Estimate T2 star or T2 echo from experimental data.
    :param df: A pandas DataFrame with experimental T2 results, including the detuning used
        in experiment creation
    :return: pandas DataFrame
"""
results = []
for q in df['Qubit'].unique():
df2 = df[df['Qubit'] == q].sort_values('Time')
x_data = df2['Time']
y_data = df2['Average']
detuning = df2['Detuning'].values[0]
try:
fit_params, fit_params_errs = fit_to_exponentially_decaying_sinusoidal_curve(x_data,
y_data,
detuning)
results.append({
'Qubit': q,
'T2': fit_params[1] / MICROSECOND,
'Freq': fit_params[2] / MHZ,
'Fit_params': fit_params,
'Fit_params_errs': fit_params_errs,
'Message': None,
})
except RuntimeError:
print(f"Could not fit to experimental data for qubit {q}")
results.append({
'Qubit': q,
'T2': None,
'Freq': None,
'Fit_params': None,
'Fit_params_errs': None,
'Message': 'Could not fit to experimental data for qubit' + str(q),
})
return pd.DataFrame(results)
def plot_t2_estimate_over_data(df: pd.DataFrame,
qubits: list = None,
t2_type: str = 'unknown',
filename: str = None) -> None:
"""
    Plot T2 star or T2 echo experimental data and the estimated value of T2 as an
    exponentially decaying sinusoidal curve.
    :param df: A pandas DataFrame containing experimental results to plot.
    :param qubits: A list of qubits that you actually want plotted. The default is all qubits.
    :param t2_type: String, either 'star' or 'echo'.
    :param filename: If provided, the plot is saved to this file.
:return: None
"""
if qubits is None:
qubits = df['Qubit'].unique().tolist()
# check the user specified valid qubits
for qbx in qubits:
if qbx not in df['Qubit'].unique():
raise ValueError("The list of qubits does not match the ones you experimented on.")
for q in qubits:
df2 = df[df['Qubit'] == q].sort_values('Time')
x_data = df2['Time']
y_data = df2['Average']
detuning = df2['Detuning'].values[0]
plt.plot(x_data / MICROSECOND, y_data, 'o-', label=f"Qubit {q} T2 data")
try:
fit_params, fit_params_errs = fit_to_exponentially_decaying_sinusoidal_curve(x_data,
y_data,
detuning)
except RuntimeError:
print(f"Could not fit to experimental data for qubit {q}")
else:
plt.plot(x_data / MICROSECOND,
exponentially_decaying_sinusoidal_curve(x_data, *fit_params),
label=f"QC{q} fit: freq={fit_params[2] / MHZ:.2f}MHz, "
f""f"T2={fit_params[1] / MICROSECOND:.2f}us")
plt.xlabel("Time [µs]")
plt.ylabel("Pr(measuring 1)")
if t2_type.lower() == 'star':
plt.title("$T_2^*$ (Ramsey) decay")
elif t2_type.lower() == 'echo':
plt.title("$T_2$ (Echo) decay")
else:
plt.title("$T_2$ (unknown) decay")
plt.legend(loc='best')
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
plt.show()
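# A minimal sketch (not part of the original module) mirroring the T1 workflow above for
# a T2* (Ramsey) measurement. The detuning and stop time are placeholders.
def _example_t2_star_workflow():
    from pyquil import get_qc
    qc = get_qc('2q-qvm')
    experiments = generate_t2_star_experiments(qubits=[0, 1],
                                               stop_time=20 * MICROSECOND,
                                               detuning=5 * MHZ)
    results = acquire_data_t2(qc, experiments)
    plot_t2_estimate_over_data(results, t2_type='star')
    return estimate_t2(results)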
# ==================================================================================================
# TODO CPMG
# ==================================================================================================
# ==================================================================================================
# Rabi
# ==================================================================================================
def generate_single_rabi_experiment(qubits: Union[int, List[int]],
theta: float,
n_shots: int = 1000) -> Program:
"""
Return a Rabi program in native Quil rotated through the given angle.
Rabi oscillations are observed by applying successively larger rotations to the same initial
state.
:param qubits: Which qubits to measure.
:param theta: The angle of the Rabi RX rotation.
:param n_shots: The number of shots to average over for the data point.
:return: A Program that rotates through a given angle about the X axis.
"""
program = Program()
try:
len(qubits)
except TypeError:
qubits = [qubits]
ro = program.declare('ro', 'BIT', len(qubits))
for q in qubits:
program += RX(theta, q)
for i in range(len(qubits)):
program += MEASURE(qubits[i], ro[i])
program.wrap_in_numshots_loop(n_shots)
return program
def generate_rabi_experiments(qubits: Union[int, List[int]],
n_shots: int = 1000,
num_points: int = 15) -> pd.DataFrame:
"""
Return a DataFrame containing programs which, when run in sequence, constitute a Rabi
experiment.
Rabi oscillations are observed by applying successively larger rotations to the same initial
state.
:param qubits: Which qubits to measure.
:param n_shots: The number of shots to average over for each data point
:param num_points: The number of points for each Rabi curve
:return: pandas DataFrame with columns: angle, program
"""
angle_and_programs = []
for theta in np.linspace(0.0, 2 * np.pi, num_points):
angle_and_programs.append({
'Angle': theta,
'Program': generate_single_rabi_experiment(qubits, theta, n_shots),
})
return pd.DataFrame(angle_and_programs)
def acquire_data_rabi(qc: QuantumComputer,
rabi_experiment: pd.DataFrame,
filename: str = None) -> pd.DataFrame:
"""
Execute experiments to measure Rabi flop one or more qubits.
:param qc: The QuantumComputer to run the experiment on
    :param rabi_experiment: pandas DataFrame: (theta, Rabi program)
    :param filename: If provided, the results are serialized to this JSON file.
    :return: DataFrame with Rabi results
"""
results = []
for index, row in rabi_experiment.iterrows():
theta = row['Angle']
program = row['Program']
executable = qc.compiler.native_quil_to_executable(program)
bitstrings = qc.run(executable)
qubits = list(program.get_qubits())
for i in range(len(qubits)):
avg = np.mean(bitstrings[:, i])
results.append({
'Qubit': qubits[i],
'Angle': theta,
'Num_bitstrings': len(bitstrings),
'Average': float(avg),
})
if filename:
pd.DataFrame(results).to_json(filename)
return pd.DataFrame(results)
def estimate_rabi(df: pd.DataFrame):
"""
Estimate Rabi oscillation from experimental data.
:param df: Experimental Rabi results to estimate
:return: pandas DataFrame
"""
results = []
for q in df['Qubit'].unique():
df2 = df[df['Qubit'] == q].sort_values('Angle')
angles = df2['Angle']
prob_of_one = df2['Average']
try:
# fit to sinusoid
fit_params, fit_params_errs = fit_to_sinusoidal_waveform(angles, prob_of_one)
results.append({
'Qubit': q,
'Angle': fit_params[1],
'Prob_of_one': fit_params[2],
'Fit_params': fit_params,
'Fit_params_errs': fit_params_errs,
'Message': None,
})
except RuntimeError:
print(f"Could not fit to experimental data for qubit {q}")
results.append({
'Qubit': q,
'Angle': None,
'Prob_of_one': None,
'Fit_params': None,
'Fit_params_errs': None,
'Message': 'Could not fit to experimental data for qubit' + str(q),
})
return pd.DataFrame(results)
def plot_rabi_estimate_over_data(df: pd.DataFrame,
qubits: list = None,
filename: str = None) -> None:
"""
Plot Rabi oscillation experimental data and estimated curve.
:param df: Experimental results to plot and fit curve to.
:param qubits: A list of qubits that you actually want plotted. The default is all qubits.
:param filename: String.
:return: None
"""
if qubits is None:
qubits = df['Qubit'].unique().tolist()
# check the user specified valid qubits
for qbx in qubits:
if qbx not in df['Qubit'].unique():
raise ValueError("The list of qubits does not match the ones you experimented on.")
for q in qubits:
df2 = df[df['Qubit'] == q].sort_values('Angle')
angles = df2['Angle']
prob_of_one = df2['Average']
# plot raw data
plt.plot(angles, prob_of_one, 'o-', label=f"qubit {q} Rabi data")
try:
# fit to sinusoid
fit_params, fit_params_errs = fit_to_sinusoidal_waveform(angles, prob_of_one)
except RuntimeError:
print(f"Could not fit to experimental data for qubit {q}")
else:
# overlay fitted sinusoidal curve
plt.plot(angles, sinusoidal_waveform(angles, *fit_params),
label=f"qubit {q} fitted line")
plt.xlabel("RX angle [rad]")
plt.ylabel("Pr($|1\langle)")
plt.title("Rabi flop")
plt.legend(loc='best')
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
plt.show()
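# A minimal sketch (not part of the original module): measuring a Rabi flop on one qubit
# of a QVM using the routines above.
def _example_rabi_workflow():
    from pyquil import get_qc
    qc = get_qc('2q-qvm')
    experiments = generate_rabi_experiments(qubits=[0], num_points=20)
    results = acquire_data_rabi(qc, experiments)
    plot_rabi_estimate_over_data(results)
    return estimate_rabi(results)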
# ==================================================================================================
# CZ phase Ramsey
# ==================================================================================================
def generate_cz_phase_ramsey_program(qb: int, other_qb: int, n_shots: int = 1000) -> Program:
"""
    Generate a parametric CZ phase Ramsey program; the phase kick is supplied at run time
    through the declared `theta` memory region.
    :param qb: The qubit to move around the Bloch sphere and measure the incurred RZ on.
    :param other_qb: The other qubit that constitutes a two-qubit pair along with `qb`.
    :param n_shots: The number of shots to average over for each data point.
    :return: A parametric Program for performing a CZ Ramsey experiment.
"""
program = Program()
# NOTE: only need readout register for `qb` not `other_qb` since `other_qb` is only
# needed to identify which CZ gate we're using
ro = program.declare('ro', 'BIT', 1)
theta = program.declare('theta', 'REAL')
# go to the equator
program += Program(RX(np.pi / 2, qb))
# apply the CZ gate - note that CZ is symmetric, so the order of qubits doesn't matter
program += Program(CZ(qb, other_qb))
# go to |1> after a phase kick
program += Program(RZ(theta, qb), RX(np.pi / 2, qb))
program += MEASURE(qb, ro[0])
program.wrap_in_numshots_loop(n_shots)
return program
def generate_cz_phase_ramsey_experiment(edges: List[Tuple[int, int]],
start_phase: float = 0.0,
stop_phase: float = 2 * np.pi,
num_points: int = 15,
num_shots: int = 1000):
'''
Returns a DataFrame of parameters and programs that constitute a CZ phase ramsey experiment.
:param edges: List of Tuples containing edges that one can perform a CZ on.
:param start_phase: The starting phase for the CZ phase Ramsey experiment.
:param stop_phase: The stopping phase for the CZ phase Ramsey experiment.
:param num_points: The number of points to sample at between the starting and stopping phase.
:param num_shots: The number of shots to average over for each data point.
:return: pandas DataFrame
'''
    cz_experiment = []
rz_qubit = [] # this is the qubit to which the RZ is applied
for edge in edges:
qubit, other_qubit = edge
# first qubit gets RZ
        cz_experiment.append({
'Edge': tuple(edge),
'Rz_qubit': qubit,
'Program': generate_cz_phase_ramsey_program(qubit, other_qubit, num_shots),
'Start_phase': start_phase,
'Stop_phase': stop_phase,
'Num_points': num_points,
'Num_shots': num_shots,
})
# second qubit gets RZ
        cz_experiment.append({
'Edge': tuple(edge),
'Rz_qubit': other_qubit,
'Program': generate_cz_phase_ramsey_program(other_qubit, qubit, num_shots),
'Start_phase': start_phase,
'Stop_phase': stop_phase,
'Num_points': num_points,
'Num_shots': num_shots,
})
    return pd.DataFrame(cz_experiment)
def acquire_data_cz_phase_ramsey(qc: QuantumComputer,
cz_experiment: pd.DataFrame,
filename: str = None) -> pd.DataFrame:
"""
Execute experiments to measure the RZ incurred as a result of a CZ gate.
    :param qc: The QuantumComputer to run the experiment on
    :param cz_experiment: pandas DataFrame of CZ phase Ramsey experiments, as produced by
        generate_cz_phase_ramsey_experiment
:param filename: The name of the file to write JSON-serialized results to
:return: pandas DataFrame
"""
results = []
for index, row in cz_experiment.iterrows():
parametric_ramsey_prog = row['Program']
edge = row['Edge']
rz_qb = row['Rz_qubit']
start_phase = row['Start_phase']
stop_phase = row['Stop_phase']
num_points = row['Num_points']
num_shots = row['Num_shots']
binary = compile_parametric_program(qc, parametric_ramsey_prog, num_shots=num_shots)
qc.qam.load(binary)
for theta in np.linspace(start_phase, stop_phase, num_points):
qc.qam.write_memory(region_name='theta', value=theta)
qc.qam.run()
qc.qam.wait()
bitstrings = qc.qam.read_from_memory_region(region_name="ro")
avg = np.mean(bitstrings[:, 0])
results.append({
'Edge': edge,
'Rz_qubit': rz_qb,
'Phase': theta,
'Num_bitstrings': len(bitstrings),
'Average': float(avg),
})
if filename:
pd.DataFrame(results).to_json(filename)
return pd.DataFrame(results)
def estimate_cz_phase_ramsey(df: pd.DataFrame) -> pd.DataFrame:
"""
    Estimate the RZ incurred by a CZ gate from CZ phase Ramsey experimental data.
    :param df: Experimental CZ phase Ramsey results to fit a sinusoid to.
    :return: pandas DataFrame of fit results per (edge, qubit) pair.
"""
results = []
edges = df['Edge'].unique()
for id_row, edge in enumerate(edges):
for id_col, qubit in enumerate(edge):
qubit_df = df[(df['Rz_qubit'] == qubit) & (df['Edge'] == edge)].sort_values('Phase')
phases = qubit_df['Phase']
prob_of_one = qubit_df['Average']
rz_qb = qubit_df['Rz_qubit'].values[0]
try:
# fit to sinusoid
fit_params, fit_params_errs = fit_to_sinusoidal_waveform(phases, prob_of_one)
results.append({
'Edge': edge,
'Rz_qubit': rz_qb,
'Angle': fit_params[1],
'Prob_of_one': fit_params[2],
'Fit_params': fit_params,
'Fit_params_errs': fit_params_errs,
'Message': None,
})
except RuntimeError:
print(f"Could not fit to experimental data for edge {edge}")
results.append({
'Edge': edge,
'Rz_qubit': rz_qb,
'Angle': None,
'Prob_of_one': None,
'Fit_params': None,
'Fit_params_errs': None,
                    'Message': 'Could not fit to experimental data for edge ' + str(edge),
})
return pd.DataFrame(results)
def plot_cz_phase_estimate_over_data(df: pd.DataFrame,
filename: str = None) -> None:
"""
Plot Ramsey experimental data, the fitted sinusoid, and the maximum of that sinusoid.
    :param df: Experimental CZ phase Ramsey results to plot and fit a sinusoid to.
    :param filename: If not None, the path to save the figure to.
    :return: None
"""
edges = df['Edge'].unique()
if len(edges) == 1:
        # with a single edge, keep a 2x2 grid (one row stays empty) so that `axes` remains
        # 2-D; otherwise `axes.shape == (2,)` and the `axes[id_row, id_col]` indexing below fails
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(24, 30))
else:
fig, axes = plt.subplots(nrows=len(edges), ncols=2, figsize=(24, 10 * len(edges)))
for id_row, edge in enumerate(edges):
for id_col, qubit in enumerate(edge):
qubit_df = df[(df['Rz_qubit'] == qubit) & (df['Edge'] == edge)].sort_values('Phase')
phases = qubit_df['Phase']
prob_of_one = qubit_df['Average']
# plot raw data
axes[id_row, id_col].plot(phases, prob_of_one, 'o',
label=f"qubit{qubit} CZ Ramsey data")
try:
# fit to sinusoid
fit_params, fit_params_errs = fit_to_sinusoidal_waveform(phases,
prob_of_one)
except RuntimeError:
print(f"Could not fit to experimental data for qubit {qubit}")
else:
# find max excited state visibility (ESV) and propagate error from fit params
max_ESV, max_ESV_err = get_peak_from_fit_params(fit_params, fit_params_errs)
# overlay fitted curve and vertical line at maximum ESV
axes[id_row, id_col].plot(phases, sinusoidal_waveform(phases, *fit_params),
label=f"QC{qubit} fitted line")
axes[id_row, id_col].axvline(max_ESV,
label=f"QC{qubit} max ESV={max_ESV:.3f}+/-{max_ESV_err:.3f} rad")
axes[id_row, id_col].set_xlabel("Phase on second +X/2 gate [rad]")
            axes[id_row, id_col].set_ylabel(r"Pr($|1\rangle$)")
axes[id_row, id_col].set_title(f"CZ Phase Ramsey fringes on QC{qubit}\n"
f"due to CZ_{edge[0]}_{edge[1]} application")
axes[id_row, id_col].legend(loc='best')
if filename is not None:
plt.savefig(filename)
plt.show()
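# Illustrative end-to-end sketch (not part of the original module): generating, running,
# fitting and plotting a CZ phase Ramsey experiment. `qc` is assumed to be a QuantumComputer
# handle obtained elsewhere (e.g. via pyquil's get_qc); the edge list and shot counts are
# assumptions for illustration.
#
#   cz_expt = generate_cz_phase_ramsey_experiment(edges=[(0, 1)], num_points=15, num_shots=500)
#   cz_df = acquire_data_cz_phase_ramsey(qc, cz_expt)
#   cz_fits = estimate_cz_phase_ramsey(cz_df)
#   plot_cz_phase_estimate_over_data(cz_df, filename='cz_ramsey.png')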
# ==================================================================================================
# Fits and so forth
# ==================================================================================================
def exponential_decay_curve(t: Union[float, np.ndarray],
amplitude: float,
time_decay_constant: float,
t_offset: float = 0.0) -> Union[float, np.ndarray]:
"""
Calculate exponential decay at a series of points.
:param t: The independent variable with respect to which decay is calculated.
:param amplitude: The amplitude of the decay curve.
:param time_decay_constant: The time decay constant - in this case T1 - of the decay curve.
:param t_offset: The time offset of the curve, assumed to be 0.0.
:return: The exponential decay at the point(s) in time.
"""
return amplitude * np.exp(-1 * (t - t_offset) / time_decay_constant)
def fit_to_exponential_decay_curve(x_data: np.ndarray,
y_data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Fit experimental data to exponential decay curve.
:param x_data: Independent data to fit to.
:param y_data: Experimental, dependent data to fit to.
:return: Arrays of fitted decay curve parameters and their errors
"""
params, params_covariance = optimize.curve_fit(exponential_decay_curve,
x_data, y_data,
p0=[1.0, 15e-6, 0.0])
# parameter error extraction from
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
params_errs = np.sqrt(np.diag(params_covariance))
return params, params_errs
def sinusoidal_waveform(x: float,
amplitude: float,
baseline: float,
frequency: float,
                        x_offset: float) -> Union[float, np.ndarray]:
"""
Calculate sinusoidal response at a series of points.
:param x: The independent variable with respect to which the sinusoidal response is calculated.
:param amplitude: The amplitude of the sinusoid.
:param baseline: The baseline of the sinusoid.
:param frequency: The frequency of the sinusoid.
:param x_offset: The x offset of the sinusoid.
:return: The sinusoidal response at the given phases(s).
"""
return amplitude * np.sin(frequency * x + x_offset) + baseline
def fit_to_sinusoidal_waveform(x_data: np.ndarray,
y_data: List[float],
displayflag: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Fit experimental data to sinusoid.
:param x_data: Independent data to fit to.
:param y_data: Experimental, dependent data to fit to.
:param displayflag: If True displays results from scipy curve fit analysis.
:return: Arrays of fitted decay curve parameters and their standard deviations
"""
params, params_covariance = optimize.curve_fit(sinusoidal_waveform, x_data, y_data,
p0=[0.5, 0.5, 1.0, np.pi / 2])
# parameter error extraction from
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
params_errs = np.sqrt(np.diag(params_covariance))
# interleave params and params_errs
print_params = []
for idx in range(len(params)):
print_params.append(params[idx])
print_params.append(params_errs[idx])
if displayflag:
print("scipy curve fitting analysis returned\n"
"amplitude:\t{:.5f} +/- {:.5f}\n"
"baseline:\t{:.5f} +/- {:.5f}\n"
"frequency:\t{:.5f} +/- {:.5f}\n"
"x offset:\t{:.5f} +/- {:.5f}".format(*print_params))
return params, params_errs
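# Quick self-check sketch (illustrative): fitting noiseless synthetic data generated by
# sinusoidal_waveform should recover the generating parameters to within the reported errors.
# The sample values below are assumptions chosen only for the example.
#
#   xs = np.linspace(0, 2 * np.pi, 50)
#   ys = sinusoidal_waveform(xs, amplitude=0.45, baseline=0.5, frequency=1.0, x_offset=np.pi / 2)
#   params, errs = fit_to_sinusoidal_waveform(xs, ys)   # params ~ [0.45, 0.5, 1.0, pi/2]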
def get_peak_from_fit_params(fit_params: np.ndarray,
fit_params_errs: np.ndarray) -> Tuple[float, float]:
"""
Extract peak from the fit parameters returned by scipy.optimize.curve_fit.
:param fit_params: fit parameters out of scipy.optimize.curve_fit
:param fit_params_errs: standard deviations on the fit parameters from scipy.optimize.curve_fit
    :return: The phase corresponding to the maximum excited state visibility and its st. dev.
"""
# TODO: do away with hard-coded indices for fit params
x0 = fit_params[-1]
x0_err = fit_params_errs[-1]
freq = fit_params[-2]
freq_err = fit_params_errs[-2]
print("propagating error using x_0 = {} +/- {} and freq = {} +/- {}".format(x0, x0_err,
freq, freq_err))
# find the phase corresponding to maximum excited state visibility (ESV) using the fit params
max_ESV = (np.pi / 2 - x0) / freq
# max_ESV_err obtained by applying error propagation formula to max_ESV
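    # with f(x0, freq) = (pi/2 - x0) / freq, first-order propagation gives
    #   sigma_f^2 = (df/dx0)^2 * sigma_x0^2 + (df/dfreq)^2 * sigma_freq^2
    #             = (sigma_x0 / freq)^2 + ((pi/2 - x0) * sigma_freq / freq^2)^2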
max_ESV_err = np.sqrt((x0_err / freq) ** 2 + ((np.pi / 2 - x0) * (freq_err / freq ** 2)) ** 2)
print("\nmaximum excited state visibility observed at x = {} +/- {}".format(max_ESV,
max_ESV_err))
return max_ESV, max_ESV_err
def exponentially_decaying_sinusoidal_curve(t: Union[float, np.ndarray],
amplitude: float,
time_decay_constant: float,
frequency: float,
baseline: float,
sin_t_offset: float = 0.0) -> Union[float, np.ndarray]:
"""
Calculate exponentially decaying sinusoid at a series of points.
:param t: The independent variable with respect to which decay is calculated.
:param amplitude: The amplitude of the decay curve.
:param time_decay_constant: The time decay constant - in this case T2 - of the decay curve.
:param frequency: The frequency to fit to the Ramsey fringes.
:param baseline: The baseline of the Ramsey fringes.
:param sin_t_offset: The time offset of the sinusoidal curve, assumed to be 0.0.
:return: The exponentially decaying sinusoid evaluated at the point(s) in time.
"""
return amplitude * np.exp(-1 * t / time_decay_constant) * \
np.sin(frequency * (t - sin_t_offset)) + baseline
def fit_to_exponentially_decaying_sinusoidal_curve(x_data: np.ndarray,
y_data: np.ndarray,
detuning: float = 5e6) -> Tuple[np.ndarray,
np.ndarray]:
"""
Fit experimental data to exponential decay curve.
:param x_data: Independent data to fit to.
:param y_data: Experimental, dependent data to fit to.
:param detuning: Detuning frequency used in experiment creation.
:return: Arrays of fitted decay curve parameters and their errors
"""
params, params_covariance = optimize.curve_fit(exponentially_decaying_sinusoidal_curve,
x_data, y_data,
p0=[0.5, 15e-6, detuning, 0.5, 0.0])
# parameter error extraction from
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
params_errs = np.sqrt(np.diag(params_covariance))
return params, params_errs
def compile_parametric_program(qc: QuantumComputer,
parametric_prog: Program,
                               num_shots: int = 1000):
"""
Compile the parametric program, and transfer the binary to the quantum device.
:param qc: The QuantumComputer to run the experiment on.
:param parametric_prog: The parametric program to compile and transfer to the quantum device.
:param num_shots: The number of shots to average over for each data point.
:return: The binary from the compiled parametric program.
"""
parametric_prog.wrap_in_numshots_loop(shots=num_shots)
binary = qc.compiler.native_quil_to_executable(parametric_prog)
return binary
def remove_qubits_from_qubit_list(qubit_list: List[int],
qubits_to_remove: Union[int, List[int]]) -> Union[int, List[int]]:
"""
Remove the selected qubits from the given list and return the pruned list.
:param qubit_list: The qubit list to remove the selected qubits from.
:param qubits_to_remove: The qubits to remove from the selected list.
:return: The given qubit list with the selected qubits removed
"""
# cast qubits_to_remove as a list
try:
len(qubits_to_remove)
except TypeError:
qubits_to_remove = [qubits_to_remove]
# remove list of qubits_to_remove
new_qubit_list = list(set(qubit_list) - set(qubits_to_remove))
# return an int or a list, as appropriate
if len(new_qubit_list) == 1:
return new_qubit_list[0]
else:
return new_qubit_list
| StarcoderdataPython |
4800847 | # vim: fileencoding=utf-8
import fnmatch
import os
import werkzeug
from docutils import nodes
from docutils.core import publish_parts
from docutils.parsers.rst import Directive, directives
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import LEXERS, guess_lexer_for_filename, TextLexer
from blikit import utils
from blikit.models import BlobObject
from blikit.docutilsext import Writer
class Document(object):
title = None
description = None
body = None
author_name = None
last_modified = None
created = None
def __init__(self, **attrs):
for name, value in attrs.iteritems():
if name.startswith('_'):
continue
setattr(self, name, value)
renderer_map = []
def register_for(*pats):
def _register_for(func):
for p in pats:
renderer_map.append((p, func))
return func
return _register_for
def render_blob(ctx, blob_obj):
u'''render BlobObject as HTML portion using proper render function
return <Document object>
if there is no render function for this object, return None
'''
if blob_obj.commit.sha is None:
# IndexObject
# don't cache
pass
else:
cache_key = 'render.render_blob:%s:%s' % \
(blob_obj.commit.sha, blob_obj.abs_name)
cached = ctx.app.cache.get(cache_key)
if cached is not None:
return cached
if not isinstance(blob_obj, BlobObject):
# TODO: raise proper exception
# XXX: may this function treat TreeObject?
raise Exception
for p, func in renderer_map:
if fnmatch.fnmatch(blob_obj.name, p):
result = func(ctx, blob_obj)
break
else:
result = None
if isinstance(result, Document):
if result.author_name is None:
result.author_name = blob_obj.author_name
if result.last_modified is None:
result.last_modified = blob_obj.last_modified
if result.created is None:
result.created = blob_obj.created
if blob_obj.commit.sha is not None:
ctx.app.cache.set(cache_key, result)
return result
@register_for('*.txt')
def render_text(ctx, blob_obj):
udata = blob_obj.data.decode('utf-8', 'replace')
return Document(title=blob_obj.name,
body=u'<pre>' + werkzeug.escape(udata) + u'</pre>')
@register_for('*.rst')
def render_rst(ctx, blob_obj):
parts = publish_parts(blob_obj.data, writer=Writer(),
settings_overrides={'initial_header_level': 2,
'syntax_highlight': 'short',
'ctx': ctx, 'obj': blob_obj})
parts['description'] = parts['title']
return Document(**parts)
@register_for('*.png', '*.jpg', '*.jpeg', '*.gif')
def render_images(ctx, blob_obj):
w, h = utils.calc_thumb_size(blob_obj.data, (640, 480))
url = ctx.url_for('view_obj',
rev=blob_obj.commit.name, path=blob_obj.root_path)
raw_url = url + '?raw=1'
body = '<a href="%s"><img src="%s" width="%d" height="%s"></a>' % \
(raw_url, raw_url, w, h)
return Document(title=blob_obj.name, body=body)
formatter = HtmlFormatter(noclasses=True, linenos=True)
@register_for(*[p for l in LEXERS.values() for p in l[3]])
def render_sourcecode(ctx, blob_obj):
try:
data = blob_obj.data.decode('utf-8')
except UnicodeDecodeError:
data = blob_obj.data
try:
lexer = guess_lexer_for_filename(blob_obj.name, data)
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
return Document(title=blob_obj.name,
description=lexer.name,
body=highlight(data, lexer, formatter))
@register_for('*')
def render_default(ctx, blob_obj):
if '\x00' in blob_obj.data:
# maybe binary file
# display download link
escaped = werkzeug.escape(blob_obj.name)
body = '<a href="%s?raw=1">download "%s"</a>' % (escaped, escaped)
return Document(title=blob_obj.name, body=body)
else:
# maybe some text file
# render like *.txt
return render_text(ctx, blob_obj)
| StarcoderdataPython |
3253940 | <gh_stars>0
#!/usr/bin/env python
from multi_circle_2 import Multi_circle_2
if __name__ == '__main__':
multi_circle_2 = Multi_circle_2(
[
#x , y, z, yaw, sleep
[0.0 , 0.0, 1.0, 0, 8],
[0.0 , 0.0 ,1.0, 0, 3],
[-0.3 , -1.4, 0.0, 0, 0],
]
)
multi_circle_2.run()
| StarcoderdataPython |
1786055 | <filename>ThirdParty/incremental/vtkincremental/src/incremental/__init__.py<gh_stars>1-10
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Versions for Python packages.
See L{Version}.
"""
from __future__ import division, absolute_import
import os
import sys
import warnings
#
# Compat functions
#
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
unicode = str
def _nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
try:
_cmp = cmp
except NameError:
def _cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def _comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
#
# Versioning
#
@_comparable
class _inf(object):
"""
An object that is bigger than all other objects.
"""
def __cmp__(self, other):
"""
@param other: Another object.
@type other: any
@return: 0 if other is inf, 1 otherwise.
@rtype: C{int}
"""
if other is _inf:
return 0
return 1
_inf = _inf()
class IncomparableVersions(TypeError):
"""
Two versions could not be compared.
"""
@_comparable
class Version(object):
"""
An encapsulation of a version for a project, with support for outputting
PEP-440 compatible version strings.
This class supports the standard major.minor.micro[rcN] scheme of
versioning, with support for "local versions" which may include a SVN
revision or Git SHA1 hash.
"""
def __init__(self, package, major, minor, micro, release_candidate=None,
prerelease=None, dev=None):
"""
@param package: Name of the package that this is a version of.
@type package: C{str}
@param major: The major version number.
@type major: C{int} or C{str} (for the "NEXT" symbol)
@param minor: The minor version number.
@type minor: C{int}
@param micro: The micro version number.
@type micro: C{int}
@param release_candidate: The release candidate number.
@type release_candidate: C{int}
@param prerelease: The prerelease number. (Deprecated)
@type prerelease: C{int}
@param dev: The development release number.
@type dev: C{int}
"""
if release_candidate and prerelease:
raise ValueError("Please only return one of these.")
elif prerelease and not release_candidate:
release_candidate = prerelease
warnings.warn(("Passing prerelease to incremental.Version was "
"deprecated in Incremental 16.9.0. Please pass "
"release_candidate instead."),
DeprecationWarning, stacklevel=2)
if major == "NEXT":
if minor or micro or release_candidate or dev:
raise ValueError(("When using NEXT, all other values except "
"Package must be 0."))
self.package = package
self.major = major
self.minor = minor
self.micro = micro
self.release_candidate = release_candidate
self.dev = dev
@property
def prerelease(self):
warnings.warn(("Accessing incremental.Version.prerelease was "
"deprecated in Incremental 16.9.0. Use "
"Version.release_candidate instead."),
DeprecationWarning, stacklevel=2),
return self.release_candidate
def short(self):
"""
Return a string in canonical short version format,
<major>.<minor>.<micro>[+rSVNVer/+gitsha1].
"""
s = self.base()
gitver = self._getGitVersion()
if not gitver:
svnver = self._getSVNVersion()
if svnver:
s += '+r' + _nativeString(svnver)
else:
s += '+' + gitver
return s
def local(self):
"""
Return a PEP440-compatible "local" representation of this L{Version}.
This includes a SVN revision or Git commit SHA1 hash, if available.
Examples:
- 14.4.0+r1223
- 1.2.3rc1+rb2e812003b5d5fcf08efd1dffed6afa98d44ac8c
- 12.10.1
- 3.4.8rc2
- 11.93.0rc1dev3
"""
return self.short()
def public(self):
"""
Return a PEP440-compatible "public" representation of this L{Version}.
Examples:
- 14.4.0
- 1.2.3rc1
- 14.2.1rc1dev9
- 16.04.0dev0
"""
return self.base()
def base(self):
"""
Like L{short}, but without the +rSVNVer or @gitsha1.
"""
if self.major == "NEXT":
return self.major
if self.release_candidate is None:
rc = ""
else:
rc = "rc%s" % (self.release_candidate,)
if self.dev is None:
dev = ""
else:
dev = "dev%s" % (self.dev,)
return '%r.%d.%d%s%s' % (self.major,
self.minor,
self.micro,
rc, dev)
def __repr__(self):
# Git repr
gitver = self._formatGitVersion()
if gitver:
gitver = ' #' + gitver
# SVN repr
svnver = self._formatSVNVersion()
if svnver:
svnver = ' #' + svnver
if self.release_candidate is None:
release_candidate = ""
else:
release_candidate = ", release_candidate=%r" % (
self.release_candidate,)
if self.dev is None:
dev = ""
else:
dev = ", dev=%r" % (self.dev,)
return '%s(%r, %r, %d, %d%s%s)%s' % (
self.__class__.__name__,
self.package,
self.major,
self.minor,
self.micro,
release_candidate,
dev,
gitver or svnver)
def __str__(self):
return '[%s, version %s]' % (
self.package,
self.short())
def __cmp__(self, other):
"""
Compare two versions, considering major versions, minor versions, micro
versions, then release candidates. Package names are case insensitive.
A version with a release candidate is always less than a version
without a release candidate. If both versions have release candidates,
they will be included in the comparison.
@param other: Another version.
@type other: L{Version}
@return: NotImplemented when the other object is not a Version, or one
of -1, 0, or 1.
@raise IncomparableVersions: when the package names of the versions
differ.
"""
if not isinstance(other, self.__class__):
return NotImplemented
if self.package.lower() != other.package.lower():
raise IncomparableVersions("%r != %r"
% (self.package, other.package))
if self.major == "NEXT":
major = _inf
else:
major = self.major
if self.release_candidate is None:
release_candidate = _inf
else:
release_candidate = self.release_candidate
if self.dev is None:
dev = _inf
else:
dev = self.dev
if other.major == "NEXT":
othermajor = _inf
else:
othermajor = other.major
if other.release_candidate is None:
otherrc = _inf
else:
otherrc = other.release_candidate
if other.dev is None:
otherdev = _inf
else:
otherdev = other.dev
x = _cmp((major,
self.minor,
self.micro,
release_candidate,
dev),
(othermajor,
other.minor,
other.micro,
otherrc,
otherdev))
return x
def _parseGitDir(self, directory):
headFile = os.path.abspath(os.path.join(directory, 'HEAD'))
with open(headFile, "r") as f:
headContent = f.read().strip()
if headContent.startswith("ref: "):
with open(os.path.abspath(
os.path.join(directory,
headContent.split(" ")[1]))) as f:
commit = f.read()
return commit.strip()
return headContent
def _getGitVersion(self):
"""
Given a package directory, walk up and find the git commit sha.
"""
mod = sys.modules.get(self.package)
if mod:
basepath = os.path.dirname(mod.__file__)
upOne = os.path.abspath(os.path.join(basepath, '..'))
if ".git" in os.listdir(upOne):
return self._parseGitDir(os.path.join(upOne, '.git'))
while True:
upOneMore = os.path.abspath(os.path.join(upOne, '..'))
if upOneMore == upOne:
return None
if ".git" in os.listdir(upOneMore):
return self._parseGitDir(os.path.join(upOneMore, '.git'))
upOne = upOneMore
def _parseSVNEntries_4(self, entriesFile):
"""
Given a readable file object which represents a .svn/entries file in
format version 4, return the revision as a string. We do this by
reading first XML element in the document that has a 'revision'
attribute.
"""
from xml.dom.minidom import parse
doc = parse(entriesFile).documentElement
for node in doc.childNodes:
if hasattr(node, 'getAttribute'):
rev = node.getAttribute('revision')
if rev is not None:
return rev.encode('ascii')
def _parseSVNEntries_8(self, entriesFile):
"""
Given a readable file object which represents a .svn/entries file in
format version 8, return the revision as a string.
"""
entriesFile.readline()
entriesFile.readline()
entriesFile.readline()
return entriesFile.readline().strip()
# Add handlers for version 9 and 10 formats, which are the same as
# version 8 as far as revision information is concerned.
_parseSVNEntries_9 = _parseSVNEntries_8
_parseSVNEntriesTenPlus = _parseSVNEntries_8
def _getSVNVersion(self):
"""
Figure out the SVN revision number based on the existence of
<package>/.svn/entries, and its contents. This requires discovering the
format version from the 'format' file and parsing the entries file
accordingly.
@return: None or string containing SVN Revision number.
"""
mod = sys.modules.get(self.package)
if mod:
svn = os.path.join(os.path.dirname(mod.__file__), '.svn')
if not os.path.exists(svn):
# It's not an svn working copy
return None
formatFile = os.path.join(svn, 'format')
if os.path.exists(formatFile):
# It looks like a less-than-version-10 working copy.
with open(formatFile, 'rb') as fObj:
format = fObj.read().strip()
parser = getattr(self,
'_parseSVNEntries_' + format.decode('ascii'),
None)
else:
# It looks like a version-10-or-greater working copy, which
# has version information in the entries file.
parser = self._parseSVNEntriesTenPlus
if parser is None:
return b'Unknown'
entriesFile = os.path.join(svn, 'entries')
entries = open(entriesFile, 'rb')
try:
try:
return parser(entries)
finally:
entries.close()
except:
return b'Unknown'
def _formatSVNVersion(self):
ver = self._getSVNVersion()
if ver is None:
return ''
return ' (SVN r%s)' % (ver,)
def _formatGitVersion(self):
ver = self._getGitVersion()
if ver is None:
return ''
return ' (Git %s)' % (ver,)
def getVersionString(version):
"""
Get a friendly string for the given version object.
@param version: A L{Version} object.
@return: A string containing the package and short version number.
"""
result = '%s %s' % (version.package, version.short())
return result
def _get_version(dist, keyword, value):
"""
Get the version from the package listed in the Distribution.
"""
if not value:
return
from distutils.command import build_py
sp_command = build_py.build_py(dist)
sp_command.finalize_options()
for item in sp_command.find_all_modules():
if item[1] == "_version":
version_file = {}
with open(item[2]) as f:
exec(f.read(), version_file)
dist.metadata.version = version_file["__version__"].public()
return None
raise Exception("No _version.py found.")
from ._version import __version__ # noqa
__all__ = ["__version__", "Version", "getVersionString"]
| StarcoderdataPython |
3359 | def modify(y):
return y # returns same reference. No new object is created
x = [1, 2, 3]
y = modify(x)
print("x == y", x == y)
print("x == y", x is y) | StarcoderdataPython |
196575 | import re
from datetime import datetime
from moto.core import get_account_id, BaseBackend
from moto.core.utils import iso_8601_datetime_without_milliseconds, BackendDict
from .exceptions import (
InvalidInputException,
ResourceAlreadyExistsException,
ResourceNotFoundException,
ValidationException,
)
class DatasetGroup:
accepted_dataset_group_name_format = re.compile(r"^[a-zA-Z][a-z-A-Z0-9_]*")
accepted_dataset_group_arn_format = re.compile(r"^[a-zA-Z0-9\-\_\.\/\:]+$")
accepted_dataset_types = [
"INVENTORY_PLANNING",
"METRICS",
"RETAIL",
"EC2_CAPACITY",
"CUSTOM",
"WEB_TRAFFIC",
"WORK_FORCE",
]
def __init__(
self, region_name, dataset_arns, dataset_group_name, domain, tags=None
):
self.creation_date = iso_8601_datetime_without_milliseconds(datetime.now())
self.modified_date = self.creation_date
self.arn = (
"arn:aws:forecast:"
+ region_name
+ ":"
+ str(get_account_id())
+ ":dataset-group/"
+ dataset_group_name
)
self.dataset_arns = dataset_arns if dataset_arns else []
self.dataset_group_name = dataset_group_name
self.domain = domain
self.tags = tags
self._validate()
def update(self, dataset_arns):
self.dataset_arns = dataset_arns
self.last_modified_date = iso_8601_datetime_without_milliseconds(datetime.now())
def _validate(self):
errors = []
errors.extend(self._validate_dataset_group_name())
errors.extend(self._validate_dataset_group_name_len())
errors.extend(self._validate_dataset_group_domain())
if errors:
err_count = len(errors)
message = str(err_count) + " validation error"
message += "s" if err_count > 1 else ""
message += " detected: "
message += "; ".join(errors)
raise ValidationException(message)
def _validate_dataset_group_name(self):
errors = []
if not re.match(
self.accepted_dataset_group_name_format, self.dataset_group_name
):
errors.append(
"Value '"
+ self.dataset_group_name
+ "' at 'datasetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern "
+ self.accepted_dataset_group_name_format.pattern
)
return errors
def _validate_dataset_group_name_len(self):
errors = []
if len(self.dataset_group_name) >= 64:
errors.append(
"Value '"
+ self.dataset_group_name
+ "' at 'datasetGroupName' failed to satisfy constraint: Member must have length less than or equal to 63"
)
return errors
def _validate_dataset_group_domain(self):
errors = []
if self.domain not in self.accepted_dataset_types:
errors.append(
"Value '"
+ self.domain
+ "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set "
+ str(self.accepted_dataset_types)
)
return errors
class ForecastBackend(BaseBackend):
def __init__(self, region_name):
super().__init__()
self.dataset_groups = {}
self.datasets = {}
self.region_name = region_name
def create_dataset_group(self, dataset_group_name, domain, dataset_arns, tags):
dataset_group = DatasetGroup(
region_name=self.region_name,
dataset_group_name=dataset_group_name,
domain=domain,
dataset_arns=dataset_arns,
tags=tags,
)
if dataset_arns:
for dataset_arn in dataset_arns:
if dataset_arn not in self.datasets:
raise InvalidInputException(
"Dataset arns: [" + dataset_arn + "] are not found"
)
if self.dataset_groups.get(dataset_group.arn):
raise ResourceAlreadyExistsException(
"A dataset group already exists with the arn: " + dataset_group.arn
)
self.dataset_groups[dataset_group.arn] = dataset_group
return dataset_group
def describe_dataset_group(self, dataset_group_arn):
try:
dataset_group = self.dataset_groups[dataset_group_arn]
except KeyError:
raise ResourceNotFoundException("No resource found " + dataset_group_arn)
return dataset_group
def delete_dataset_group(self, dataset_group_arn):
try:
del self.dataset_groups[dataset_group_arn]
except KeyError:
raise ResourceNotFoundException("No resource found " + dataset_group_arn)
def update_dataset_group(self, dataset_group_arn, dataset_arns):
try:
dsg = self.dataset_groups[dataset_group_arn]
except KeyError:
raise ResourceNotFoundException("No resource found " + dataset_group_arn)
for dataset_arn in dataset_arns:
if dataset_arn not in dsg.dataset_arns:
raise InvalidInputException(
"Dataset arns: [" + dataset_arn + "] are not found"
)
dsg.update(dataset_arns)
def list_dataset_groups(self):
return [v for (_, v) in self.dataset_groups.items()]
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
forecast_backends = BackendDict(ForecastBackend, "forecast")
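# Illustrative sketch (not part of moto itself): exercising the backend directly, outside
# the usual mock decorator. The region and dataset group name are assumptions.
#
#   backend = ForecastBackend("us-east-1")
#   dsg = backend.create_dataset_group("my_group", "CUSTOM", None, None)
#   backend.describe_dataset_group(dsg.arn)
#   backend.list_dataset_groups()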
| StarcoderdataPython |
1690005 | <filename>tests/api/controller/test_controller.py
# (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
from datetime import datetime
import json
import logging
import mock
import time
from bll import api
from bll.api.controllers import app_controller
from bll.api.controllers.v1 import V1
from bll.common.job_status import get_job_status
from tests.util import TestCase, log_level
@mock.patch.object(app_controller, 'response')
@mock.patch.object(app_controller, 'request')
class Test(TestCase):
def test_post_no_txn_id(self, _mock_request, _mock_response):
_mock_request.body = json.dumps({
api.TARGET: 'general',
api.DATA: {
api.OPERATION: 'null',
}
})
reply = app_controller.AppController().post()
self.assertEqual(reply[api.STATUS], api.COMPLETE)
def test_status_update_no_txn_id(self, _mock_request, _mock_response):
_mock_request.body = json.dumps({
api.TARGET: 'general',
api.DATA: {
api.OPERATION: 'null',
},
api.JOB_STATUS_REQUEST: True
})
with log_level(logging.CRITICAL, 'bll'):
reply = app_controller.AppController().post()
self.assertEqual(reply[api.STATUS], api.STATUS_ERROR)
self.assertIn("No txn_id", reply[api.DATA][0][api.DATA])
self.assertTrue(_mock_response.status, 400)
def test_post_request_fail(self, _mock_request, _mock_response):
_mock_request.body = json.dumps({api.TARGET: 'general',
api.DATA: {api.OPERATION:
'failhandle'}})
# Suppress the expected exception message
with log_level(logging.CRITICAL, 'bll'):
reply = app_controller.AppController().post()
self.assertEqual(reply[api.STATUS], api.STATUS_ERROR)
self.assertTrue(_mock_response.status, 400)
def test_post_complete_fail(self, _mock_request, _mock_response):
_mock_request.body = json.dumps({api.TARGET: 'general',
api.DATA: {api.OPERATION:
'failcomplete'}})
# Suppress the expected exception message from service
with log_level(logging.CRITICAL, 'bll.plugins.service'):
reply = app_controller.AppController().post()
time.sleep(0.1)
txn_id = reply.get(api.TXN_ID)
reply = get_job_status(txn_id)
self.assertEqual(reply[api.STATUS], 'error')
def test_post_complete_error(self, _mock_request, _mock_response):
_mock_request.body = json.dumps({api.TARGET: 'general',
api.DATA: {api.OPERATION:
'errorcomplete'}})
# Suppress the expected exception message from service
with log_level(logging.CRITICAL, 'bll.plugins.service'):
reply = app_controller.AppController().post()
time.sleep(0.1)
txn_id = reply.get(api.TXN_ID)
reply = get_job_status(txn_id)
self.assertEqual(reply[api.STATUS], 'error')
self.assertEqual(reply[api.DATA][0][api.DATA], 'some error happened')
class TestV1(TestCase):
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls.load_test_app()
def testIndex(self):
assert self.app.get('/').status_int == 200
def testV1Index(self):
assert self.app.get('/v1/').status_int == 200
@mock.patch('bll.api.controllers.v1.login',
return_value=type('X', (object,),
dict(auth_token='foo',
expires=datetime.utcnow())))
def testLogin(self, mock_login):
body = {'username': 'user', 'password': '<PASSWORD>'}
response = self.app.post_json('/v1/auth_token', body)
self.assertEqual(200, response.status_code)
@mock.patch('bll.api.controllers.v1.login', side_effect=Exception('foo'))
def testFailLogin(self, _):
body = {'username': 'user', 'password': '<PASSWORD>'}
response = self.app.post_json('/v1/auth_token', body,
expect_errors=True)
self.assertEqual(401, response.status_code)
def testBypassPermissions(self):
# Create a request object for eula, which bypasses permission checks
request = type('Request', (object,), dict(body='{"target": "eula"}'))
with mock.patch('bll.api.controllers.v1.request', request):
self.assertTrue(V1.check_permissions())
def testPermissionsWithoutToken(self):
# Create a request object without a token
request = type('Request', (object,), dict(body='{"target": "foo"}',
headers='{}'))
with mock.patch('bll.api.controllers.v1.request', request):
self.assertFalse(V1.check_permissions())
@mock.patch('bll.api.controllers.v1.validate', return_value=True)
def testPermissionsWithToken(self, _):
# Create a request object with a token
request = type('Request', (object,), dict(body='{"target": "foo"}',
headers={"X-Auth-Token": "sometoken"}))
with mock.patch('bll.api.controllers.v1.request', request):
self.assertTrue(V1.check_permissions())
@mock.patch('bll.api.controllers.v1.validate', return_value=True)
def testBackwardCompat(self, _):
# Create an old token blob as a json string
headers = """
{"management_appliance" : {"tokens": [{"auth_token" : "blah" }]}}
"""
# Create a bogus request object
request = type('Request', (object,), dict(body='{"target": "plugins"}',
headers={"X-Auth-Token": headers}))
with mock.patch('bll.api.controllers.v1.request', request):
self.assertTrue(V1.check_permissions())
@mock.patch('bll.api.controllers.v1.validate', return_value=True)
def test_missing_service(self, _):
# Suppress the expected exception message from service
body = {'target': 'bogus-service'}
response = self.app.post_json('/v1/bll', body,
expect_errors=True)
self.assertEqual(401, response.status_code)
| StarcoderdataPython |
54741 | <reponame>Jimmy-INL/SKDMD<gh_stars>1-10
import sys
import numpy as np
sys.path.insert(0, '../../../')
from SKDMD.MODEL_SRC.kdmd import KDMD
from SKDMD.PREP_DATA_SRC.source_code.lib.utilities import timing
class CKDMD(KDMD):
"""
Class for Kernel DMD
with kernel as
* Gaussian kernel
* polynomial kernel
* linear kernel DMD
"""
def __init__(self, config):
super(CKDMD, self).__init__(config)
self.type = 'c'
self.model_dir = self.case_dir + '/' + self.type + '-kdmd-s' + str(config['sigma']) + '-r' + str(config['reduced_rank'])
self.makedir(self.model_dir)
def compute_deigphi_dt(self, x, xdot):
# compute Ahat between x and self.X
if self.kernel == 'linear':
Ahat = self.computeKernelArray(xdot, self.X)
elif self.kernel == 'gaussian':
# dot_x^i_k * (x^i_k - x^j_k) scalar field from inner product
Ahat_1 = np.tensordot(np.ones(self.X.shape[0]), xdot, axes=0)
Z = np.tensordot(np.ones(self.X.shape[0]), x, axes=0)
Z2 = np.tensordot(np.ones(x.shape[0]), self.X, axes=0)
ZT = np.transpose(Z2,axes=(1,0,2))
ZV = Z - ZT
Ahat_2 = np.einsum('ijk,ijk->ji',Ahat_1,ZV)
# elementwise multiplication with the last kernel thing
newGhat = self.computeKernelArray(x, self.X)
Ahat = Ahat_2 * newGhat * -2.0 / (self.sigma_gaussian**2)
elif self.kernel == 'polynomial':
Ahat_1 = np.matmul(xdot, np.transpose(self.X))
newGhat = self.computeKernelArray(x, self.X)
Ahat = self.power * np.power(newGhat, (self.power - 1)/self.power) * Ahat_1
else:
raise NotImplementedError("this kernel: " + str(self.kernel) + " is not implemented!")
# then compute deigen_phi_dt
deigen_phi_dt = np.matmul(np.matmul(np.matmul(Ahat, self.Q), self.inverse_sigma), self.Koopman['eigenvectorHat'])
return deigen_phi_dt
def computeAhat(self, X, Xdot):
if self.kernel == 'linear':
Ahat = np.matmul(Xdot, np.transpose(X))
elif self.kernel == 'gaussian':
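            # for the Gaussian kernel k(x, y) = exp(-||x - y||^2 / sigma^2), differentiating
            # along the trajectory x(t) gives dk/dt = -(2 / sigma^2) * (xdot . (x - y)) * k(x, y);
            # the einsum and elementwise product below assemble exactly that expression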
# dot_x^i_k * (x^i_k - x^j_k) scalar field from inner product
Ahat_1 = np.tensordot(np.ones(Xdot.shape[0]), Xdot, axes=0)
Z = np.tensordot(np.ones(X.shape[0]), X, axes=0)
ZT = np.transpose(Z,axes=(1,0,2))
ZV = Z - ZT
Ahat_2 = np.einsum('ijk,ijk->ji',Ahat_1,ZV)
# elementwise multiplication with the last kernel thing
Ahat = Ahat_2 * self.Ghat * -2.0 / (self.sigma_gaussian**2)
elif self.kernel == 'polynomial':
Ahat_1 = np.matmul(Xdot, np.transpose(X))
Ahat = self.power * np.power(self.Ghat, (self.power - 1)/self.power) * Ahat_1
else:
raise NotImplementedError("this kernel: " + str(self.kernel) + " is not implemented!")
return Ahat
@timing
def train(self, X, Xdot):
"""
Given X and Xdot, training for Koopman eigenfunctions, eigenvalues, eigenvectors, and Koopman modes
:type X: np.ndarray
:param X: state of the system
:type Xdot: np.ndarray
:param Xdot: time derivative of the state of the system
"""
self.X = X
self.Xdot = Xdot
# prepare scaler
self.prepare_scaler(self.X)
# compute Koopman tuples
self.compute_Koopman_analysis()
| StarcoderdataPython |
154705 | from .mobilenetv2 import QuantizableMobileNetV2, mobilenet_v2, __all__ as mv2_all
from .mobilenetv3 import QuantizableMobileNetV3, mobilenet_v3_large, mobilenet_v3_small, __all__ as mv3_all
__all__ = mv2_all + mv3_all
| StarcoderdataPython |
1701468 | <reponame>skurob/cgas
from .constants import DEFAULT_SUCCESS, SUCCESS_KEY, MESSAGE_KEY, DATA_KEY, DEFAULT_FAILURE
from telethon.tl.types import User
from typing import Any
class UserModels:
@staticmethod
def success(message: str = None, data: Any = None) -> dict:
if message != None and data != None:
return {
SUCCESS_KEY : True,
MESSAGE_KEY : message,
DATA_KEY : data
}
elif message != None:
return {
SUCCESS_KEY : True,
MESSAGE_KEY : message,
}
elif data != None:
return {
SUCCESS_KEY : True,
DATA_KEY : data,
}
return DEFAULT_SUCCESS
@staticmethod
def failure(message: str = None) -> dict:
if message != None:
return {
SUCCESS_KEY: False,
MESSAGE_KEY: message
}
return DEFAULT_FAILURE
@staticmethod
def needs2FA(message: str):
return {
SUCCESS_KEY: False,
MESSAGE_KEY: message,
DATA_KEY: {
"needs2fa": True
}
}
@staticmethod
def userDetails(userDetails: User) -> dict:
return {
SUCCESS_KEY: True,
DATA_KEY: {
"userId" : userDetails.id,
"username" : userDetails.username,
"firstName" : userDetails.first_name,
"lastName" : userDetails.last_name,
"phoneNumber" : userDetails.phone
}
}
@staticmethod
def unauthorized() -> dict:
return {
SUCCESS_KEY: False,
MESSAGE_KEY: "Invalid phone number or session expired!"
}
| StarcoderdataPython |
3318169 | <reponame>larsoner/beamformer_simulation
import warnings
import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_dics, apply_dics_csd
from mne.forward.forward import _restrict_forward_to_src_sel
from mne.time_frequency import csd_morlet
import config
from config import fname, dics_settings
from spatial_resolution import get_nearest_neighbors
from time_series import simulate_raw, add_source_to_raw, create_epochs
# Don't be verbose
mne.set_log_level(False)
#fn_report_h5 = fname.report(vertex=config.vertex)
fn_report_h5 = None # Don't make reports.
###############################################################################
# Simulate raw data
###############################################################################
print('simulate data')
info = mne.io.read_info(fname.sample_raw)
info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False))
fwd_disc_true = mne.read_forward_solution(fname.fwd_discrete_true)
fwd_disc_true = mne.pick_types_forward(fwd_disc_true, meg=True, eeg=False)
er_raw = mne.io.read_raw_fif(fname.ernoise, preload=True)
raw, stc_signal = simulate_raw(info=info, fwd_disc_true=fwd_disc_true,
signal_vertex=config.vertex,
signal_freq=config.signal_freq,
n_trials=config.n_trials, noise_multiplier=0,
random_state=config.random, n_noise_dipoles=0,
er_raw=er_raw)
del info, er_raw
# Read in forward solution
fwd_disc_man = mne.read_forward_solution(fname.fwd_discrete_man)
###############################################################################
# Get nearest neighbors
###############################################################################
nearest_neighbors, distances = get_nearest_neighbors(config.vertex, signal_hemi=0, src=fwd_disc_true['src'])
corrs = []
n_settings = len(dics_settings)
do_break = np.zeros(shape=n_settings, dtype=bool)
for i, (nb_vertex, nb_dist) in enumerate(np.column_stack((nearest_neighbors, distances))[:config.n_neighbors_max]):
print(f'Processing neighbour {i}/{config.n_neighbors_max}', flush=True)
# after column_stack nb_vertex is float
nb_vertex = int(nb_vertex)
###############################################################################
# Simulate second dipole
###############################################################################
raw2, stc_signal2 = add_source_to_raw(raw, fwd_disc_true=fwd_disc_true,
signal_vertex=nb_vertex, signal_freq=config.signal_freq2,
trial_length=config.trial_length, n_trials=config.n_trials,
source_type='chirp')
###############################################################################
# Create epochs
###############################################################################
title = 'Simulated evoked for two signal vertices'
epochs = create_epochs(raw2, title=title, fn_simulated_epochs=None, fn_report_h5=fn_report_h5)
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make CSDs
csd = csd_morlet(epochs, [config.signal_freq, config.signal_freq2], tmin=0, tmax=1, decim=5)
noise_csd = csd_morlet(epochs, [config.signal_freq, config.signal_freq2], tmin=-1, tmax=0, decim=5)
###############################################################################
# Compute DICS beamformer results
###############################################################################
# Speed things up by restricting the forward solution to only the two
# relevant source points.
src_sel = np.sort(np.array([config.vertex, nb_vertex]))
fwd = _restrict_forward_to_src_sel(fwd_disc_man, src_sel)
for idx_setting, setting in enumerate(dics_settings):
if do_break[idx_setting]:
print(setting, '(skip)')
continue
reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, real_filter, use_noise_cov, reduce_rank = setting
try:
if sensor_type == 'grad':
info = epochs_grad.info
elif sensor_type == 'mag':
info = epochs_mag.info
elif sensor_type == 'joint':
info = epochs_joint.info
else:
raise ValueError('Invalid sensor type: %s', sensor_type)
filters = make_dics(info, fwd, csd, reg=reg, pick_ori=pick_ori,
inversion=inversion, weight_norm=weight_norm,
noise_csd=noise_csd if use_noise_cov else None,
normalize_fwd=normalize_fwd, reduce_rank=reduce_rank,
real_filter=real_filter)
stc, freqs = apply_dics_csd(csd, filters)
vert1_idx = np.searchsorted(src_sel, config.vertex)
vert2_idx = np.searchsorted(src_sel, nb_vertex)
ratio1 = stc.data[vert1_idx, 1] / stc.data[vert1_idx, 0]
ratio2 = stc.data[vert2_idx, 0] / stc.data[vert2_idx, 1]
ratio = np.sqrt(ratio1 * ratio2)
corrs.append(list(setting) + [nb_vertex, nb_dist, ratio])
print(setting, nb_dist, ratio)
if ratio < 0.5 ** 0.5:
do_break[idx_setting] = True
except Exception as e:
print(e)
corrs.append(list(setting) + [nb_vertex, nb_dist, np.nan])
if do_break.all():
break
else:
warnings.warn('Reached max number of sources, but still some parameter combinations have large correlations.')
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
df = pd.DataFrame(corrs,
columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
'weight_norm', 'normalize_fwd', 'real_filter', 'use_noise_cov',
'reduce_rank', 'nb_vertex', 'nb_dist', 'ratio'])
df.to_csv(fname.dics_results_2s(vertex=config.vertex))
print('OK!')
| StarcoderdataPython |
1633837 | <reponame>knuu/competitive-programming<filename>atcoder/arc/arc079_b.py
K = int(input())
L = 50
N = K // L + L - 1
res = K - (N - L + 1) * L
assert(res < 50)
ans = [N] * L
for i in range(res):
ans[i] += L - res + 1
for i in range(res, L):
ans[i] -= res
print(L)
print(*ans)
| StarcoderdataPython |
3385512 | from django.urls import path
from main import views
from django.db import connection
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('',views.index, name="index-1"),
path('services', views.services, name='services'),
path('contact-us', views.contact_us, name='contact-us'),
path('about-us', views.about_us, name='about-us'),
path('login',views.login,name='login'),
path('loginrequest',views.loginaccess,name='loginrequest'),
path('paralegal',views.paralegal, name='paralegal'),
path('customer',views.customer,name='customer'),
path('form_lawyer',views.user_search_lawyer_query, name='form_lawyer'),
path('lawyer',views.lawyer,name='lawyer'),
path('otherstaff',views.otherstaff,name='otherstaff'),
path('managing_partner',views.managing_partner,name='managing_partner'),
path('meeting_form',views.meeting_form,name='meeting_form'),
path('customer_client',views.customer_client,name='customer_client')
]
| StarcoderdataPython |
1660004 | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import matplotlib.pyplot as plt
import neptune
import skopt.plots as sk_plots
from neptunecontrib.monitoring.utils import axes2fig
class NeptuneMonitor:
"""Logs hyperparameter optimization process to Neptune.
Examples:
Initialize NeptuneMonitor::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
monitor = sk_utils.NeptuneMonitor()
Run skopt training passing monitor as a a callback::
...
results = skopt.forest_minimize(objective, space, callback=[monitor],
base_estimator='ET', n_calls=100, n_random_starts=10)
"""
def __init__(self, experiment=None):
self._exp = experiment if experiment else neptune
self._iteration = 0
def __call__(self, res):
self._exp.send_metric('run_score',
x=self._iteration, y=res.func_vals[-1])
self._exp.send_text('run_parameters',
x=self._iteration, y=NeptuneMonitor._get_last_params(res))
self._iteration += 1
@staticmethod
def _get_last_params(res):
param_vals = res.x_iters[-1]
named_params = _format_to_named_params(param_vals, res)
return str(named_params)
def send_runs(results, experiment=None):
"""Logs runs results and parameters to neptune.
    Metric channel `run_score` and text channel `run_parameters` are created and the
    score and parameters of every run are logged to neptune.
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an
output of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
        Send runs to neptune::
            import neptune
            import neptunecontrib.monitoring.skopt as sk_utils
            neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
            sk_utils.send_runs(results)
"""
_exp = experiment if experiment else neptune
for i, (loss, params) in enumerate(zip(results.func_vals, results.x_iters)):
_exp.send_metric('run_score', x=i, y=loss)
named_params = _format_to_named_params(params, results)
_exp.send_text('run_parameters', str(named_params))
def send_best_parameters(results, experiment=None):
"""Logs best_parameters list to neptune.
Text channel `best_parameters` is created and a list of tuples (name, value)
    of best parameters is logged to neptune.
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an
output of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Send best parameters to neptune::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
            sk_utils.send_best_parameters(results)
"""
_exp = experiment if experiment else neptune
named_params = _format_to_named_params(results.x, results)
_exp.set_property('best_parameters', str(named_params))
def send_plot_convergence(results, experiment=None, channel_name='convergence'):
"""Logs skopt plot_convergence figure to neptune.
Image channel `convergence` is created and the output of the
    plot_convergence function is first converted to `neptune.Image` and
then sent to neptune.
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an
output of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Send skopt plot_convergence figure to neptune::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
            sk_utils.send_plot_convergence(results)
"""
_exp = experiment if experiment else neptune
fig, ax = plt.subplots(figsize=(16, 12))
sk_plots.plot_convergence(results, ax=ax)
with tempfile.NamedTemporaryFile(suffix='.png') as f:
fig.savefig(f.name)
_exp.send_image(channel_name, f.name)
def send_plot_evaluations(results, experiment=None, channel_name='evaluations'):
"""Logs skopt plot_evaluations figure to neptune.
Image channel `evaluations` is created and the output of the
    plot_evaluations function is first converted to `neptune.Image` and
then sent to neptune.
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an
output of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Send skopt plot_evaluations figure to neptune::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
            sk_utils.send_plot_evaluations(results)
"""
_exp = experiment if experiment else neptune
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
with tempfile.NamedTemporaryFile(suffix='.png') as f:
fig.savefig(f.name)
_exp.send_image(channel_name, f.name)
def send_plot_objective(results, experiment=None, channel_name='objective'):
"""Logs skopt plot_objective figure to neptune.
Image channel `objective` is created and the output of the
    plot_objective function is first converted to `neptune.Image` and
then sent to neptune.
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an
output of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Send skopt plot_objective figure to neptune::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
            sk_utils.send_plot_objective(results)
"""
_exp = experiment if experiment else neptune
fig = plt.figure(figsize=(16, 12))
try:
fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
with tempfile.NamedTemporaryFile(suffix='.png') as f:
fig.savefig(f.name)
_exp.send_image(channel_name, f.name)
except Exception as e:
        print('Could not create an objective chart due to error: {}'.format(e))
def _format_to_named_params(params, result):
    return [(dimension.name, param) for dimension, param in zip(result.space, params)]
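# Hedged usage sketch, not part of the original module: the project name, `objective`,
# and `space` are placeholders carried over from the docstring examples above, and an
# active experiment (legacy neptune-client `create_experiment`) is assumed.
#
#   import neptune
#   import skopt
#   import neptunecontrib.monitoring.skopt as sk_utils
#
#   neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
#   neptune.create_experiment(name='skopt-sweep')
#   results = skopt.forest_minimize(objective, space,
#                                   base_estimator='ET', n_calls=100, n_random_starts=10)
#   sk_utils.send_best_parameters(results)
#   sk_utils.send_plot_convergence(results)
#   sk_utils.send_plot_evaluations(results)
#   sk_utils.send_plot_objective(results)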
| StarcoderdataPython |
1682213 | import torch
from torch.nn import Dropout
import torch.nn.functional as F
import math
class PositionalEncoding(torch.nn.Module):
def __init__(self,
d_model,
dropout=0.1,
max_len=500,
identical_sizes = True
):
super(PositionalEncoding, self).__init__()
self.identical_sizes = identical_sizes
self.d_model = d_model
self.dropout = Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
if identical_sizes:
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self,
x,
batch):
if self.identical_sizes:
return self.forward_identical_sizes(x,
batch)
else:
return self.forward_different_sizes(x,
batch)
def forward_different_sizes(self,
x,
batch):
        num = batch.max() + 1  # number of graphs; batch holds 0-based graph indices
        for graph in torch.arange(0, num):
idxs = batch == graph
x[idxs,:] = x[idxs,:] + self.pe[:torch.sum(idxs), :]
return self.dropout(x)
def forward_identical_sizes(self,
x,
batch):
size = torch.sum(batch == 0).item()
x = x.view(size,-1,self.d_model).float()
x = x + self.pe[:x.size(0), :]
x = x.view(-1, self.d_model)
return self.dropout(x)
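# Hedged example of calling PositionalEncoding (shapes are illustrative only; `batch`
# is assumed to map each row of `x` to its graph index, PyTorch Geometric style):
#
#   pe = PositionalEncoding(d_model=64, identical_sizes=True)
#   x = torch.randn(2 * 10, 64)                    # 2 graphs of 10 nodes, stacked
#   batch = torch.arange(2).repeat_interleave(10)  # [0]*10 + [1]*10
#   out = pe(x, batch)                             # same shape as x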
class GRU(torch.nn.Module):
"""
Wrapper class for the GRU used by the GNN framework, nn.GRU is used for the Gated Recurrent Unit itself
"""
def __init__(self,
input_size,
hidden_size):
super(GRU, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.gru = torch.nn.GRU(input_size=input_size,
hidden_size=hidden_size)
def forward(self, x, y):
"""
        :param x: shape: (B, Din) where Din <= input_size (difference is zero-padded)
        :param y: shape: (B, Dh) where Dh <= hidden_size (difference is zero-padded)
        :return: shape: (B, hidden_size)
"""
x = x.unsqueeze(1)
y = y.unsqueeze(1)
assert (x.shape[-1] <= self.input_size and y.shape[-1] <= self.hidden_size)
(B, N,_) = x.shape
x = x.reshape(1, B * N, -1).contiguous()
y = y.reshape(1, B * N, -1).contiguous()
# padding if necessary
if x.shape[-1] < self.input_size:
x = F.pad(input=x, pad=[0, self.input_size - x.shape[-1]], mode='constant', value=0)
if y.shape[-1] < self.hidden_size:
y = F.pad(input=y, pad=[0, self.hidden_size - y.shape[-1]], mode='constant', value=0)
x = self.gru(x, y)[1]
x = x.reshape(B, N, -1)
return x[:,0,:]
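# Hedged usage sketch (hypothetical sizes): a typical message-passing update where the
# aggregated message may be narrower than input_size and is zero-padded internally.
#
#   gru = GRU(input_size=64, hidden_size=64)
#   msg = torch.randn(20, 32)        # (num_nodes, Din) with Din <= input_size
#   h = torch.randn(20, 64)          # (num_nodes, Dh)  with Dh  <= hidden_size
#   h_new = gru(msg, h)              # -> (20, 64)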
def reset(nn):
def _reset(item):
if hasattr(item, 'reset_parameters'):
item.reset_parameters()
if nn is not None:
if hasattr(nn, 'children') and len(list(nn.children())) > 0:
for item in nn.children():
_reset(item)
else:
_reset(nn) | StarcoderdataPython |
102812 | <gh_stars>0
import pathlib
import random
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow_federated as tff
from data_helpers import make_client_ids
from data_helpers import provide_client_data_fn
from model_helpers import build_vgg16
# Hyperparams
flags.DEFINE_integer("num_rounds", default=10,
help="Number of rounds of federated averaging.")
flags.DEFINE_integer("clients_per_round", default=10,
help="Number of clients to sample for training per round.")
flags.DEFINE_float("client_learning_rate", default=.02,
help="Learning rate for client optimizers.")
flags.DEFINE_float("server_learning_rate", default=1.0,
help="Learning rate for client optimizers.")
flags.DEFINE_bool("freeze_model", default=True,
help="Freeze early layers in the model (if its builder fn allows)")
flags.DEFINE_integer("image_width", default=224,
help="Width dimension of input radiology images.")
flags.DEFINE_integer("image_height", default=224,
help="Height dimension of input radiology images.")
flags.DEFINE_integer("batch_size", default=4,
help="Local batch size for each client.")
flags.DEFINE_enum("model", default="vgg16", enum_values=["vgg16"],
help="Which model to use. Must have a builder in model_helpers.")
# Data flags
flags.DEFINE_string("data_root", default="./data",
help="Path to the root folder containing chest xray data")
flags.DEFINE_string("train_clients_subdir", default="train_clients",
help="Subdirectory of `data_root` containing data allocated to the "
"training subset of clients.")
flags.DEFINE_string("test_clients_subdir", default="test_clients",
help="Subdirectory of `data-root` containing data allocated to the "
"evaluation subset of clients.")
FLAGS = flags.FLAGS
def main(argv):
dataroot = pathlib.Path(FLAGS.data_root)
train_path = dataroot.joinpath(FLAGS.train_clients_subdir)
test_path = dataroot.joinpath(FLAGS.test_clients_subdir)
train_client_ids = make_client_ids(train_path)
test_client_ids = make_client_ids(test_path)
img_dims = (FLAGS.image_width, FLAGS.image_height)
train_client_fn = provide_client_data_fn(train_path, *img_dims, FLAGS.batch_size)
test_client_fn = provide_client_data_fn(test_path, *img_dims, FLAGS.batch_size)
train_clients = tff.simulation.ClientData.from_clients_and_fn(
train_client_ids, train_client_fn)
test_clients = tff.simulation.ClientData.from_clients_and_fn(
test_client_ids, test_client_fn)
federated_train_data = [
train_clients.create_tf_dataset_for_client(client_id)
for client_id in train_client_ids
]
federated_test_data = [
test_clients.create_tf_dataset_for_client(client_id)
for client_id in test_client_ids
]
client_opt_fn = lambda: tf.keras.optimizers.SGD(FLAGS.client_learning_rate)
server_opt_fn = lambda: tf.keras.optimizers.SGD(FLAGS.server_learning_rate)
iterative_process = tff.learning.build_federated_averaging_process(
model_fn, client_opt_fn, server_opt_fn)
state = iterative_process.initialize()
for rnd in range(FLAGS.num_rounds):
round_clients = random.sample(federated_train_data, FLAGS.clients_per_round)
state, metrics = iterative_process.next(state, round_clients)
print('round {rnd}, metrics={metrics}'.format(rnd=rnd, metrics=metrics))
def model_fn():
x_spec = (tf.float32, [None, 224, 224, 3])
y_spec = (tf.int64, [None])
input_spec = (x_spec, y_spec)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model = build_vgg16(freeze=FLAGS.freeze_model)
return tff.learning.from_keras_model(
model, loss_fn, input_spec=input_spec,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
if __name__ == "__main__":
app.run(main)
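# Example invocation (hypothetical script name and paths; assumes each client has its
# own subdirectory under the train/test client folders, as make_client_ids implies):
#
#   python federated_xray_train.py --data_root=./data \
#       --train_clients_subdir=train_clients --test_clients_subdir=test_clients \
#       --num_rounds=20 --clients_per_round=5 --batch_size=4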
| StarcoderdataPython |
173823 | <reponame>CrazyAZ/BLSS-Input-Viewer
# This Pro Controller driver code is based on https://github.com/yvbbrjdr/procon/blob/master/src/procon.py
import math
import time
import hid
def to_int16(uint16):
return -((uint16 ^ 0xFFFF) + 1) if uint16 & 0x8000 else uint16
class ProCon:
VENDOR_ID = 0x057E
PRODUCT_ID = 0x2009
PACKET_SIZE = 64
CALIBRATION_OFFSET = 0x603D
CALIBRATION_LENGTH = 0x12
COMMAND_RETRIES = 10
RUMBLE_NEUTRAL = (0x00, 0x01, 0x40, 0x40)
RUMBLE = (0x74, 0xBE, 0xBD, 0x6F)
DEFAULT_IMU_SENSITIVITY = (0x03, 0x00, 0x00, 0x01)
class OutputReportID:
RUMBLE_SUBCOMMAND = 0x01
RUMBLE = 0x10
COMMAND = 0x80
class InputReportID:
SUBCOMMAND_REPLY = 0x21
CONTROLLER_STATE = 0x30
COMMAND_ACK = 0x81
class CommandID:
HANDSHAKE = 0x02
HIGH_SPEED = 0x03
FORCE_USB = 0x04
class SubcommandID:
SET_INPUT_REPORT_MODE = 0x03
SPI_FLASH_READ = 0x10
SET_PLAYER_LIGHTS = 0x30
SET_HOME_LIGHT = 0x38
ENABLE_IMU = 0x40
SET_IMU_SENSITIVITY = 0x41
ENABLE_VIBRATION = 0x48
class Button:
A = 'A'
B = 'B'
X = 'X'
Y = 'Y'
UP = 'Up'
DOWN = 'Down'
LEFT = 'Left'
RIGHT = 'Right'
MINUS = '-'
PLUS = '+'
SCREENSHOT = 'Screenshot'
HOME = 'Home'
L = 'L'
ZL = 'ZL'
R = 'R'
ZR = 'ZR'
LS = 'LS'
RS = 'RS'
def __init__(self):
self.subcommand_counter = 0
self.dev = hid.device()
self.dev.open(ProCon.VENDOR_ID, ProCon.PRODUCT_ID)
self.handshake()
self.high_speed()
self.handshake()
self.rumble_low = self.rumble_high = ProCon.RUMBLE_NEUTRAL
self.rumble_expire = 0
self.load_stick_calibration()
self.enable_vibration(True)
self.set_input_report_mode(ProCon.InputReportID.CONTROLLER_STATE)
self.force_usb()
self.set_player_lights(True, False, False, False)
self.enable_imu(True)
self.set_imu_sensitivity(ProCon.DEFAULT_IMU_SENSITIVITY)
run = True
def start(self, callback):
while self.run:
state = self.recv()
if state[0] != ProCon.InputReportID.CONTROLLER_STATE:
continue
buttons = {
ProCon.Button.A: state[3] & 0x08 > 0,
ProCon.Button.B: state[3] & 0x04 > 0,
ProCon.Button.X: state[3] & 0x02 > 0,
ProCon.Button.Y: state[3] & 0x01 > 0,
ProCon.Button.UP: state[5] & 0x02 > 0,
ProCon.Button.DOWN: state[5] & 0x01 > 0,
ProCon.Button.LEFT: state[5] & 0x08 > 0,
ProCon.Button.RIGHT: state[5] & 0x04 > 0,
ProCon.Button.MINUS: state[4] & 0x01 > 0,
ProCon.Button.PLUS: state[4] & 0x02 > 0,
ProCon.Button.SCREENSHOT: state[4] & 0x20 > 0,
ProCon.Button.HOME: state[4] & 0x10 > 0,
ProCon.Button.L: state[5] & 0x40 > 0,
ProCon.Button.ZL: state[5] & 0x80 > 0,
ProCon.Button.R: state[3] & 0x40 > 0,
ProCon.Button.ZR: state[3] & 0x80 > 0,
ProCon.Button.LS: state[4] & 0x08 > 0,
ProCon.Button.RS: state[4] & 0x04 > 0
}
l_x = state[6] | ((state[7] & 0xF) << 8)
l_y = (state[7] >> 4) | (state[8] << 4)
r_x = state[9] | ((state[10] & 0xF) << 8)
r_y = (state[10] >> 4) | (state[11] << 4)
l_x = self.apply_stick_calibration(l_x, 0, 0)
l_y = self.apply_stick_calibration(l_y, 0, 1)
r_x = self.apply_stick_calibration(r_x, 1, 0)
r_y = self.apply_stick_calibration(r_y, 1, 1)
l_stick = (l_x, l_y)
r_stick = (r_x, r_y)
accel = (state[13] | state[14] << 8, state[15] | state[16] << 8, state[17] | state[18] << 8)
gyro = (state[19] | state[20] << 8, state[21] | state[22] << 8, state[23] | state[24] << 8)
accel = tuple(map(to_int16, accel))
gyro = tuple(map(to_int16, gyro))
battery = (state[2] & 0xF0) >> 4
callback(buttons, l_stick, r_stick, accel, gyro, battery)
if self.rumble_expire and int(time.time() * 1000) >= self.rumble_expire:
self.send_rumble(False, False, 0)
def stop(self):
self.run = False
def load_stick_calibration(self):
ok, reply = self.spi_flash_read(ProCon.CALIBRATION_OFFSET, ProCon.CALIBRATION_LENGTH)
if not ok:
raise RuntimeError('cannot load stick calibration')
self.stick_calibration = [
[
[
((reply[27] & 0xF) << 8) | reply[26],
((reply[24] & 0xF) << 8) | reply[23],
((reply[21] & 0xF) << 8) | reply[20]
],
[
(reply[28] << 4) | (reply[27] >> 4),
(reply[25] << 4) | (reply[24] >> 4),
(reply[22] << 4) | (reply[21] >> 4)
]
],
[
[
((reply[33] & 0xF) << 8) | reply[32],
((reply[30] & 0xF) << 8) | reply[29],
((reply[36] & 0xF) << 8) | reply[35]
],
[
(reply[34] << 4) | (reply[33] >> 4),
(reply[31] << 4) | (reply[30] >> 4),
(reply[37] << 4) | (reply[36] >> 4)
]
]
]
for i in range(len(self.stick_calibration)):
for j in range(len(self.stick_calibration[i])):
for k in range(len(self.stick_calibration[i][j])):
if self.stick_calibration[i][j][k] == 0xFFF:
self.stick_calibration[i][j][k] = 0
self.stick_extends = [
[
[
-int(self.stick_calibration[0][0][0] * 0.7),
int(self.stick_calibration[0][0][2] * 0.7)
],
[
-int(self.stick_calibration[0][1][0] * 0.7),
int(self.stick_calibration[0][1][2] * 0.7)
]
],
[
[
-int(self.stick_calibration[1][0][0] * 0.7),
int(self.stick_calibration[1][0][2] * 0.7)
],
[
-int(self.stick_calibration[1][1][0] * 0.7),
int(self.stick_calibration[1][1][2] * 0.7)
]
]
]
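    # apply_stick_calibration re-centres a raw 12-bit stick reading on the calibrated
    # neutral point, widens the stored min/max extents if the reading exceeds them,
    # and rescales the result to a signed range of roughly [-0x7FFF, 0x7FFF].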
def apply_stick_calibration(self, value, stick, axis):
value -= self.stick_calibration[stick][axis][1]
if value < self.stick_extends[stick][axis][0]:
self.stick_extends[stick][axis][0] = value
if value > self.stick_extends[stick][axis][1]:
self.stick_extends[stick][axis][1] = value
if value > 0:
return int(value * 0x7FFF / self.stick_extends[stick][axis][1])
return int(value * -0x7FFF / self.stick_extends[stick][axis][0])
def send(self, data):
return self.dev.write(data) == len(data)
def recv(self):
return self.dev.read(ProCon.PACKET_SIZE)
def send_command(self, id, wait_for_reply=True):
data = (ProCon.OutputReportID.COMMAND, id)
for _ in range(ProCon.COMMAND_RETRIES):
if not self.send(data):
continue
if not wait_for_reply:
return True
reply = self.recv()
if reply[0] == ProCon.InputReportID.COMMAND_ACK and reply[1] == id:
return True
return False
def send_subcommand(self, id, param, wait_for_reply=True):
data = (ProCon.OutputReportID.RUMBLE_SUBCOMMAND, self.subcommand_counter) + self.rumble_low + self.rumble_high + (id,) + param
self.subcommand_counter = (self.subcommand_counter + 1) & 0xFF
for _ in range(ProCon.COMMAND_RETRIES):
if not self.send(data):
continue
if not wait_for_reply:
return True, []
reply = self.recv()
if reply[0] == ProCon.InputReportID.SUBCOMMAND_REPLY and reply[14] == id:
return True, reply
return False, []
def send_rumble(self, low, high, duration):
self.rumble_low = ProCon.RUMBLE if low else ProCon.RUMBLE_NEUTRAL
self.rumble_high = ProCon.RUMBLE if high else ProCon.RUMBLE_NEUTRAL
self.rumble_expire = int(time.time() * 1000) + duration if (low or high) and duration else 0
data = (ProCon.OutputReportID.RUMBLE, self.subcommand_counter) + self.rumble_low + self.rumble_high
self.subcommand_counter = (self.subcommand_counter + 1) & 0xFF
for _ in range(ProCon.COMMAND_RETRIES):
if self.send(data):
return True
return False
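    # Usage sketch (illustrative only; duration is in milliseconds, matching the
    # rumble_expire bookkeeping above):
    #   con = ProCon()
    #   con.send_rumble(low=True, high=True, duration=500)  # buzz both motors ~0.5 s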
def handshake(self):
return self.send_command(ProCon.CommandID.HANDSHAKE)
def high_speed(self):
return self.send_command(ProCon.CommandID.HIGH_SPEED)
def force_usb(self):
return self.send_command(ProCon.CommandID.FORCE_USB, False)
def set_input_report_mode(self, mode):
return self.send_subcommand(ProCon.SubcommandID.SET_INPUT_REPORT_MODE, (mode,))
def spi_flash_read(self, addr, l):
param = (addr & 0x000000FF, (addr & 0x0000FF00) >> 8, (addr & 0x00FF0000) >> 16, (addr & 0xFF000000) >> 24, l)
return self.send_subcommand(ProCon.SubcommandID.SPI_FLASH_READ, param)
def set_player_lights(self, one, two, three, four):
param = (one << 0) | (two << 1) | (three << 2) | (four << 3)
return self.send_subcommand(ProCon.SubcommandID.SET_PLAYER_LIGHTS, (param,))
def set_home_light(self, brightness):
intensity = 0
if brightness > 0:
if brightness < 65:
intensity = (brightness + 5) // 10
else:
intensity = math.ceil(0xF * ((brightness / 100) ** 2.13))
intensity = (intensity & 0xF) << 4
param = (0x01, intensity, intensity, 0x00)
return self.send_subcommand(ProCon.SubcommandID.SET_HOME_LIGHT, param)
def enable_imu(self, enable):
return self.send_subcommand(ProCon.SubcommandID.ENABLE_IMU, (int(enable),))
def set_imu_sensitivity(self, sensitivity):
return self.send_subcommand(ProCon.SubcommandID.SET_IMU_SENSITIVITY, sensitivity)
def enable_vibration(self, enable):
return self.send_subcommand(ProCon.SubcommandID.ENABLE_VIBRATION, (int(enable),))
def print_state(buttons, l_stick, r_stick, accel, gyro, battery):
print('\33[2JButtons:')
for k, v in buttons.items():
if v:
print('[{}]'.format(k), end=' ')
else:
print(' {} '.format(k), end=' ')
print()
print('L Stick: ({:6}, {:6})'.format(l_stick[0], l_stick[1]))
print('R Stick: ({:6}, {:6})'.format(r_stick[0], r_stick[1]))
print('Accelerometer: ({:6}, {:6}, {:6})'.format(accel[0], accel[1], accel[2]))
print('Gyroscope: ({:6}, {:6}, {:6})'.format(gyro[0], gyro[1], gyro[2]))
print('Battery: {}/9'.format(battery))
if __name__ == '__main__':
try:
ProCon().start(print_state)
except KeyboardInterrupt:
print('\rGoodbye!')
| StarcoderdataPython |
4807535 | <reponame>victoria-cds-sig/explore_mimiciv
import bqutils.auth as auth
import bqutils.ibis as iq
def main():
client = iq.get_client(*auth.get_gcreds())
db = client.database("bigquery-public-data.stackoverflow")
expression = db.table("posts_questions").projection(["creation_date", "answer_count"]).limit(5)
df = expression.execute()
for _, r in df.iterrows():
print(r)
if __name__ == "__main__":
main()
| StarcoderdataPython |
88261 | # Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
# functions shared across transformers
def _define_variables(variables):
# Check that variable names are passed in a list.
# Can take None as value
if not variables or isinstance(variables, list):
variables = variables
else:
variables = [variables]
return variables
def _find_numerical_variables(X, variables=None):
# Find numerical variables in a data set or check that
# the variables entered by the user are numerical.
if not variables:
variables = list(X.select_dtypes(include='number').columns)
else:
if len(X[variables].select_dtypes(exclude='number').columns) != 0:
raise TypeError("Some of the variables are not numerical. Please cast them as numerical "
"before calling this transformer")
return variables
def _find_categorical_variables(X, variables=None):
# Find categorical variables in a data set or check that
# the variables entered by user are categorical.
if not variables:
variables = list(X.select_dtypes(include='O').columns)
else:
# variables indicated by user
if len(X[variables].select_dtypes(exclude='O').columns) != 0:
raise TypeError("Some of the variables are not categorical. Please cast them as object "
"before calling this transformer")
return variables
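# Hedged usage sketch on a toy DataFrame (not part of the module):
#
#   import pandas as pd
#   df = pd.DataFrame({'age': [20, 31], 'city': ['NY', 'LA']})
#   _define_variables('age')               # -> ['age']
#   _find_numerical_variables(df)          # -> ['age']
#   _find_categorical_variables(df)        # -> ['city']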
| StarcoderdataPython |